summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorEric Wieser <wieser.eric@gmail.com>2019-10-15 20:20:20 +0100
committerGitHub <noreply@github.com>2019-10-15 20:20:20 +0100
commit10a7a4a815105e16828fe83fb89778c3bbafe692 (patch)
tree2c73effc6bf4b8404e63564f78661caff034b255
parentd0731e118a5c40d866702f1b5da2be4d4f52ded9 (diff)
parent83da5faca3a313c5d37226b86fa781956f8d162b (diff)
downloadnumpy-10a7a4a815105e16828fe83fb89778c3bbafe692.tar.gz
Merge branch 'master' into master
-rw-r--r--.appveyor.yml159
-rw-r--r--.circleci/config.yml20
-rw-r--r--.codecov.yml24
-rw-r--r--.coveragerc1
-rw-r--r--.ctags.d1
-rw-r--r--.dependabot/config.yml9
-rw-r--r--.github/CONTRIBUTING.md6
-rw-r--r--.github/FUNDING.yml2
-rw-r--r--.github/PULL_REQUEST_TEMPLATE.md4
-rw-r--r--.gitignore18
-rw-r--r--.lgtm.yml15
-rw-r--r--.mailmap55
-rw-r--r--.travis.yml53
-rw-r--r--INSTALL.rst.txt25
-rw-r--r--LICENSE.txt32
-rw-r--r--LICENSES_bundled.txt27
-rw-r--r--MANIFEST.in22
-rw-r--r--README.md24
-rw-r--r--azure-pipelines.yml298
-rw-r--r--azure-steps-windows.yml56
-rw-r--r--benchmarks/asv.conf.json2
-rw-r--r--benchmarks/benchmarks/bench_avx.py34
-rw-r--r--benchmarks/benchmarks/bench_core.py14
-rw-r--r--benchmarks/benchmarks/bench_function_base.py187
-rw-r--r--benchmarks/benchmarks/bench_import.py36
-rw-r--r--benchmarks/benchmarks/bench_io.py3
-rw-r--r--benchmarks/benchmarks/bench_lib.py103
-rw-r--r--benchmarks/benchmarks/bench_linalg.py2
-rw-r--r--benchmarks/benchmarks/bench_overrides.py16
-rw-r--r--benchmarks/benchmarks/bench_random.py114
-rw-r--r--benchmarks/benchmarks/bench_records.py43
-rw-r--r--benchmarks/benchmarks/bench_reduce.py2
-rw-r--r--benchmarks/benchmarks/bench_shape_base.py31
-rw-r--r--benchmarks/benchmarks/bench_ufunc.py2
-rw-r--r--changelog/13829.enhancement.rst6
-rw-r--r--doc/CAPI.rst.txt320
-rw-r--r--doc/C_STYLE_GUIDE.rst.txt19
-rw-r--r--doc/DISTUTILS.rst.txt183
-rw-r--r--doc/HOWTO_RELEASE.rst.txt113
-rw-r--r--doc/Makefile107
-rw-r--r--doc/Py3K.rst.txt40
-rw-r--r--doc/RELEASE_WALKTHROUGH.rst.txt94
-rw-r--r--doc/TESTS.rst.txt32
-rwxr-xr-xdoc/cdoc/numpyfilter.py5
-rw-r--r--doc/changelog/1.15.0-changelog.rst2
-rw-r--r--doc/changelog/1.15.3-changelog.rst32
-rw-r--r--doc/changelog/1.15.4-changelog.rst21
-rw-r--r--doc/changelog/1.16.0-changelog.rst616
-rw-r--r--doc/changelog/1.16.1-changelog.rst62
-rw-r--r--doc/changelog/1.16.2-changelog.rst25
-rw-r--r--doc/changelog/1.16.3-changelog.rst55
-rw-r--r--doc/changelog/1.16.4-changelog.rst39
-rw-r--r--doc/changelog/1.16.5-changelog.rst54
-rw-r--r--doc/changelog/1.17.0-changelog.rst694
-rw-r--r--doc/changelog/1.17.1-changelog.rst55
-rw-r--r--doc/changelog/1.17.2-changelog.rst28
-rw-r--r--doc/neps/_static/nep-0000.pngbin20813 -> 12925 bytes
-rw-r--r--doc/neps/index.rst.tmpl32
-rw-r--r--doc/neps/nep-0000.rst26
-rw-r--r--doc/neps/nep-0010-new-iterator-ufunc.rst4
-rw-r--r--doc/neps/nep-0015-merge-multiarray-umath.rst2
-rw-r--r--doc/neps/nep-0016-abstract-array.rst328
-rw-r--r--doc/neps/nep-0016-benchmark.py48
-rw-r--r--doc/neps/nep-0018-array-function-protocol.rst312
-rw-r--r--doc/neps/nep-0019-rng-policy.rst76
-rw-r--r--doc/neps/nep-0020-gufunc-signature-enhancement.rst2
-rw-r--r--doc/neps/nep-0021-advanced-indexing.rst2
-rw-r--r--doc/neps/nep-0022-ndarray-duck-typing-overview.rst3
-rw-r--r--doc/neps/nep-0024-missing-data-2.rst2
-rw-r--r--doc/neps/nep-0026-missing-data-summary.rst4
-rw-r--r--doc/neps/nep-0027-zero-rank-arrarys.rst254
-rw-r--r--doc/neps/nep-0028-website-redesign.rst334
-rw-r--r--doc/neps/nep-0029-deprecation_policy.rst301
-rw-r--r--doc/neps/nep-0030-duck-array-protocol.rst183
-rw-r--r--doc/neps/nep-0032-remove-financial-functions.rst214
-rw-r--r--doc/neps/nep-template.rst36
-rw-r--r--doc/neps/roadmap.rst129
-rw-r--r--doc/records.rst.txt6
-rw-r--r--doc/release/1.16.0-notes.rst217
-rw-r--r--doc/release/time_based_proposal.rst129
-rw-r--r--doc/release/upcoming_changes/10151.improvement.rst9
-rw-r--r--doc/release/upcoming_changes/12284.new_feature.rst5
-rw-r--r--doc/release/upcoming_changes/13605.deprecation.rst9
-rw-r--r--doc/release/upcoming_changes/13610.improvement.rst5
-rw-r--r--doc/release/upcoming_changes/13899.change.rst4
-rw-r--r--doc/release/upcoming_changes/14036.deprecation.rst4
-rw-r--r--doc/release/upcoming_changes/14036.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14039.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14100.expired.rst3
-rw-r--r--doc/release/upcoming_changes/14181.deprecation.rst3
-rw-r--r--doc/release/upcoming_changes/14248.change.rst10
-rw-r--r--doc/release/upcoming_changes/14255.improvement.rst4
-rw-r--r--doc/release/upcoming_changes/14256.expired.rst3
-rw-r--r--doc/release/upcoming_changes/14259.expired.rst6
-rw-r--r--doc/release/upcoming_changes/14325.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14335.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14393.c_api.rst5
-rw-r--r--doc/release/upcoming_changes/14464.improvement.rst6
-rw-r--r--doc/release/upcoming_changes/14498.change.rst7
-rw-r--r--doc/release/upcoming_changes/14501.improvement.rst6
-rw-r--r--doc/release/upcoming_changes/14510.compatibility.rst12
-rw-r--r--doc/release/upcoming_changes/14518.change.rst18
-rw-r--r--doc/release/upcoming_changes/14567.expired.rst5
-rw-r--r--doc/release/upcoming_changes/14583.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14596.expired.rst2
-rw-r--r--doc/release/upcoming_changes/14620.expired.rst1
-rw-r--r--doc/release/upcoming_changes/14682.expired.rst2
-rw-r--r--doc/release/upcoming_changes/README.rst55
-rw-r--r--doc/release/upcoming_changes/template.rst38
m---------doc/scipy-sphinx-theme0
-rw-r--r--doc/source/_static/numpy_logo.pngbin0 -> 6103 bytes
-rw-r--r--doc/source/_templates/autosummary/base.rst14
-rw-r--r--doc/source/_templates/indexcontent.html2
-rw-r--r--doc/source/_templates/indexsidebar.html1
-rw-r--r--doc/source/_templates/layout.html10
-rw-r--r--doc/source/about.rst2
-rw-r--r--doc/source/benchmarking.rst1
-rw-r--r--doc/source/conf.py63
-rw-r--r--doc/source/dev/conduct/code_of_conduct.rst3
-rw-r--r--doc/source/dev/conduct/report_handling_manual.rst2
-rw-r--r--doc/source/dev/development_environment.rst42
-rw-r--r--doc/source/dev/development_workflow.rst (renamed from doc/source/dev/gitwash/development_workflow.rst)40
-rw-r--r--doc/source/dev/gitwash/development_setup.rst2
-rw-r--r--doc/source/dev/gitwash/following_latest.rst4
-rw-r--r--doc/source/dev/gitwash/git_development.rst14
-rw-r--r--doc/source/dev/gitwash/git_intro.rst40
-rw-r--r--doc/source/dev/gitwash/git_links.inc5
-rw-r--r--doc/source/dev/gitwash/index.rst26
-rw-r--r--doc/source/dev/governance/people.rst9
-rw-r--r--doc/source/dev/index.rst256
-rw-r--r--doc/source/dev/pull_button.png (renamed from doc/source/dev/gitwash/pull_button.png)bin12893 -> 12893 bytes
-rw-r--r--doc/source/dev/style_guide.rst8
-rw-r--r--doc/source/docs/howto_build_docs.rst17
-rw-r--r--doc/source/f2py/distutils.rst4
-rw-r--r--doc/source/f2py/run_main_session.dat2
-rw-r--r--doc/source/f2py/usage.rst29
-rw-r--r--doc/source/reference/alignment.rst (renamed from doc/source/dev/alignment.rst)32
-rw-r--r--doc/source/reference/arrays.classes.rst134
-rw-r--r--doc/source/reference/arrays.datetime.rst164
-rw-r--r--doc/source/reference/arrays.dtypes.rst9
-rw-r--r--doc/source/reference/arrays.indexing.rst34
-rw-r--r--doc/source/reference/arrays.ndarray.rst38
-rw-r--r--doc/source/reference/arrays.nditer.rst81
-rw-r--r--doc/source/reference/arrays.scalars.rst8
-rw-r--r--doc/source/reference/c-api/array.rst (renamed from doc/source/reference/c-api.array.rst)432
-rw-r--r--doc/source/reference/c-api/config.rst (renamed from doc/source/reference/c-api.config.rst)19
-rw-r--r--doc/source/reference/c-api/coremath.rst (renamed from doc/source/reference/c-api.coremath.rst)15
-rw-r--r--doc/source/reference/c-api/deprecations.rst (renamed from doc/source/reference/c-api.deprecations.rst)0
-rw-r--r--doc/source/reference/c-api/dtype.rst (renamed from doc/source/reference/c-api.dtype.rst)59
-rw-r--r--doc/source/reference/c-api/generalized-ufuncs.rst (renamed from doc/source/reference/c-api.generalized-ufuncs.rst)62
-rw-r--r--doc/source/reference/c-api/index.rst (renamed from doc/source/reference/c-api.rst)18
-rw-r--r--doc/source/reference/c-api/iterator.rst (renamed from doc/source/reference/c-api.iterator.rst)49
-rw-r--r--doc/source/reference/c-api/types-and-structures.rst (renamed from doc/source/reference/c-api.types-and-structures.rst)244
-rw-r--r--doc/source/reference/c-api/ufunc.rst (renamed from doc/source/reference/c-api.ufunc.rst)55
-rw-r--r--doc/source/reference/distutils.rst159
-rw-r--r--doc/source/reference/distutils/misc_util.rst7
-rw-r--r--doc/source/reference/distutils_guide.rst7
-rw-r--r--doc/source/reference/index.rst5
-rw-r--r--doc/source/reference/internals.rst1
-rw-r--r--doc/source/reference/maskedarray.baseclass.rst90
-rw-r--r--doc/source/reference/maskedarray.generic.rst2
-rw-r--r--doc/source/reference/random/bit_generators/bitgenerators.rst11
-rw-r--r--doc/source/reference/random/bit_generators/index.rst112
-rw-r--r--doc/source/reference/random/bit_generators/mt19937.rst32
-rw-r--r--doc/source/reference/random/bit_generators/pcg64.rst31
-rw-r--r--doc/source/reference/random/bit_generators/philox.rst33
-rw-r--r--doc/source/reference/random/bit_generators/sfc64.rst26
-rw-r--r--doc/source/reference/random/extending.rst165
-rw-r--r--doc/source/reference/random/generator.rst84
-rw-r--r--doc/source/reference/random/index.rst208
-rw-r--r--doc/source/reference/random/legacy.rst123
-rw-r--r--doc/source/reference/random/multithreading.rst108
-rw-r--r--doc/source/reference/random/new-or-different.rst115
-rw-r--r--doc/source/reference/random/parallel.rst193
-rw-r--r--doc/source/reference/random/performance.py87
-rw-r--r--doc/source/reference/random/performance.rst153
-rw-r--r--doc/source/reference/routines.char.rst17
-rw-r--r--doc/source/reference/routines.ctypeslib.rst3
-rw-r--r--doc/source/reference/routines.dtype.rst3
-rw-r--r--doc/source/reference/routines.linalg.rst15
-rw-r--r--doc/source/reference/routines.ma.rst24
-rw-r--r--doc/source/reference/routines.math.rst1
-rw-r--r--doc/source/reference/routines.matlib.rst2
-rw-r--r--doc/source/reference/routines.other.rst27
-rw-r--r--doc/source/reference/routines.polynomials.package.rst2
-rw-r--r--doc/source/reference/routines.polynomials.polynomial.rst2
-rw-r--r--doc/source/reference/routines.random.rst81
-rw-r--r--doc/source/reference/routines.rst2
-rw-r--r--doc/source/reference/routines.testing.rst12
-rw-r--r--doc/source/reference/ufuncs.rst78
-rw-r--r--doc/source/release.rst96
-rw-r--r--doc/source/release/1.10.0-notes.rst (renamed from doc/release/1.10.0-notes.rst)0
-rw-r--r--doc/source/release/1.10.1-notes.rst (renamed from doc/release/1.10.1-notes.rst)0
-rw-r--r--doc/source/release/1.10.2-notes.rst (renamed from doc/release/1.10.2-notes.rst)0
-rw-r--r--doc/source/release/1.10.3-notes.rst (renamed from doc/release/1.10.3-notes.rst)0
-rw-r--r--doc/source/release/1.10.4-notes.rst (renamed from doc/release/1.10.4-notes.rst)0
-rw-r--r--doc/source/release/1.11.0-notes.rst (renamed from doc/release/1.11.0-notes.rst)0
-rw-r--r--doc/source/release/1.11.1-notes.rst (renamed from doc/release/1.11.1-notes.rst)0
-rw-r--r--doc/source/release/1.11.2-notes.rst (renamed from doc/release/1.11.2-notes.rst)0
-rw-r--r--doc/source/release/1.11.3-notes.rst (renamed from doc/release/1.11.3-notes.rst)0
-rw-r--r--doc/source/release/1.12.0-notes.rst (renamed from doc/release/1.12.0-notes.rst)0
-rw-r--r--doc/source/release/1.12.1-notes.rst (renamed from doc/release/1.12.1-notes.rst)0
-rw-r--r--doc/source/release/1.13.0-notes.rst (renamed from doc/release/1.13.0-notes.rst)0
-rw-r--r--doc/source/release/1.13.1-notes.rst (renamed from doc/release/1.13.1-notes.rst)0
-rw-r--r--doc/source/release/1.13.2-notes.rst (renamed from doc/release/1.13.2-notes.rst)0
-rw-r--r--doc/source/release/1.13.3-notes.rst (renamed from doc/release/1.13.3-notes.rst)0
-rw-r--r--doc/source/release/1.14.0-notes.rst (renamed from doc/release/1.14.0-notes.rst)0
-rw-r--r--doc/source/release/1.14.1-notes.rst (renamed from doc/release/1.14.1-notes.rst)0
-rw-r--r--doc/source/release/1.14.2-notes.rst (renamed from doc/release/1.14.2-notes.rst)0
-rw-r--r--doc/source/release/1.14.3-notes.rst (renamed from doc/release/1.14.3-notes.rst)0
-rw-r--r--doc/source/release/1.14.4-notes.rst (renamed from doc/release/1.14.4-notes.rst)2
-rw-r--r--doc/source/release/1.14.5-notes.rst (renamed from doc/release/1.14.5-notes.rst)0
-rw-r--r--doc/source/release/1.14.6-notes.rst (renamed from doc/release/1.14.6-notes.rst)0
-rw-r--r--doc/source/release/1.15.0-notes.rst (renamed from doc/release/1.15.0-notes.rst)0
-rw-r--r--doc/source/release/1.15.1-notes.rst (renamed from doc/release/1.15.1-notes.rst)0
-rw-r--r--doc/source/release/1.15.2-notes.rst (renamed from doc/release/1.15.2-notes.rst)0
-rw-r--r--doc/source/release/1.15.3-notes.rst49
-rw-r--r--doc/source/release/1.15.4-notes.rst38
-rw-r--r--doc/source/release/1.16.0-notes.rst536
-rw-r--r--doc/source/release/1.16.1-notes.rst107
-rw-r--r--doc/source/release/1.16.2-notes.rst70
-rw-r--r--doc/source/release/1.16.3-notes.rst46
-rw-r--r--doc/source/release/1.16.4-notes.rst94
-rw-r--r--doc/source/release/1.16.5-notes.rst68
-rw-r--r--doc/source/release/1.17.0-notes.rst562
-rw-r--r--doc/source/release/1.17.1-notes.rst73
-rw-r--r--doc/source/release/1.17.2-notes.rst49
-rw-r--r--doc/source/release/1.18.0-notes.rst8
-rw-r--r--doc/source/release/1.3.0-notes.rst (renamed from doc/release/1.3.0-notes.rst)0
-rw-r--r--doc/source/release/1.4.0-notes.rst (renamed from doc/release/1.4.0-notes.rst)0
-rw-r--r--doc/source/release/1.5.0-notes.rst (renamed from doc/release/1.5.0-notes.rst)0
-rw-r--r--doc/source/release/1.6.0-notes.rst (renamed from doc/release/1.6.0-notes.rst)0
-rw-r--r--doc/source/release/1.6.1-notes.rst (renamed from doc/release/1.6.1-notes.rst)0
-rw-r--r--doc/source/release/1.6.2-notes.rst (renamed from doc/release/1.6.2-notes.rst)0
-rw-r--r--doc/source/release/1.7.0-notes.rst (renamed from doc/release/1.7.0-notes.rst)0
-rw-r--r--doc/source/release/1.7.1-notes.rst (renamed from doc/release/1.7.1-notes.rst)0
-rw-r--r--doc/source/release/1.7.2-notes.rst (renamed from doc/release/1.7.2-notes.rst)0
-rw-r--r--doc/source/release/1.8.0-notes.rst (renamed from doc/release/1.8.0-notes.rst)0
-rw-r--r--doc/source/release/1.8.1-notes.rst (renamed from doc/release/1.8.1-notes.rst)0
-rw-r--r--doc/source/release/1.8.2-notes.rst (renamed from doc/release/1.8.2-notes.rst)0
-rw-r--r--doc/source/release/1.9.0-notes.rst (renamed from doc/release/1.9.0-notes.rst)0
-rw-r--r--doc/source/release/1.9.1-notes.rst (renamed from doc/release/1.9.1-notes.rst)0
-rw-r--r--doc/source/release/1.9.2-notes.rst (renamed from doc/release/1.9.2-notes.rst)0
-rw-r--r--doc/source/release/template.rst (renamed from doc/release/template.rst)2
-rw-r--r--doc/source/user/basics.broadcasting.rst6
-rw-r--r--doc/source/user/basics.dispatch.rst8
-rw-r--r--doc/source/user/basics.indexing.rst6
-rw-r--r--doc/source/user/basics.io.genfromtxt.rst20
-rw-r--r--doc/source/user/basics.rst1
-rw-r--r--doc/source/user/building.rst72
-rw-r--r--doc/source/user/c-info.beyond-basics.rst7
-rw-r--r--doc/source/user/c-info.how-to-extend.rst83
-rw-r--r--doc/source/user/c-info.python-as-glue.rst10
-rw-r--r--doc/source/user/numpy-for-matlab-users.rst14
-rw-r--r--doc/source/user/quickstart.rst25
-rw-r--r--doc/source/user/theory.broadcast_1.gifbin0 -> 2987 bytes
-rw-r--r--doc/source/user/theory.broadcast_2.gifbin0 -> 6641 bytes
-rw-r--r--doc/source/user/theory.broadcast_3.gifbin0 -> 4681 bytes
-rw-r--r--doc/source/user/theory.broadcast_4.gifbin0 -> 7287 bytes
-rw-r--r--doc/source/user/theory.broadcast_5.pngbin0 -> 16721 bytes
-rw-r--r--doc/source/user/theory.broadcasting.rst229
-rw-r--r--doc/source/user/whatisnumpy.rst17
m---------doc/sphinxext0
-rw-r--r--numpy/__init__.pxd978
-rw-r--r--numpy/__init__.py45
-rw-r--r--numpy/_build_utils/src/apple_sgemv_fix.c90
-rw-r--r--numpy/_globals.py11
-rw-r--r--numpy/_pytesttester.py27
-rw-r--r--numpy/compat/_inspect.py5
-rw-r--r--numpy/compat/py3k.py80
-rw-r--r--numpy/conftest.py9
-rw-r--r--numpy/core/__init__.py56
-rw-r--r--numpy/core/_add_newdocs.py2825
-rw-r--r--numpy/core/_asarray.py324
-rw-r--r--numpy/core/_dtype.py89
-rw-r--r--numpy/core/_dtype_ctypes.py113
-rw-r--r--numpy/core/_exceptions.py200
-rw-r--r--numpy/core/_internal.py153
-rw-r--r--numpy/core/_methods.py123
-rw-r--r--numpy/core/_type_aliases.py30
-rw-r--r--numpy/core/_ufunc_config.py458
-rw-r--r--numpy/core/arrayprint.py310
-rw-r--r--numpy/core/code_generators/cversions.txt7
-rw-r--r--numpy/core/code_generators/genapi.py20
-rw-r--r--numpy/core/code_generators/generate_numpy_api.py5
-rw-r--r--numpy/core/code_generators/generate_umath.py292
-rw-r--r--numpy/core/code_generators/numpy_api.py2
-rw-r--r--numpy/core/code_generators/ufunc_docstrings.py357
-rw-r--r--numpy/core/defchararray.py226
-rw-r--r--numpy/core/einsumfunc.py123
-rw-r--r--numpy/core/fromnumeric.py668
-rw-r--r--numpy/core/function_base.py274
-rw-r--r--numpy/core/getlimits.py360
-rw-r--r--numpy/core/include/numpy/ndarrayobject.h14
-rw-r--r--numpy/core/include/numpy/ndarraytypes.h43
-rw-r--r--numpy/core/include/numpy/npy_1_7_deprecated_api.h3
-rw-r--r--numpy/core/include/numpy/npy_3kcompat.h51
-rw-r--r--numpy/core/include/numpy/npy_common.h21
-rw-r--r--numpy/core/include/numpy/npy_math.h95
-rw-r--r--numpy/core/include/numpy/ufuncobject.h64
-rw-r--r--numpy/core/info.py87
-rw-r--r--numpy/core/machar.py4
-rw-r--r--numpy/core/memmap.py30
-rw-r--r--numpy/core/multiarray.py1600
-rw-r--r--numpy/core/numeric.py1183
-rw-r--r--numpy/core/numerictypes.py55
-rw-r--r--numpy/core/overrides.py230
-rw-r--r--numpy/core/records.py184
-rw-r--r--numpy/core/setup.py182
-rw-r--r--numpy/core/setup_common.py108
-rw-r--r--numpy/core/shape_base.py345
-rw-r--r--numpy/core/src/common/array_assign.c8
-rw-r--r--numpy/core/src/common/array_assign.h6
-rw-r--r--numpy/core/src/common/cblasfuncs.c7
-rw-r--r--numpy/core/src/common/get_attr_string.h1
-rw-r--r--numpy/core/src/common/lowlevel_strided_loops.h3
-rw-r--r--numpy/core/src/common/npy_ctypes.h50
-rw-r--r--numpy/core/src/common/npy_longdouble.c100
-rw-r--r--numpy/core/src/common/npy_longdouble.h10
-rw-r--r--numpy/core/src/common/npy_partition.h.src3
-rw-r--r--numpy/core/src/common/npy_sort.h204
-rw-r--r--numpy/core/src/common/npy_sort.h.src100
-rw-r--r--numpy/core/src/common/numpyos.c (renamed from numpy/core/src/multiarray/numpyos.c)28
-rw-r--r--numpy/core/src/common/numpyos.h (renamed from numpy/core/src/multiarray/numpyos.h)7
-rw-r--r--numpy/core/src/common/ufunc_override.c208
-rw-r--r--numpy/core/src/common/ufunc_override.h31
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src132
-rw-r--r--numpy/core/src/multiarray/alloc.c18
-rw-r--r--numpy/core/src/multiarray/array_assign_array.c73
-rw-r--r--numpy/core/src/multiarray/array_assign_scalar.c17
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.c376
-rw-r--r--numpy/core/src/multiarray/arrayfunction_override.h16
-rw-r--r--numpy/core/src/multiarray/arrayobject.c72
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src475
-rw-r--r--numpy/core/src/multiarray/buffer.c33
-rw-r--r--numpy/core/src/multiarray/buffer.h2
-rw-r--r--numpy/core/src/multiarray/calculation.c21
-rw-r--r--numpy/core/src/multiarray/common.c95
-rw-r--r--numpy/core/src/multiarray/common.h4
-rw-r--r--numpy/core/src/multiarray/compiled_base.c617
-rw-r--r--numpy/core/src/multiarray/conversion_utils.c71
-rw-r--r--numpy/core/src/multiarray/convert.c45
-rw-r--r--numpy/core/src/multiarray/convert_datatype.c295
-rw-r--r--numpy/core/src/multiarray/convert_datatype.h18
-rw-r--r--numpy/core/src/multiarray/ctors.c656
-rw-r--r--numpy/core/src/multiarray/ctors.h33
-rw-r--r--numpy/core/src/multiarray/datetime.c403
-rw-r--r--numpy/core/src/multiarray/datetime_busday.c12
-rw-r--r--numpy/core/src/multiarray/descriptor.c448
-rw-r--r--numpy/core/src/multiarray/descriptor.h16
-rw-r--r--numpy/core/src/multiarray/dragon4.c40
-rw-r--r--numpy/core/src/multiarray/dragon4.h38
-rw-r--r--numpy/core/src/multiarray/dtype_transfer.c90
-rw-r--r--numpy/core/src/multiarray/einsum.c.src188
-rw-r--r--numpy/core/src/multiarray/flagsobject.c88
-rw-r--r--numpy/core/src/multiarray/getset.c30
-rw-r--r--numpy/core/src/multiarray/hashdescr.c22
-rw-r--r--numpy/core/src/multiarray/item_selection.c270
-rw-r--r--numpy/core/src/multiarray/item_selection.h4
-rw-r--r--numpy/core/src/multiarray/iterators.c482
-rw-r--r--numpy/core/src/multiarray/iterators.h14
-rw-r--r--numpy/core/src/multiarray/lowlevel_strided_loops.c.src101
-rw-r--r--numpy/core/src/multiarray/mapping.c231
-rw-r--r--numpy/core/src/multiarray/methods.c397
-rw-r--r--numpy/core/src/multiarray/methods.h27
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.c388
-rw-r--r--numpy/core/src/multiarray/multiarraymodule.h2
-rw-r--r--numpy/core/src/multiarray/nditer_api.c7
-rw-r--r--numpy/core/src/multiarray/nditer_constr.c73
-rw-r--r--numpy/core/src/multiarray/nditer_pywrap.c12
-rw-r--r--numpy/core/src/multiarray/number.c63
-rw-r--r--numpy/core/src/multiarray/number.h6
-rw-r--r--numpy/core/src/multiarray/refcount.c118
-rw-r--r--numpy/core/src/multiarray/scalarapi.c17
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src157
-rw-r--r--numpy/core/src/multiarray/shape.c33
-rw-r--r--numpy/core/src/multiarray/temp_elide.c2
-rw-r--r--numpy/core/src/multiarray/typeinfo.c49
-rw-r--r--numpy/core/src/multiarray/typeinfo.h12
-rw-r--r--numpy/core/src/multiarray/usertypes.c33
-rw-r--r--numpy/core/src/npymath/halffloat.c31
-rw-r--r--numpy/core/src/npymath/ieee754.c.src152
-rw-r--r--numpy/core/src/npymath/npy_math_complex.c.src43
-rw-r--r--numpy/core/src/npymath/npy_math_internal.h.src45
-rw-r--r--numpy/core/src/npysort/npysort_common.h8
-rw-r--r--numpy/core/src/npysort/radixsort.c.src231
-rw-r--r--numpy/core/src/npysort/selection.c.src2
-rw-r--r--numpy/core/src/npysort/timsort.c.src2574
-rw-r--r--numpy/core/src/umath/_rational_tests.c.src4
-rw-r--r--numpy/core/src/umath/_struct_ufunc_tests.c.src62
-rw-r--r--numpy/core/src/umath/_umath_tests.c.src139
-rw-r--r--numpy/core/src/umath/clip.c.src119
-rw-r--r--numpy/core/src/umath/clip.h.src18
-rw-r--r--numpy/core/src/umath/cpuid.c45
-rw-r--r--numpy/core/src/umath/fast_loop_macros.h234
-rw-r--r--numpy/core/src/umath/funcs.inc.src53
-rw-r--r--numpy/core/src/umath/loops.c.src631
-rw-r--r--numpy/core/src/umath/loops.h.src78
-rw-r--r--numpy/core/src/umath/matmul.c.src504
-rw-r--r--numpy/core/src/umath/matmul.h.src12
-rw-r--r--numpy/core/src/umath/override.c100
-rw-r--r--numpy/core/src/umath/reduction.c50
-rw-r--r--numpy/core/src/umath/scalarmath.c.src137
-rw-r--r--numpy/core/src/umath/simd.inc.src1455
-rw-r--r--numpy/core/src/umath/ufunc_object.c1071
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.c705
-rw-r--r--numpy/core/src/umath/ufunc_type_resolution.h31
-rw-r--r--numpy/core/src/umath/umathmodule.c24
-rw-r--r--numpy/core/tests/data/umath-validation-set-README15
-rw-r--r--numpy/core/tests/data/umath-validation-set-cos707
-rw-r--r--numpy/core/tests/data/umath-validation-set-exp135
-rw-r--r--numpy/core/tests/data/umath-validation-set-log118
-rw-r--r--numpy/core/tests/data/umath-validation-set-sin707
-rw-r--r--numpy/core/tests/test__exceptions.py42
-rw-r--r--numpy/core/tests/test_api.py12
-rw-r--r--numpy/core/tests/test_arrayprint.py13
-rw-r--r--numpy/core/tests/test_datetime.py282
-rw-r--r--numpy/core/tests/test_deprecations.py163
-rw-r--r--numpy/core/tests/test_dtype.py522
-rw-r--r--numpy/core/tests/test_einsum.py11
-rw-r--r--numpy/core/tests/test_errstate.py8
-rw-r--r--numpy/core/tests/test_extint128.py2
-rw-r--r--numpy/core/tests/test_function_base.py68
-rw-r--r--numpy/core/tests/test_getlimits.py15
-rw-r--r--numpy/core/tests/test_half.py99
-rw-r--r--numpy/core/tests/test_indexing.py22
-rw-r--r--numpy/core/tests/test_item_selection.py10
-rw-r--r--numpy/core/tests/test_longdouble.py64
-rw-r--r--numpy/core/tests/test_mem_overlap.py2
-rw-r--r--numpy/core/tests/test_memmap.py10
-rw-r--r--numpy/core/tests/test_multiarray.py1194
-rw-r--r--numpy/core/tests/test_nditer.py91
-rw-r--r--numpy/core/tests/test_numeric.py404
-rw-r--r--numpy/core/tests/test_numerictypes.py38
-rw-r--r--numpy/core/tests/test_overrides.py283
-rw-r--r--numpy/core/tests/test_records.py75
-rw-r--r--numpy/core/tests/test_regression.py155
-rw-r--r--numpy/core/tests/test_scalar_methods.py109
-rw-r--r--numpy/core/tests/test_scalarbuffer.py2
-rw-r--r--numpy/core/tests/test_scalarinherit.py5
-rw-r--r--numpy/core/tests/test_scalarmath.py38
-rw-r--r--numpy/core/tests/test_scalarprint.py4
-rw-r--r--numpy/core/tests/test_shape_base.py208
-rw-r--r--numpy/core/tests/test_ufunc.py620
-rw-r--r--numpy/core/tests/test_umath.py323
-rw-r--r--numpy/core/tests/test_umath_accuracy.py54
-rw-r--r--numpy/core/tests/test_umath_complex.py3
-rw-r--r--numpy/core/umath.py4
-rw-r--r--numpy/ctypeslib.py279
-rw-r--r--numpy/distutils/__init__.py29
-rw-r--r--numpy/distutils/__version__.py6
-rw-r--r--numpy/distutils/_shell_utils.py91
-rw-r--r--numpy/distutils/ccompiler.py155
-rw-r--r--numpy/distutils/command/autodist.py128
-rw-r--r--numpy/distutils/command/build.py13
-rw-r--r--numpy/distutils/command/build_clib.py14
-rw-r--r--numpy/distutils/command/build_ext.py23
-rw-r--r--numpy/distutils/command/build_src.py85
-rw-r--r--numpy/distutils/command/config.py159
-rw-r--r--numpy/distutils/command/install.py19
-rw-r--r--numpy/distutils/command/install_clib.py3
-rw-r--r--numpy/distutils/conv_template.py34
-rw-r--r--numpy/distutils/cpuinfo.py28
-rw-r--r--numpy/distutils/exec_command.py65
-rw-r--r--numpy/distutils/extension.py18
-rw-r--r--numpy/distutils/fcompiler/__init__.py19
-rw-r--r--numpy/distutils/fcompiler/absoft.py2
-rw-r--r--numpy/distutils/fcompiler/compaq.py2
-rw-r--r--numpy/distutils/fcompiler/environment.py21
-rw-r--r--numpy/distutils/fcompiler/gnu.py15
-rw-r--r--numpy/distutils/fcompiler/ibm.py17
-rw-r--r--numpy/distutils/fcompiler/intel.py5
-rw-r--r--numpy/distutils/fcompiler/pg.py8
-rw-r--r--numpy/distutils/fcompiler/sun.py2
-rw-r--r--numpy/distutils/from_template.py28
-rw-r--r--numpy/distutils/info.py6
-rw-r--r--numpy/distutils/line_endings.py16
-rw-r--r--numpy/distutils/log.py2
-rw-r--r--numpy/distutils/mingw32ccompiler.py32
-rw-r--r--numpy/distutils/misc_util.py247
-rw-r--r--numpy/distutils/npy_pkg_config.py6
-rw-r--r--numpy/distutils/system_info.py713
-rw-r--r--numpy/distutils/tests/test_exec_command.py31
-rw-r--r--numpy/distutils/tests/test_fcompiler.py7
-rw-r--r--numpy/distutils/tests/test_misc_util.py3
-rw-r--r--numpy/distutils/tests/test_shell_utils.py79
-rw-r--r--numpy/distutils/tests/test_system_info.py34
-rw-r--r--numpy/doc/basics.py43
-rw-r--r--numpy/doc/broadcasting.py16
-rw-r--r--numpy/doc/byteswapping.py20
-rw-r--r--numpy/doc/constants.py307
-rw-r--r--numpy/doc/dispatch.py271
-rw-r--r--numpy/doc/glossary.py70
-rw-r--r--numpy/doc/indexing.py16
-rw-r--r--numpy/doc/structured_arrays.py240
-rw-r--r--numpy/doc/subclassing.py3
-rw-r--r--numpy/doc/ufuncs.py4
-rw-r--r--numpy/dual.py4
-rw-r--r--numpy/f2py/__init__.py23
-rw-r--r--numpy/f2py/__main__.py2
-rw-r--r--numpy/f2py/capi_maps.py5
-rw-r--r--numpy/f2py/cfuncs.py25
-rw-r--r--numpy/f2py/common_rules.py13
-rwxr-xr-xnumpy/f2py/crackfortran.py22
-rwxr-xr-xnumpy/f2py/f2py2e.py29
-rw-r--r--numpy/f2py/info.py6
-rwxr-xr-x[-rw-r--r--]numpy/f2py/rules.py149
-rw-r--r--numpy/f2py/setup.py2
-rw-r--r--numpy/f2py/src/fortranobject.c46
-rw-r--r--numpy/f2py/src/test/foomodule.c20
-rw-r--r--numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c139
-rw-r--r--numpy/f2py/tests/test_block_docstring.py4
-rw-r--r--numpy/f2py/tests/test_callback.py6
-rw-r--r--numpy/f2py/tests/test_compile_function.py21
-rw-r--r--numpy/f2py/tests/test_mixed.py7
-rw-r--r--numpy/f2py/tests/test_parameter.py1
-rw-r--r--numpy/f2py/tests/test_quoted_character.py4
-rw-r--r--numpy/f2py/tests/test_regression.py1
-rw-r--r--numpy/f2py/tests/util.py90
-rw-r--r--numpy/fft/README.md48
-rw-r--r--numpy/fft/__init__.py190
-rw-r--r--numpy/fft/_pocketfft.c2406
-rw-r--r--numpy/fft/_pocketfft.py (renamed from numpy/fft/fftpack.py)301
-rw-r--r--numpy/fft/fftpack.c1536
-rw-r--r--numpy/fft/fftpack.h28
-rw-r--r--numpy/fft/fftpack_litemodule.c366
-rw-r--r--numpy/fft/helper.py117
-rw-r--r--numpy/fft/info.py187
-rw-r--r--numpy/fft/setup.py6
-rw-r--r--numpy/fft/tests/test_fftpack.py185
-rw-r--r--numpy/fft/tests/test_helper.py79
-rw-r--r--numpy/fft/tests/test_pocketfft.py261
-rw-r--r--numpy/lib/__init__.py26
-rw-r--r--numpy/lib/_datasource.py53
-rw-r--r--numpy/lib/_iotools.py41
-rw-r--r--numpy/lib/_version.py5
-rw-r--r--numpy/lib/arraypad.py1557
-rw-r--r--numpy/lib/arraysetops.py114
-rw-r--r--numpy/lib/arrayterator.py7
-rw-r--r--numpy/lib/financial.py124
-rw-r--r--numpy/lib/format.py243
-rw-r--r--numpy/lib/function_base.py684
-rw-r--r--numpy/lib/histograms.py147
-rw-r--r--numpy/lib/index_tricks.py81
-rw-r--r--numpy/lib/info.py160
-rw-r--r--numpy/lib/mixins.py9
-rw-r--r--numpy/lib/nanfunctions.py237
-rw-r--r--numpy/lib/npyio.py784
-rw-r--r--numpy/lib/polynomial.py160
-rw-r--r--numpy/lib/recfunctions.py641
-rw-r--r--numpy/lib/scimath.py83
-rw-r--r--numpy/lib/shape_base.py282
-rw-r--r--numpy/lib/stride_tricks.py36
-rw-r--r--numpy/lib/tests/test__datasource.py15
-rw-r--r--numpy/lib/tests/test__iotools.py23
-rw-r--r--numpy/lib/tests/test_arraypad.py697
-rw-r--r--numpy/lib/tests/test_arraysetops.py37
-rw-r--r--numpy/lib/tests/test_financial.py6
-rw-r--r--numpy/lib/tests/test_format.py122
-rw-r--r--numpy/lib/tests/test_function_base.py349
-rw-r--r--numpy/lib/tests/test_histograms.py106
-rw-r--r--numpy/lib/tests/test_index_tricks.py128
-rw-r--r--numpy/lib/tests/test_io.py266
-rw-r--r--numpy/lib/tests/test_mixins.py11
-rw-r--r--numpy/lib/tests/test_nanfunctions.py28
-rw-r--r--numpy/lib/tests/test_packbits.py122
-rw-r--r--numpy/lib/tests/test_polynomial.py68
-rw-r--r--numpy/lib/tests/test_recfunctions.py157
-rw-r--r--numpy/lib/tests/test_regression.py6
-rw-r--r--numpy/lib/tests/test_shape_base.py33
-rw-r--r--numpy/lib/tests/test_stride_tricks.py43
-rw-r--r--numpy/lib/tests/test_twodim_base.py26
-rw-r--r--numpy/lib/tests/test_type_check.py40
-rw-r--r--numpy/lib/tests/test_utils.py78
-rw-r--r--numpy/lib/twodim_base.py111
-rw-r--r--numpy/lib/type_check.py170
-rw-r--r--numpy/lib/ufunclike.py54
-rw-r--r--numpy/lib/utils.py159
-rw-r--r--numpy/linalg/__init__.py110
-rw-r--r--numpy/linalg/info.py37
-rw-r--r--numpy/linalg/lapack_lite/clapack_scrub.py5
-rw-r--r--numpy/linalg/lapack_lite/fortran.py19
-rw-r--r--numpy/linalg/linalg.py384
-rw-r--r--numpy/linalg/tests/test_linalg.py135
-rw-r--r--numpy/linalg/umath_linalg.c.src2
-rw-r--r--numpy/ma/core.py1462
-rw-r--r--numpy/ma/extras.py279
-rw-r--r--numpy/ma/mrecords.py16
-rw-r--r--numpy/ma/tests/test_core.py268
-rw-r--r--numpy/ma/tests/test_extras.py124
-rw-r--r--numpy/ma/tests/test_mrecords.py17
-rw-r--r--numpy/ma/tests/test_old_ma.py38
-rw-r--r--numpy/ma/tests/test_regression.py11
-rw-r--r--numpy/ma/tests/test_subclassing.py4
-rw-r--r--numpy/ma/timer_comparison.py15
-rw-r--r--numpy/ma/version.py14
-rw-r--r--numpy/matlib.py52
-rw-r--r--numpy/matrixlib/defmatrix.py78
-rw-r--r--numpy/matrixlib/tests/test_defmatrix.py2
-rw-r--r--numpy/matrixlib/tests/test_masked_matrix.py8
-rw-r--r--numpy/matrixlib/tests/test_matrix_linalg.py2
-rw-r--r--numpy/matrixlib/tests/test_multiarray.py2
-rw-r--r--numpy/matrixlib/tests/test_numeric.py2
-rw-r--r--numpy/matrixlib/tests/test_regression.py2
-rw-r--r--numpy/polynomial/_polybase.py77
-rw-r--r--numpy/polynomial/chebyshev.py272
-rw-r--r--numpy/polynomial/hermite.py300
-rw-r--r--numpy/polynomial/hermite_e.py297
-rw-r--r--numpy/polynomial/laguerre.py279
-rw-r--r--numpy/polynomial/legendre.py291
-rw-r--r--numpy/polynomial/polynomial.py296
-rw-r--r--numpy/polynomial/polyutils.py378
-rw-r--r--numpy/polynomial/tests/test_chebyshev.py6
-rw-r--r--numpy/polynomial/tests/test_classes.py13
-rw-r--r--numpy/polynomial/tests/test_hermite.py6
-rw-r--r--numpy/polynomial/tests/test_hermite_e.py6
-rw-r--r--numpy/polynomial/tests/test_laguerre.py6
-rw-r--r--numpy/polynomial/tests/test_legendre.py6
-rw-r--r--numpy/polynomial/tests/test_polynomial.py23
-rw-r--r--numpy/random/LICENSE.md71
-rw-r--r--numpy/random/__init__.py112
-rw-r--r--numpy/random/_pickle.py82
-rw-r--r--numpy/random/bit_generator.pxd26
-rw-r--r--numpy/random/bit_generator.pyx629
-rw-r--r--numpy/random/bounded_integers.pxd.in26
-rw-r--r--numpy/random/bounded_integers.pyx.in305
-rw-r--r--numpy/random/common.pxd114
-rw-r--r--numpy/random/common.pyx976
-rw-r--r--numpy/random/distributions.pxd140
-rw-r--r--numpy/random/examples/cython/extending.pyx78
-rw-r--r--numpy/random/examples/cython/extending_distributions.pyx59
-rw-r--r--numpy/random/examples/cython/setup.py27
-rw-r--r--numpy/random/examples/numba/extending.py77
-rw-r--r--numpy/random/examples/numba/extending_distributions.py61
-rw-r--r--numpy/random/generator.pyx4030
-rw-r--r--numpy/random/info.py5
-rw-r--r--numpy/random/legacy_distributions.pxd50
-rw-r--r--numpy/random/mt19937.pyx274
-rw-r--r--numpy/random/mtrand.pyx (renamed from numpy/random/mtrand/mtrand.pyx)2869
-rw-r--r--numpy/random/mtrand/Python.pxi43
-rw-r--r--numpy/random/mtrand/distributions.c926
-rw-r--r--numpy/random/mtrand/distributions.h185
-rw-r--r--numpy/random/mtrand/generate_mtrand_c.py42
-rw-r--r--numpy/random/mtrand/initarray.c152
-rw-r--r--numpy/random/mtrand/initarray.h8
-rw-r--r--numpy/random/mtrand/mtrand_py_helper.h23
-rw-r--r--numpy/random/mtrand/numpy.pxd163
-rw-r--r--numpy/random/mtrand/randint_helpers.pxi.in77
-rw-r--r--numpy/random/mtrand/randomkit.c626
-rw-r--r--numpy/random/pcg64.pyx270
-rw-r--r--numpy/random/philox.pyx336
-rw-r--r--numpy/random/setup.py132
-rw-r--r--numpy/random/sfc64.pyx144
-rw-r--r--numpy/random/src/aligned_malloc/aligned_malloc.c9
-rw-r--r--numpy/random/src/aligned_malloc/aligned_malloc.h54
-rw-r--r--numpy/random/src/bitgen.h20
-rw-r--r--numpy/random/src/distributions/LICENSE.md61
-rw-r--r--numpy/random/src/distributions/distributions.c1782
-rw-r--r--numpy/random/src/distributions/distributions.h214
-rw-r--r--numpy/random/src/distributions/logfactorial.c158
-rw-r--r--numpy/random/src/distributions/logfactorial.h9
-rw-r--r--numpy/random/src/distributions/random_hypergeometric.c260
-rw-r--r--numpy/random/src/distributions/ziggurat_constants.h1206
-rw-r--r--numpy/random/src/legacy/legacy-distributions.c392
-rw-r--r--numpy/random/src/legacy/legacy-distributions.h49
-rw-r--r--numpy/random/src/mt19937/LICENSE.md61
-rw-r--r--numpy/random/src/mt19937/mt19937-benchmark.c31
-rw-r--r--numpy/random/src/mt19937/mt19937-jump.c224
-rw-r--r--numpy/random/src/mt19937/mt19937-jump.h15
-rw-r--r--numpy/random/src/mt19937/mt19937-poly.h207
-rw-r--r--numpy/random/src/mt19937/mt19937-test-data-gen.c59
-rw-r--r--numpy/random/src/mt19937/mt19937.c107
-rw-r--r--numpy/random/src/mt19937/mt19937.h61
-rw-r--r--numpy/random/src/mt19937/randomkit.c578
-rw-r--r--numpy/random/src/mt19937/randomkit.h (renamed from numpy/random/mtrand/randomkit.h)75
-rw-r--r--numpy/random/src/pcg64/LICENSE.md22
-rw-r--r--numpy/random/src/pcg64/pcg64-benchmark.c42
-rw-r--r--numpy/random/src/pcg64/pcg64-test-data-gen.c73
-rw-r--r--numpy/random/src/pcg64/pcg64.c187
-rw-r--r--numpy/random/src/pcg64/pcg64.h294
-rw-r--r--numpy/random/src/pcg64/pcg64.orig.c11
-rw-r--r--numpy/random/src/pcg64/pcg64.orig.h2025
-rw-r--r--numpy/random/src/philox/LICENSE.md31
-rw-r--r--numpy/random/src/philox/philox-benchmark.c38
-rw-r--r--numpy/random/src/philox/philox-test-data-gen.c82
-rw-r--r--numpy/random/src/philox/philox.c29
-rw-r--r--numpy/random/src/philox/philox.h248
-rw-r--r--numpy/random/src/sfc64/LICENSE.md27
-rw-r--r--numpy/random/src/sfc64/sfc64.c39
-rw-r--r--numpy/random/src/sfc64/sfc64.h60
-rw-r--r--numpy/random/src/splitmix64/LICENSE.md9
-rw-r--r--numpy/random/src/splitmix64/splitmix64.c29
-rw-r--r--numpy/random/src/splitmix64/splitmix64.h30
-rw-r--r--numpy/random/src/splitmix64/splitmix64.orig.c28
-rw-r--r--numpy/random/tests/data/__init__.py (renamed from numpy/core/_aliased_types.py)0
-rw-r--r--numpy/random/tests/data/mt19937-testset-1.csv1001
-rw-r--r--numpy/random/tests/data/mt19937-testset-2.csv1001
-rw-r--r--numpy/random/tests/data/pcg64-testset-1.csv1001
-rw-r--r--numpy/random/tests/data/pcg64-testset-2.csv1001
-rw-r--r--numpy/random/tests/data/philox-testset-1.csv1001
-rw-r--r--numpy/random/tests/data/philox-testset-2.csv1001
-rw-r--r--numpy/random/tests/data/sfc64-testset-1.csv1001
-rw-r--r--numpy/random/tests/data/sfc64-testset-2.csv1001
-rw-r--r--numpy/random/tests/test_direct.py418
-rw-r--r--numpy/random/tests/test_generator_mt19937.py2085
-rw-r--r--numpy/random/tests/test_generator_mt19937_regressions.py158
-rw-r--r--numpy/random/tests/test_random.py61
-rw-r--r--numpy/random/tests/test_randomstate.py1966
-rw-r--r--numpy/random/tests/test_randomstate_regression.py210
-rw-r--r--numpy/random/tests/test_regression.py3
-rw-r--r--numpy/random/tests/test_seed_sequence.py54
-rw-r--r--numpy/random/tests/test_smoke.py808
-rw-r--r--numpy/testing/_private/nosetester.py2
-rw-r--r--numpy/testing/_private/parameterized.py21
-rw-r--r--numpy/testing/_private/utils.py319
-rw-r--r--numpy/testing/decorators.py15
-rw-r--r--numpy/testing/noseclasses.py14
-rw-r--r--numpy/testing/nosetester.py19
-rwxr-xr-xnumpy/testing/print_coercion_tables.py40
-rw-r--r--numpy/testing/tests/test_decorators.py4
-rw-r--r--numpy/testing/tests/test_utils.py190
-rw-r--r--numpy/testing/utils.py7
-rw-r--r--numpy/tests/test_ctypeslib.py191
-rw-r--r--numpy/tests/test_public_api.py498
-rw-r--r--numpy/tests/test_reloading.py8
-rw-r--r--numpy/tests/test_scripts.py90
-rw-r--r--numpy/tests/test_warnings.py2
-rw-r--r--pavement.py612
-rw-r--r--pyproject.toml71
-rw-r--r--pytest.ini2
-rwxr-xr-xruntests.py29
-rwxr-xr-xsetup.py88
-rw-r--r--shippable.yml24
-rw-r--r--site.cfg.example74
-rw-r--r--test_requirements.txt7
-rwxr-xr-xtools/changelog.py6
-rw-r--r--tools/ci/appveyor/requirements.txt6
-rwxr-xr-xtools/ci/test_all_newsfragments_used.py16
-rwxr-xr-xtools/cythonize.py112
-rw-r--r--tools/npy_tempita/__init__.py56
-rw-r--r--tools/npy_tempita/_looper.py20
-rw-r--r--tools/npy_tempita/compat3.py2
-rw-r--r--tools/openblas_support.py229
-rwxr-xr-xtools/pypy-test.sh48
-rw-r--r--tools/refguide_check.py958
-rwxr-xr-xtools/swig/test/testFarray.py2
-rwxr-xr-xtools/swig/test/testFlat.py6
-rw-r--r--tools/swig/test/testFortran.py4
-rwxr-xr-xtools/swig/test/testMatrix.py12
-rw-r--r--tools/swig/test/testSuperTensor.py10
-rwxr-xr-xtools/swig/test/testTensor.py12
-rwxr-xr-xtools/swig/test/testVector.py16
-rw-r--r--tools/test-installed-numpy.py62
-rwxr-xr-xtools/travis-before-install.sh16
-rwxr-xr-xtools/travis-test.sh92
-rw-r--r--tox.ini23
756 files changed, 84114 insertions, 27950 deletions
diff --git a/.appveyor.yml b/.appveyor.yml
deleted file mode 100644
index 01440c6a0..000000000
--- a/.appveyor.yml
+++ /dev/null
@@ -1,159 +0,0 @@
-# As config was originally based on an example by Olivier Grisel. Thanks!
-# https://github.com/ogrisel/python-appveyor-demo/blob/master/appveyor.yml
-clone_depth: 50
-
-# No reason for us to restrict the number concurrent jobs
-max_jobs: 100
-
-cache:
- - '%LOCALAPPDATA%\pip\Cache'
-
-environment:
- global:
- MINGW_32: C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin
- MINGW_64: C:\mingw-w64\x86_64-6.3.0-posix-seh-rt_v5-rev1\mingw64\bin
- OPENBLAS_32: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win32.zip
- OPENBLAS_64: https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com/openblas-5f998ef_gcc7_1_0_win64.zip
- APPVEYOR_SAVE_CACHE_ON_ERROR: true
- APPVEYOR_SKIP_FINALIZE_ON_EXIT: true
- TEST_TIMEOUT: 1000
- NPY_NUM_BUILD_JOBS: 4
-
- matrix:
- - PYTHON: C:\Python36
- PYTHON_VERSION: 3.6
- PYTHON_ARCH: 32
- TEST_MODE: fast
-
- - PYTHON: C:\Python37
- PYTHON_VERSION: 3.7
- PYTHON_ARCH: 32
- TEST_MODE: fast
-
- - PYTHON: C:\Python27-x64
- PYTHON_VERSION: 2.7
- PYTHON_ARCH: 64
- TEST_MODE: fast
-
- - PYTHON: C:\Python36-x64
- PYTHON_VERSION: 3.6
- PYTHON_ARCH: 64
- TEST_MODE: full
- INSTALL_PICKLE5: 1
-
- - PYTHON: C:\Python37-x64
- PYTHON_VERSION: 3.7
- PYTHON_ARCH: 64
- TEST_MODE: full
- INSTALL_PICKLE5: 1
-
-init:
- - "ECHO %PYTHON% %PYTHON_VERSION% %PYTHON_ARCH%"
- - "ECHO \"%APPVEYOR_SCHEDULED_BUILD%\""
- # If there is a newer build queued for the same PR, cancel this one.
- # The AppVeyor 'rollout builds' option is supposed to serve the same
- # purpose but it is problematic because it tends to cancel builds pushed
- # directly to master instead of just PR builds (or the converse).
- # credits: JuliaLang developers.
- - ps: if ($env:APPVEYOR_PULL_REQUEST_NUMBER -and $env:APPVEYOR_BUILD_NUMBER -ne ((Invoke-RestMethod `
- https://ci.appveyor.com/api/projects/$env:APPVEYOR_ACCOUNT_NAME/$env:APPVEYOR_PROJECT_SLUG/history?recordsNumber=50).builds | `
- Where-Object pullRequestId -eq $env:APPVEYOR_PULL_REQUEST_NUMBER)[0].buildNumber) { `
- raise "There are newer queued builds for this pull request, skipping build."
- }
-
-install:
- # Prepend newly installed Python to the PATH of this build (this cannot be
- # done from inside the powershell script as it would require to restart
- # the parent CMD process).
- - SET PATH=%PYTHON%;%PYTHON%\Scripts;%PATH%
- - if [%PYTHON_ARCH%]==[32] SET PATH=%MINGW_32%;%PATH% & SET OPENBLAS=%OPENBLAS_32%
- - if [%PYTHON_ARCH%]==[64] SET PATH=%MINGW_64%;%PATH% & SET OPENBLAS=%OPENBLAS_64%
-
- # Check that we have the expected version and architecture for Python
- - python --version
- - >-
- %CMD_IN_ENV%
- python -c "import sys,platform,struct;
- print(sys.platform, platform.machine(), struct.calcsize('P') * 8, )"
-
- # Install "openblas.a" to PYTHON\lib
- # Library provided by Matthew Brett at https://github.com/matthew-brett/build-openblas
- - ps: |
- $clnt = new-object System.Net.WebClient
- $file = "$(New-TemporaryFile).zip"
- $tmpdir = New-TemporaryFile | %{ rm $_; mkdir $_ }
- $destination = "$env:PYTHON\lib\openblas.a"
-
- echo $file
- echo $tmpdir
- echo $env:OPENBLAS
-
- $clnt.DownloadFile($env:OPENBLAS, $file)
- Get-FileHash $file | Format-List
-
- Expand-Archive $file $tmpdir
-
- rm $tmpdir\$env:PYTHON_ARCH\lib\*.dll.a
- $lib = ls $tmpdir\$env:PYTHON_ARCH\lib\*.a | ForEach { ls $_ } | Select-Object -first 1
- echo $lib
-
- cp $lib $destination
- ls $destination
-
- # Upgrade to the latest pip.
- - 'python -m pip install -U pip setuptools wheel'
-
- - if [%INSTALL_PICKLE5%]==[1] echo pickle5 >> tools/ci/appveyor/requirements.txt
-
- # Install the numpy test dependencies.
- - 'pip install -U --timeout 5 --retries 2 -r tools/ci/appveyor/requirements.txt'
-
-build_script:
- # Here, we add MinGW to the path to be able to link an OpenBLAS.dll
- # We then use the import library from the DLL to compile with MSVC
- - ps: |
- pip wheel -v -v -v --wheel-dir=dist .
-
- # For each wheel that pip has placed in the "dist" directory
- # First, upload the wheel to the "artifacts" tab and then
- # install the wheel. If we have only built numpy (as is the case here),
- # then there will be one wheel to install.
-
- # This method is more representative of what will be distributed,
- # because it actually tests what the built wheels will be rather than
- # what 'setup.py install' will do and at it uploads the wheels so that
- # they can be inspected.
-
- ls dist -r | Foreach-Object {
- Push-AppveyorArtifact $_.FullName
- pip install $_.FullName
- }
-
-test_script:
- python runtests.py -v -n -m %TEST_MODE% -- --junitxml=%cd%\junit-results.xml
-
-after_build:
- # Remove old or huge cache files to hopefully not exceed the 1GB cache limit.
- #
- # If the cache limit is reached, the cache will not be updated (of not even
- # created in the first run). So this is a trade of between keeping the cache
- # current and having a cache at all.
- # NB: This is done only `on_success` since the cache in uploaded only on
- # success anyway.
- - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -mtime +360 -delete
- - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -type f -size +10M -delete
- - C:\cygwin\bin\find "%LOCALAPPDATA%\pip" -empty -delete
- # Show size of cache
- - C:\cygwin\bin\du -hs "%LOCALAPPDATA%\pip\Cache"
-
-on_finish:
- # We can get a nice display of test results in the "test" tab with py.test
- # For now, this does nothing.
- - ps: |
- If (Test-Path .\junit-results.xml) {
- (new-object net.webclient).UploadFile(
- "https://ci.appveyor.com/api/testresults/junit/$($env:APPVEYOR_JOB_ID)",
- (Resolve-Path .\junit-results.xml)
- )
- }
- $LastExitCode = 0
diff --git a/.circleci/config.yml b/.circleci/config.yml
index 9e227ab35..772c3fbfd 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -19,8 +19,9 @@ jobs:
name: install dependencies
command: |
python3 -m venv venv
+ ln -s $(which python3) venv/bin/python3.6
. venv/bin/activate
- pip install cython sphinx==1.7.9 matplotlib
+ pip install cython sphinx==2.2.0 matplotlib ipython
sudo apt-get update
sudo apt-get install -y graphviz texlive-fonts-recommended texlive-latex-recommended texlive-latex-extra texlive-generic-extra latexmk texlive-xetex
@@ -34,23 +35,30 @@ jobs:
pip install scipy
- run:
+ name: create release notes
+ command: |
+ . venv/bin/activate
+ pip install git+https://github.com/hawkowl/towncrier.git@master
+ VERSION=$(python -c "import setup; print(setup.VERSION)")
+ towncrier --version $VERSION --yes
+ ./tools/ci/test_all_newsfragments_used.py
+ - run:
name: build devdocs
command: |
. venv/bin/activate
cd doc
git submodule update --init
- make html
+ SPHINXOPTS=-q make -e html
- run:
name: build neps
command: |
. venv/bin/activate
cd doc/neps
- make html
+ SPHINXOPTS=-q make -e html
- # - store_artifacts:
- # path: doc/build/html/
- # destination: devdocs
+ - store_artifacts:
+ path: doc/build/html/
# - store_artifacts:
diff --git a/.codecov.yml b/.codecov.yml
index cb3ee230b..d92d54c9d 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -1,29 +1,13 @@
codecov:
- ci:
- # we don't require appveyor or
- # circleCI to pass to report
- # coverage, which currently only
- # comes from a single Python 3.6 job
- # in Travis
- - !appveyor
- - !circle
notify:
- # don't require all travis builds to pass;
- # as long as the coverage job succeeds it
- # can report the % coverage, even if another
- # job needs a restart for whatever reason
- - require_ci_to_pass: no
- # we should only require a single build before
- # reporting the % coverage because there's only
- # one coverage job in Travis
- - after_n_builds: 1
+ require_ci_to_pass: no
+ after_n_builds: 1
coverage:
status:
project:
default:
# Require 1% coverage, i.e., always succeed
target: 1
+ patch: false
+ changes: false
comment: off
-
-ignore:
- - "**/setup.py"
diff --git a/.coveragerc b/.coveragerc
index 1f61c25a4..9048b9cc4 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,3 +1,4 @@
[run]
branch = True
include = */numpy/*
+disable_warnings = include-ignored
diff --git a/.ctags.d b/.ctags.d
new file mode 100644
index 000000000..60f7d6c65
--- /dev/null
+++ b/.ctags.d
@@ -0,0 +1 @@
+--langmaps=c:+.src
diff --git a/.dependabot/config.yml b/.dependabot/config.yml
new file mode 100644
index 000000000..160ec85cf
--- /dev/null
+++ b/.dependabot/config.yml
@@ -0,0 +1,9 @@
+version: 1
+update_configs:
+ - package_manager: "python"
+ directory: "/"
+ update_schedule: "weekly"
+ commit_message:
+ prefix: "MAINT"
+ default_labels:
+ - "03 - Maintenance"
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
index 01d9a537e..22113b913 100644
--- a/.github/CONTRIBUTING.md
+++ b/.github/CONTRIBUTING.md
@@ -16,12 +16,12 @@ Thanks for your interest in contributing code to numpy!
+ If this is your first time contributing to a project on GitHub, please read
through our
-[guide to contributing to numpy](https://docs.scipy.org/doc/numpy/dev/index.html)
+[guide to contributing to numpy](https://numpy.org/devdocs/dev/index.html)
+ If you have contributed to other projects on GitHub you can go straight to our
-[development workflow](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html)
+[development workflow](https://numpy.org/devdocs/dev/development_workflow.html)
Either way, please be sure to follow our
-[convention for commit messages](https://docs.scipy.org/doc/numpy/dev/gitwash/development_workflow.html#writing-the-commit-message).
+[convention for commit messages](https://numpy.org/devdocs/dev/development_workflow.html#writing-the-commit-message).
If you are writing new C code, please follow the style described in
``doc/C_STYLE_GUIDE``.
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 000000000..4c868d735
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,2 @@
+tidelift: pypi/numpy
+custom: https://www.numpy.org/#support-numpy
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index b6da4b772..e12eea7bd 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -1,7 +1,7 @@
<!-- Please be sure you are following the instructions in the dev guidelines
-http://www.numpy.org/devdocs/dev/gitwash/development_workflow.html
+http://www.numpy.org/devdocs/dev/development_workflow.html
-->
<!-- We'd appreciate it if your commit message is properly formatted
-http://www.numpy.org/devdocs/dev/gitwash/development_workflow.html#writing-the-commit-message
+http://www.numpy.org/devdocs/dev/development_workflow.html#writing-the-commit-message
-->
diff --git a/.gitignore b/.gitignore
index 0a1e1909f..2ad02b560 100644
--- a/.gitignore
+++ b/.gitignore
@@ -124,6 +124,10 @@ numpy/core/include/numpy/config.h
numpy/core/include/numpy/multiarray_api.txt
numpy/core/include/numpy/ufunc_api.txt
numpy/core/lib/
+numpy/core/src/common/npy_binsearch.h
+numpy/core/src/common/npy_partition.h
+numpy/core/src/common/npy_sort.h
+numpy/core/src/common/templ_common.h
numpy/core/src/multiarray/_multiarray_tests.c
numpy/core/src/multiarray/arraytypes.c
numpy/core/src/multiarray/einsum.c
@@ -138,7 +142,9 @@ numpy/core/src/npysort/binsearch.c
numpy/core/src/npysort/heapsort.c
numpy/core/src/npysort/mergesort.c
numpy/core/src/npysort/quicksort.c
+numpy/core/src/npysort/radixsort.c
numpy/core/src/npysort/selection.c
+numpy/core/src/npysort/timsort.c
numpy/core/src/npysort/sort.c
numpy/core/src/private/npy_binsearch.h
numpy/core/src/private/npy_partition.h
@@ -149,7 +155,9 @@ numpy/core/src/umath/_struct_ufunc_tests.c
numpy/core/src/umath/_umath_tests.c
numpy/core/src/umath/scalarmath.c
numpy/core/src/umath/funcs.inc
+numpy/core/src/umath/clip.[ch]
numpy/core/src/umath/loops.[ch]
+numpy/core/src/umath/matmul.[ch]
numpy/core/src/umath/operand_flag_tests.c
numpy/core/src/umath/simd.inc
numpy/core/src/umath/struct_ufunc_test.c
@@ -157,12 +165,16 @@ numpy/core/src/umath/test_rational.c
numpy/core/src/umath/umath_tests.c
numpy/distutils/__config__.py
numpy/linalg/umath_linalg.c
-doc/source/reference/generated
+doc/source/**/generated/
benchmarks/results
benchmarks/html
benchmarks/env
benchmarks/numpy
# cythonized files
cythonize.dat
-numpy/random/mtrand/mtrand.c
-numpy/random/mtrand/randint_helpers.pxi
+numpy/random/_mtrand/_mtrand.c
+numpy/random/*.c
+numpy/random/legacy/*.c
+numpy/random/_mtrand/randint_helpers.pxi
+numpy/random/bounded_integers.pyx
+numpy/random/bounded_integers.pxd
diff --git a/.lgtm.yml b/.lgtm.yml
index c0a9cf59a..cc16544a3 100644
--- a/.lgtm.yml
+++ b/.lgtm.yml
@@ -7,3 +7,18 @@ path_classifiers:
# "undefined export" alerts
- numpy/random/__init__.py
+extraction:
+ python:
+ python_setup:
+ requirements:
+ - cython>=0.29
+ cpp:
+ index:
+ build_command:
+ - python3 setup.py build
+ after_prepare:
+ - pip3 install --upgrade --user cython
+ - export PATH="$HOME/.local/bin:$PATH"
+
+queries:
+ - include: py/file-not-closed
diff --git a/.mailmap b/.mailmap
index 6464e4b24..9d7aaa3c4 100644
--- a/.mailmap
+++ b/.mailmap
@@ -9,6 +9,8 @@
# gives no duplicates.
Aaron Baecker <abaecker@localhost> abaecker <abaecker@localhost>
+Alan Fontenot <logeaux@yahoo.com> logeaux <logeaux@yahoo.com>
+Alan Fontenot <logeaux@yahoo.com> logeaux <36168460+logeaux@users.noreply.github.com>
Abdul Muneer <abdulmuneer@gmail.com> abdulmuneer <abdulmuneer@gmail.com>
Adam Ginsburg <adam.g.ginsburg@gmail.com> Adam Ginsburg <keflavich@gmail.com>
Albert Jornet Puig <albert.jornet@ic3.cat> jurnix <albert.jornet@ic3.cat>
@@ -19,12 +21,17 @@ Alex Griffing <argriffi@ncsu.edu> argriffing <argriffing@users.noreply.github.co
Alex Thomas <alexthomas93@users.noreply.github.com> alexthomas93 <alexthomas93@users.noreply.github.com>
Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
Alexander Belopolsky <abalkin@enlnt.com> Alexander Belopolsky <a@enlnt.com>
+Alexander Belopolsky <abalkin@enlnt.com> sasha <sasha@localhost>
+Alexander Jung <kontakt@ajung.name> aleju <kontakt@ajung.name>
Alexander Shadchin <alexandr.shadchin@gmail.com> Alexandr Shadchin <alexandr.shadchin@gmail.com>
Alexander Shadchin <alexandr.shadchin@gmail.com> shadchin <alexandr.shadchin@gmail.com>
Allan Haldane <allan.haldane@gmail.com> ahaldane <ealloc@gmail.com>
Alok Singhal <gandalf013@gmail.com> Alok Singhal <alok@merfinllc.com>
+Alyssa Quek <alyssaquek@gmail.com> alyssaq <alyssaquek@gmail.com>
Amir Sarabadani <ladsgroup@gmail.com> amir <ladsgroup@gmail.com>
Anatoly Techtonik <techtonik@gmail.com> anatoly techtonik <techtonik@gmail.com>
+Andras Deak <deak.andris@gmail.com> adeak <adeak@users.noreply.github.com>
+Andrea Pattori <andrea.pattori@gmail.com> patto90 <andrea.pattori@gmail.com>
Andrei Kucharavy <ank@andreikucharavy.com> chiffa <ank@andreikucharavy.com>
Anne Archibald <peridot.faceted@gmail.com> aarchiba <peridot.faceted@gmail.com>
Anne Archibald <peridot.faceted@gmail.com> Anne Archibald <archibald@astron.nl>
@@ -38,8 +45,12 @@ Badhri Narayanan Krishnakumar <badhrinarayanan.k@gmail.com> badhrink <badhrinara
Behzad Nouri <behzadnouri@gmail.com> behzad nouri <behzadnouri@gmail.com>
Benjamin Root <ben.v.root@gmail.com> Ben Root <ben.v.root@gmail.com>
Benjamin Root <ben.v.root@gmail.com> weathergod <?@?>
+Bernardt Duvenhage <bernardt.duvenhage@gmail.com> bduvenhage <bernardt.duvenhage@gmail.com>
Bertrand Lefebvre <bertrand.l3f@gmail.com> bertrand <bertrand.l3f@gmail.com>
Bertrand Lefebvre <bertrand.l3f@gmail.com> Bertrand <bertrand.l3f@gmail.com>
+Bharat Raghunathan <bharatr@symphonyai.com> Bharat123Rox <bharatr@symphonyai.com>
+Bill Spotz <wfspotz@sandia.gov> William Spotz <wfspotz@sandia.gov@localhost>
+Bill Spotz <wfspotz@sandia.gov> wfspotz@sandia.gov <wfspotz@sandia.gov@localhost>
Bob Eldering <eldering@jive.eu> bobeldering <eldering@jive.eu>
Brett R Murphy <bmurphy@enthought.com> brettrmurphy <bmurphy@enthought.com>
Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@Laptop-3.local>
@@ -47,12 +58,15 @@ Bryan Van de Ven <bryanv@continuum.io> Bryan Van de Ven <bryan@laptop.local>
Carl Kleffner <cmkleffner@gmail.com> carlkl <cmkleffner@gmail.com>
Chris Burns <chris.burns@localhost> chris.burns <chris.burns@localhost>
Chris Kerr <debdepba@dasganma.tk> Chris Kerr <cjk34@cam.ac.uk>
+Christian Clauss <cclauss@bluewin.ch> cclauss <cclauss@bluewin.ch>
Christopher Hanley <chanley@gmail.com> chanley <chanley@gmail.com>
Christoph Gohlke <cgohlke@uci.edu> cgholke <?@?>
Christoph Gohlke <cgohlke@uci.edu> cgohlke <cgohlke@uci.edu>
Christoph Gohlke <cgohlke@uci.edu> Christolph Gohlke <cgohlke@uci.edu>
+Daniel B Allan <daniel.b.allan@gmail.com> danielballan <daniel.b.allan@gmail.com>
Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <daniel@meltingwax.net>
Daniel da Silva <mail@danieldasilva.org> Daniel da Silva <var.mail.daniel@gmail.com>
+Daniel Hrisca <daniel.hrisca@gmail.com> danielhrisca <daniel.hrisca@gmail.com>
Daniel J Farrell <danieljfarrel@me.com> danieljfarrell <danieljfarrel@me.com>
Daniel Müllner <Daniel Müllner muellner@math.stanford.edu> Daniel <muellner@localhost.localdomain>
Daniel Müllner <Daniel Müllner muellner@math.stanford.edu> dmuellner <Daniel Müllner muellner@math.stanford.edu>
@@ -61,11 +75,14 @@ David Huard <david.huard@gmail.com> dhuard <dhuard@localhost>
David M Cooke <cookedm@localhost> cookedm <cookedm@localhost>
David Nicholson <davidjn@google.com> davidjn <dnic12345@gmail.com>
David Ochoa <ochoadavid@gmail.com> ochoadavid <ochoadavid@gmail.com>
+Dawid Zych <dawid.zych@yandex.com> silenc3r <dawid.zych@yandex.com>
+Dennis Zollo <dzollo@swift-nav.com> denniszollo <dzollo@swift-nav.com>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeier <dhomeie@gwdg.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homeir <derek@astro.phsik.uni-goettingen.de>
Derek Homeier <derek@astro.physik.uni-goettingen.de> Derek Homier <derek@astro.physik.uni-goettingen.de>
Derrick Williams <myutat@gmail.com> derrick <myutat@gmail.com>
Dmitriy Shalyga <zuko3d@gmail.com> zuko3d <zuko3d@gmail.com>
+Ed Schofield <edschofield@localhost> edschofield <edschofield@localhost>
Egor Zindy <ezindy@gmail.com> zindy <ezindy@gmail.com>
Endolith <endolith@gmail.com>
Eric Fode <ericfode@gmail.com> Eric Fode <ericfode@linuxlaptop.(none)>
@@ -97,28 +114,44 @@ Irvin Probst <irvin.probst@ensta-bretagne.fr> I--P <irvin.probst@ensta-bretagne.
Jaime Fernandez <jaime.frio@gmail.com> Jaime Fernandez <jaime.fernandez@hp.com>
Jaime Fernandez <jaime.frio@gmail.com> jaimefrio <jaime.frio@gmail.com>
Jaime Fernandez <jaime.frio@gmail.com> Jaime <jaime.frio@gmail.com>
+James Webber <jamestwebber@gmail.com> jamestwebber <jamestwebber@gmail.com>
Jarrod Millman <millman@berkeley.edu> Jarrod Millman <jarrod.millman@gmail.com>
Jason Grout <jason-github@creativetrax.com> Jason Grout <jason.grout@drake.edu>
Jason King <pizza@netspace.net.au> jason king <pizza@netspace.net.au>
Jay Bourque <jay.bourque@continuum.io> jayvius <jay.bourque@continuum.io>
Jean Utke <jutke@allstate.com> jutke <jutke@allstate.com>
+Jeffrey Yancey <jeffrey@octane5.com> Jeff <3820914+jeffyancey@users.noreply.github.com>
+Jeremy Lay <jlay80@gmail.com> jeremycl01 <jlay80@gmail.com>
+Jérémie du Boisberranger <jeremie.du-boisberranger@inria.fr> jeremiedbb <34657725+jeremiedbb@users.noreply.github.com>
Jerome Kelleher <jerome.kelleher@ed.ac.uk> jeromekelleher <jerome.kelleher@ed.ac.uk>
+Johannes Hampp <johannes.hampp@zeu.uni-giessen.de> euronion <42553970+euronion@users.noreply.github.com>
Johannes Schönberger <hannesschoenberger@gmail.com> Johannes Schönberger <jschoenberger@demuc.de>
+John Darbyshire <24256554+attack68@users.noreply.github.com> attack68 <24256554+attack68@users.noreply.github.com>
Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <joseph.r.fox-rabinovitz@nasa.gov>
-Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Mad Physicist <madphysicist@users.noreply.github.com>
+Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Joseph Fox-Rabinovitz <madphysicist@users.noreply.github.com>
+Joseph Fox-Rabinovitz <jfoxrabinovitz@gmail.com> Mad Physicist <madphysicist@users.noreply.github.com>
Joseph Martinot-Lagarde <contrebasse@gmail.com> Joseph Martinot-Lagarde <joseph.martinot-lagarde@onera.fr>
Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <jtaylor.debian@googlemail.com>
Julian Taylor <juliantaylor108@gmail.com> Julian Taylor <juliantaylor108@googlemail.com>
Julien Lhermitte <jrmlhermitte@gmail.com> Julien Lhermitte <lhermitte@bnl.gov>
Julien Schueller <julien.schueller@gmail.com> jschueller <julien.schueller@gmail.com>
+Kai Striega <kaistriega@gmail.com> kai <kaistriega@gmail.com>
+Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega@gmail.com>
+Kai Striega <kaistriega@gmail.com> kai-striega <kaistriega+github@gmail.com>
Khaled Ben Abdallah Okuda <khaled.ben.okuda@gmail.com> KhaledTo <khaled.ben.okuda@gmail.com>
+Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@gmail.com>
+Kiko Correoso <kachine@protonmail.com> kikocorreoso <kikocorreoso@users.noreply.github.com>
Konrad Kapp <k_kapp@yahoo.com> k_kapp@yahoo.com <k_kapp@yahoo.com>
+Kriti Singh <kritisingh1.ks@gmail.com> kritisingh1 <kritisingh1.ks@gmail.com>
+Kmol Yuan <pyslvs@gmail.com> Yuan <pyslvs@gmail.com>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <l.buitinck@esciencecenter.nl>
Lars Buitinck <larsmans@gmail.com> Lars Buitinck <L.J.Buitinck@uva.nl>
+Lars Grüter <lagru@mailbox.org> Lars G <lagru@mailbox.org>
Luis Pedro Coelho <luis@luispedro.org> Luis Pedro Coelho <lpc@cmu.edu>
Luke Zoltan Kelley <lkelley@cfa.harvard.edu> lzkelley <lkelley@cfa.harvard.edu>
Manoj Kumar <manojkumarsivaraj334@gmail.com> MechCoder <manojkumarsivaraj334@gmail.com>
Mark DePristo <mdepristo@synapdx.com> markdepristo <mdepristo@synapdx.com>
+Mark Weissman <mw9050@gmail.com> m-d-w <mw9050@gmail.com>
Mark Wiebe <mwwiebe@gmail.com> Mark <mwwiebe@gmail.com>
Mark Wiebe <mwwiebe@gmail.com> Mark Wiebe <mwiebe@continuum.io>
Mark Wiebe <mwwiebe@gmail.com> Mark Wiebe <mwiebe@enthought.com>
@@ -134,6 +167,7 @@ Michael Droettboom <mdboom@gmail.com> mdroe <mdroe@localhost>
Michael K. Tran <trankmichael@gmail.com> mtran <trankmichael@gmail.com>
Michael Martin <mmartin4242@gmail.com> mmartin <mmartin4242@gmail.com>
Michael Schnaitter <schnaitterm@knights.ucf.edu> schnaitterm <schnaitterm@users.noreply.github.com>
+Muhammad Kasim <firman.kasim@gmail.com> mfkasim91 <firman.kasim@gmail.com>
Nathaniel J. Smith <njs@pobox.com> njsmith <njs@pobox.com>
Naveen Arunachalam <notatroll.troll@gmail.com> naveenarun <notatroll.troll@gmail.com>
Nicolas Scheffer <nicolas.scheffer@sri.com> Nicolas Scheffer <scheffer@speech.sri.com>
@@ -155,6 +189,7 @@ Ralf Gommers <ralf.gommers@gmail.com> Ralf Gommers <ralf.gommers@googlemail.com>
Ralf Gommers <ralf.gommers@gmail.com> rgommers <ralf.gommers@googlemail.com>
Rehas Sachdeva <aquannie@gmail.com> rehassachdeva <aquannie@gmail.com>
Ritta Narita <narittan@gmail.com> RittaNarita <narittan@gmail.com>
+Riya Sharma <navneet.nmk@gmail.com> ayir <navneet.nmk@gmail.com>
Robert Kern <rkern@enthought.com> Robert Kern <robert.kern@gmail.com>
Robert LU <robberphex@gmail.com> RobberPhex <robberphex@gmail.com>
Ronan Lamy <ronan.lamy@gmail.com> Ronan Lamy <Ronan.Lamy@normalesup.org>
@@ -167,23 +202,37 @@ Sanchez Gonzalez Alvaro <as12513@imperial.ac.uk> alvarosg <as12513@imperial.ac.u
Saullo Giovani <saullogiovani@gmail.com> saullogiovani <saullogiovani@gmail.com>
Saurabh Mehta <e.samehta@gmail.com>
Sebastian Berg <sebastian@sipsolutions.net> seberg <sebastian@sipsolutions.net>
+Shekhar Prasad Rajak <shekharrajak@live.com> shekharrajak <shekharrajak@live.com>
Shota Kawabuchi <shota.kawabuchi+GitHub@gmail.com> skwbc <shota.kawabuchi+GitHub@gmail.com>
Siavash Eliasi <siavashserver@gmail.com> siavashserver <siavashserver@gmail.com>
+Søren Rasmussen <soren.rasmussen@alexandra.dk> sorenrasmussenai <47032123+sorenrasmussenai@users.noreply.github.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <sjvdwalt@gmail.com>
Stefan van der Walt <stefanv@berkeley.edu> Stefan van der Walt <stefan@sun.ac.za>
Stephan Hoyer <shoyer@gmail.com> Stephan Hoyer <shoyer@climate.com>
Steven J Kern <kern.steven0@gmail.com>
+SuryaChand P <psschand@gmail.com> Surya P <psschand@gmail.com>
+SuryaChand P <psschand@gmail.com> psschand <psschand@gmail.com>
Thomas A Caswell <tcaswell@gmail.com> Thomas A Caswell <tcaswell@bnl.gov>
Tim Cera <tim@cerazone.net> tim cera <tcera@sjrwmd.com>
+Tim Teichmann <t.teichmann@dashdos.com> tteichmann <t.teichmann@dashdos.com>
+Tim Teichmann <t.teichmann@dashdos.com> tteichmann <44259103+tteichmann@users.noreply.github.com>
Tom Boyd <pezcore@users.noreply.github.com> pezcore <pezcore@users.noreply.github.com>
Tom Poole <t.b.poole@gmail.com> tpoole <t.b.poole@gmail.com>
+Tony LaTorre <tlatorre@uchicago.edu> tlatorre <tlatorre@uchicago.edu>
Travis Oliphant <travis@continuum.io> Travis E. Oliphant <teoliphant@gmail.com>
Travis Oliphant <travis@continuum.io> Travis Oliphant <oliphant@enthought.com>
Valentin Haenel <valentin@haenel.co> Valentin Haenel <valentin.haenel@gmx.de>
+Vrinda Narayan <talk2vrinda@gmail.com> vrindaaa <48102157+vrindaaa@users.noreply.github.com>
Warren Weckesser <warren.weckesser@enthought.com> Warren Weckesser <warren.weckesser@gmail.com>
+Weitang Li <liwt31@163.com> wtli@Dirac <liwt31@163.com>
+Weitang Li <liwt31@163.com> wtli <liwt31@163.com>
Wendell Smith <wendellwsmith@gmail.com> Wendell Smith <wackywendell@gmail.com>
-William Spotz <wfspotz@sandia.gov@localhost> wfspotz@sandia.gov <wfspotz@sandia.gov@localhost>
+Wim Glenn <wim.glenn@melbourneit.com.au> wim glenn <wim.glenn@melbourneit.com.au>
Wojtek Ruszczewski <git@wr.waw.pl> wrwrwr <git@wr.waw.pl>
+Yuji Kanagawa <yuji.kngw.80s.revive@gmail.com> kngwyu <yuji.kngw.80s.revive@gmail.com>
+Yury Kirienko <yury.kirienko@gmail.com> kirienko <yury.kirienko@gmail.com>
Zixu Zhao <zixu.zhao.tireless@gmail.com> ZZhaoTireless <zixu.zhao.tireless@gmail.com>
Ziyan Zhou<ziyan.zhou@mujin.co.jp> Ziyan <ziyan.zhou@mujin.co.jp>
-luzpaz <luzpaz@users.noreply.github.com> luz.paz <luzpaz@users.noreply.github.com>
+luzpaz <kunda@scribus.net> luz.paz <luzpaz@users.noreply.github.com>
+luzpaz <kunda@scribus.net> luzpaz <luzpaz@users.noreply.github.com>
+spacescientist <aspacescientist@protonmail.com> spacescientist <spacescientist@pm.me>
diff --git a/.travis.yml b/.travis.yml
index cf67134d3..68564d35b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -2,8 +2,7 @@
# http://lint.travis-ci.org/
language: python
group: travis_latest
-# Run jobs on container-based infrastructure, can be overridden per job
-sudo: false
+dist: xenial
# Travis whitelists the installable packages, additions can be requested
# https://github.com/travis-ci/apt-package-whitelist
@@ -11,7 +10,6 @@ addons:
apt:
packages: &common_packages
- gfortran
- - libatlas-dev
- libatlas-base-dev
# Speedup builds, particularly when USE_CHROOT=1
- eatmydata
@@ -20,8 +18,17 @@ cache:
directories:
- $HOME/.cache/pip
+stage: Comprehensive tests
+
+stages:
+ # Do the style check and a single test job, don't proceed if it fails
+ - name: Initial tests
+ # Do the rest of the tests
+ - name: Comprehensive tests
+
env:
global:
+ - OpenBLAS_version=0.3.7
- WHEELHOUSE_UPLOADER_USERNAME=travis.numpy
# The following is generated with the command:
# travis encrypt -r numpy/numpy WHEELHOUSE_UPLOADER_SECRET=tH3AP1KeY
@@ -30,28 +37,18 @@ env:
iFWt9Ka92CaqYdU7nqfWp9VImSndPmssjmCXJ1v1IjZPAM\
ahp7Qnm0rWRmA0z9SomuRUQOJQ6s684vU="
-python:
- - 2.7
- - 3.5
- - 3.6
matrix:
include:
+ # Do all python versions without environment variables set
+ - python: 3.5
+ - stage: Initial tests
+ python: 3.6
+ - python: 3.7
+ - python: 3.8-dev
- python: 3.7
- dist: xenial # Required for Python 3.7
- sudo: true # travis-ci/travis-ci#9069
env: INSTALL_PICKLE5=1
- python: 3.6
- env: USE_CHROOT=1 ARCH=i386 DIST=bionic PYTHON=3.6
- sudo: true
- addons:
- apt:
- update: true
- packages:
- - dpkg
- - debootstrap
- - python: 3.5
- dist: xenial # Required for python3.5-dbg
- sudo: true # travis-ci/travis-ci#9069
+ dist: bionic
env: USE_DEBUG=1
addons:
apt:
@@ -63,23 +60,35 @@ matrix:
- python3-setuptools
- python: 3.6
env: USE_WHEEL=1 RUN_FULL_TESTS=1 RUN_COVERAGE=1 INSTALL_PICKLE5=1
- - python: 2.7
- env: USE_WHEEL=1 RUN_FULL_TESTS=1 PYTHON_OPTS="-3 -OO"
- python: 3.6
env: USE_SDIST=1
- python: 3.6
env:
- PYTHONOPTIMIZE=2
+ - BLAS=None
+ - LAPACK=None
+ - ATLAS=None
+ - NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas
+ - NPY_LAPACK_ORDER=MKL,OPENBLAS,ATLAS,ACCELERATE,LAPACK
- USE_ASV=1
- python: 3.5
env: NPY_RELAXED_STRIDES_CHECKING=0
- python: 3.6
env: USE_WHEEL=1 NPY_RELAXED_STRIDES_DEBUG=1
- python: 3.6
+ env: NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0
+ - python: 3.6
env:
- BLAS=None
- LAPACK=None
- ATLAS=None
+ - os: linux-ppc64le
+ python: 3.6
+ env:
+ # for matrix annotation only
+ - PPC64_LE=1
+ # use POWER8 OpenBLAS build, not system ATLAS
+ - ATLAS=None
before_install:
- ./tools/travis-before-install.sh
diff --git a/INSTALL.rst.txt b/INSTALL.rst.txt
index d3ed7197e..bd2f4f92c 100644
--- a/INSTALL.rst.txt
+++ b/INSTALL.rst.txt
@@ -12,10 +12,9 @@ https://scipy.org/install.html.
Prerequisites
=============
-Building NumPy requires the following software installed:
+Building NumPy requires the following installed software:
-1) For Python 2, Python__ 2.7.x or newer.
- For Python 3, Python__ 3.4.x or newer.
+1) For Python 3, Python__ 3.5.x or newer.
On Debian and derivative (Ubuntu): python python-dev
@@ -27,8 +26,9 @@ Building NumPy requires the following software installed:
Python must also be compiled with the zlib module enabled.
-2) Cython >= 0.19 (for development versions of numpy, not for released
- versions)
+2) Cython >= 0.29.2 (for development versions of numpy, not for released
+ versions)
+
3) pytest__ (optional) 1.15 or later
This is required for testing numpy, but not for using it.
@@ -61,8 +61,6 @@ To perform an inplace build that can be run from the source folder run::
python setup.py build_ext --inplace -j 4
-Note that the ``python`` command here is the system default Python, generally
-python 2, the ``python3`` command may be needed to install on python 3.
See `Requirements for Installing Packages <https://packaging.python.org/tutorials/installing-packages/>`_
for more details.
@@ -79,8 +77,13 @@ skipped when running the test suite if no Fortran compiler is available. For
building Scipy a Fortran compiler is needed though, so we include some details
on Fortran compilers in the rest of this section.
-On OS X and Linux, all common compilers will work. Note that for Fortran,
-``gfortran`` is strongly preferred over ``g77``, but if you happen to have both
+On OS X and Linux, all common compilers will work. Note that C99 support is
+required. For compilers that don't support the C99 language standard by
+default (such as ``gcc`` versions < 5.0), it should be enabled. For ``gcc``::
+
+ export CFLAGS='-std=c99'
+
+For Fortran, ``gfortran`` works, ``g77`` does not. In case ``g77`` is
installed then ``g77`` will be detected and used first. To explicitly select
``gfortran`` in that case, do::
@@ -89,7 +92,7 @@ installed then ``g77`` will be detected and used first. To explicitly select
Windows
-------
-On Windows, building from source can be difficult. Currently the most robust
+On Windows, building from source can be difficult. Currently, the most robust
option is to use the Intel compilers, or alternatively MSVC (the same version
as used to build Python itself) with Intel ifort. Intel itself maintains a
good `application note <https://software.intel.com/en-us/articles/numpyscipy-with-intel-mkl>`_
@@ -129,7 +132,7 @@ ATLAS) will also work.
Ubuntu/Debian
-------------
-For best performance a development package providing BLAS and CBLAS should be
+For best performance, a development package providing BLAS and CBLAS should be
installed. Some of the options available are:
- ``libblas-dev``: reference BLAS (not very optimized)
diff --git a/LICENSE.txt b/LICENSE.txt
index 4b4dd3388..5eae3201a 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2005-2017, NumPy Developers.
+Copyright (c) 2005-2019, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -28,33 +28,3 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-
-The NumPy repository and source distributions bundle several libraries that are
-compatibly licensed. We list these here.
-
-Name: Numpydoc
-Files: doc/sphinxext/numpydoc/*
-License: 2-clause BSD
- For details, see doc/sphinxext/LICENSE.txt
-
-Name: scipy-sphinx-theme
-Files: doc/scipy-sphinx-theme/*
-License: 3-clause BSD, PSF and Apache 2.0
- For details, see doc/scipy-sphinx-theme/LICENSE.txt
-
-Name: lapack-lite
-Files: numpy/linalg/lapack_lite/*
-License: 3-clause BSD
- For details, see numpy/linalg/lapack_lite/LICENSE.txt
-
-Name: tempita
-Files: tools/npy_tempita/*
-License: BSD derived
- For details, see tools/npy_tempita/license.txt
-
-Name: dragon4
-Files: numpy/core/src/multiarray/dragon4.c
-License: One of a kind
- For license text, see numpy/core/src/multiarray/dragon4.c
diff --git a/LICENSES_bundled.txt b/LICENSES_bundled.txt
new file mode 100644
index 000000000..ea349c7ee
--- /dev/null
+++ b/LICENSES_bundled.txt
@@ -0,0 +1,27 @@
+The NumPy repository and source distributions bundle several libraries that are
+compatibly licensed. We list these here.
+
+Name: Numpydoc
+Files: doc/sphinxext/numpydoc/*
+License: 2-clause BSD
+ For details, see doc/sphinxext/LICENSE.txt
+
+Name: scipy-sphinx-theme
+Files: doc/scipy-sphinx-theme/*
+License: 3-clause BSD, PSF and Apache 2.0
+ For details, see doc/scipy-sphinx-theme/LICENSE.txt
+
+Name: lapack-lite
+Files: numpy/linalg/lapack_lite/*
+License: 3-clause BSD
+ For details, see numpy/linalg/lapack_lite/LICENSE.txt
+
+Name: tempita
+Files: tools/npy_tempita/*
+License: BSD derived
+ For details, see tools/npy_tempita/license.txt
+
+Name: dragon4
+Files: numpy/core/src/multiarray/dragon4.c
+License: MIT
+ For license text, see numpy/core/src/multiarray/dragon4.c
diff --git a/MANIFEST.in b/MANIFEST.in
index e15e0e58a..7ab57eb8c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -5,21 +5,28 @@
# Avoid using MANIFEST.in for that.
#
include MANIFEST.in
+include pyproject.toml
include pytest.ini
include *.txt
+include README.md
include site.cfg.example
-include numpy/random/mtrand/generate_mtrand_c.py
-recursive-include numpy/random/mtrand *.pyx *.pxd
+include runtests.py
+include tox.ini
+include .coveragerc
+include test_requirements.txt
+recursive-include numpy/random *.pyx *.pxd *.pyx.in *.pxd.in
+include numpy/__init__.pxd
# Add build support that should go in sdist, but not go in bdist/be installed
+# Note that sub-directories that don't have __init__ are apparently not
+# included by 'recursive-include', so list those separately
+recursive-include numpy *
recursive-include numpy/_build_utils *
-recursive-include numpy/linalg/lapack_lite *.c *.h
-include runtests.py
-include tox.ini pytest.ini .coveragerc
+recursive-include numpy/linalg/lapack_lite *
recursive-include tools *
# Add sdist files whose use depends on local configuration.
include numpy/core/src/common/cblasfuncs.c
include numpy/core/src/common/python_xerbla.c
-# Adding scons build related files not found by distutils
+# Adding build related files not found by distutils
recursive-include numpy/core/code_generators *.py *.txt
recursive-include numpy/core *.in *.h
# Add documentation and benchmarks: we don't use add_data_dir since we do not
@@ -37,3 +44,6 @@ prune benchmarks/numpy
# Exclude generated files
prune */__pycache__
global-exclude *.pyc *.pyo *.pyd *.swp *.bak *~
+# Exclude license file that we append to the main license when running
+# `python setup.py sdist`
+exclude LICENSES_bundled.txt
diff --git a/README.md b/README.md
index d7f23904b..0599c46f7 100644
--- a/README.md
+++ b/README.md
@@ -2,19 +2,20 @@
[![Travis](https://img.shields.io/travis/numpy/numpy/master.svg?label=Travis%20CI)](
https://travis-ci.org/numpy/numpy)
-[![AppVeyor](https://img.shields.io/appveyor/ci/charris/numpy/master.svg?label=AppVeyor)](
- https://ci.appveyor.com/project/charris/numpy)
[![Azure](https://dev.azure.com/numpy/numpy/_apis/build/status/azure-pipeline%20numpy.numpy)](
- https://dev.azure.com/numpy/numpy/_apis/build/status/azure-pipeline%20numpy.numpy?branchName=master)
+ https://dev.azure.com/numpy/numpy/_build/latest?definitionId=5)
[![codecov](https://codecov.io/gh/numpy/numpy/branch/master/graph/badge.svg)](
https://codecov.io/gh/numpy/numpy)
NumPy is the fundamental package needed for scientific computing with Python.
-- **Website (including documentation):** https://www.numpy.org
+- **Website:** https://www.numpy.org
+- **Documentation:** http://docs.scipy.org/
- **Mailing list:** https://mail.python.org/mailman/listinfo/numpy-discussion
-- **Source:** https://github.com/numpy/numpy
+- **Source code:** https://github.com/numpy/numpy
+- **Contributing:** https://www.numpy.org/devdocs/dev/index.html
- **Bug reports:** https://github.com/numpy/numpy/issues
+- **Report a security vulnerability:** https://tidelift.com/docs/security
It provides:
@@ -32,4 +33,17 @@ Tests can then be run after installation with:
python -c 'import numpy; numpy.test()'
+
+Call for Contributions
+----------------------
+
+NumPy appreciates help from a wide range of different backgrounds.
+Work such as high level documentation or website improvements are valuable
+and we would like to grow our team with people filling these roles.
+Small improvements or fixes are always appreciated and issues labeled as easy
+may be a good starting point.
+If you are considering larger contributions outside the traditional coding work,
+please contact us through the mailing list.
+
+
[![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](https://numfocus.org)
diff --git a/azure-pipelines.yml b/azure-pipelines.yml
index 14a83b70b..633808c0b 100644
--- a/azure-pipelines.yml
+++ b/azure-pipelines.yml
@@ -1,116 +1,182 @@
-jobs:
-- job: macOS
- pool:
- # NOTE: at time of writing, there is a danger
- # that using an invalid vmIMage string for macOS
- # image silently redirects to a Windows build on Azure;
- # for now, use the only image name officially present in
- # the docs even though i.e., numba uses another in their
- # azure config for mac os -- Microsoft has indicated
- # they will patch this issue
- vmIMage: macOS-10.13
- steps:
- # the @0 refers to the (major) version of the *task* on Microsoft's
- # end, not the order in the build matrix nor anything to do
- # with version of Python selected
- - task: UsePythonVersion@0
- inputs:
- versionSpec: '3.6'
- addToPath: true
- architecture: 'x64'
- # NOTE: do we have a compelling reason to use older / newer
- # versions of Xcode toolchain for testing?
- - script: /bin/bash -c "sudo xcode-select -s /Applications/Xcode_10.app/Contents/Developer"
- displayName: 'select Xcode version'
- # NOTE: might be better if we could avoid installing
- # two C compilers, but with homebrew looks like we're
- # now stuck getting the full gcc toolchain instead of
- # just pulling in gfortran
- - script: brew install gcc
- displayName: 'make gfortran available on mac os vm'
- - script: python -m pip install --upgrade pip setuptools wheel
- displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest pickle5
- displayName: 'Install dependencies; some are optional to avoid test skips'
- # NOTE: init_dgelsd failed init issue with current ACCELERATE /
- # LAPACK configuration on Azure macos image; at the time of writing
- # this plagues homebrew / macports NumPy builds, but we will
- # circumvent for now by aggressively disabling acceleration for
- # macos NumPy builds / tests; ACCELERATE=None on its own is not
- # sufficient
- # also, might as well prefer usage of clang over gcc proper
- # to match likely scenario on many user mac machines
- - script: python setup.py build -j 4 install
- displayName: 'Build NumPy'
- env:
- BLAS: None
- LAPACK: None
- ATLAS: None
- ACCELERATE: None
- CC: /usr/bin/clang
- - script: python runtests.py --mode=full -- -rsx --junitxml=junit/test-results.xml
- displayName: 'Run Full NumPy Test Suite'
- - task: PublishTestResults@2
- inputs:
- testResultsFiles: '**/test-*.xml'
- testRunTitle: 'Publish test results for Python $(python.version)'
-- job: Windows
- pool:
- vmIMage: 'VS2017-Win2016'
- strategy:
- maxParallel: 6
- matrix:
- Python36-32bit-fast:
- PYTHON_VERSION: '3.6'
- PYTHON_ARCH: 'x86'
- TEST_MODE: fast
- Python37-32bit-fast:
- PYTHON_VERSION: '3.7'
- PYTHON_ARCH: 'x86'
- TEST_MODE: fast
- Python27-64bit-fast:
- PYTHON_VERSION: '2.7'
- PYTHON_ARCH: 'x64'
- TEST_MODE: fast
- Python35-64bit-full:
- PYTHON_VERSION: '3.5'
- PYTHON_ARCH: 'x64'
- TEST_MODE: full
- Python36-64bit-full:
- PYTHON_VERSION: '3.6'
- PYTHON_ARCH: 'x64'
- TEST_MODE: full
- INSTALL_PICKLE5: 1
- Python37-64bit-full:
- PYTHON_VERSION: '3.7'
- PYTHON_ARCH: 'x64'
- TEST_MODE: full
- INSTALL_PICKLE5: 1
- steps:
- - task: UsePythonVersion@0
- inputs:
- versionSpec: $(PYTHON_VERSION)
- addToPath: true
- architecture: $(PYTHON_ARCH)
- # as noted by numba project, currently need
- # specific VC install for Python 2.7
- # NOTE: had some issues splitting powershell
- # command into bits and / or using condition
- # directive, so squeezing operation to a single
- # line for now
- - powershell: if ($env:PYTHON_VERSION -eq 2.7) {$wc = New-Object net.webclient; $wc.Downloadfile("https://download.microsoft.com/download/7/9/6/796EF2E4-801B-4FC4-AB28-B59FBF6D907B/VCForPython27.msi", "VCForPython27.msi"); Start-Process "VCForPython27.msi" /qn -Wait}
- displayName: 'Install VC 9.0'
- - script: python -m pip install --upgrade pip setuptools wheel
- displayName: 'Install tools'
- - script: python -m pip install cython nose pytz pytest
- displayName: 'Install dependencies; some are optional to avoid test skips'
- # NOTE: for Windows builds it seems much more tractable to use runtests.py
- # vs. manual setup.py and then runtests.py for testing only
- - script: if [%INSTALL_PICKLE5%]==[1] python -m pip install pickle5
- displayName: 'Install optional pickle5 backport (only for python3.6 and 3.7)'
- - script: python runtests.py --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml
- displayName: 'Build NumPy & Run Full NumPy Test Suite'
- - task: PublishTestResults@2
- inputs:
- testResultsFiles: '**/test-*.xml'
- testRunTitle: 'Publish test results for Python $(python.version)'
+trigger:
+ # start a new build for every push
+ batch: False
+ branches:
+ include:
+ - master
+ - maintenance/*
+variables:
+ # OpenBLAS_version should be updated
+ # to match numpy-wheels repo
+ OpenBLAS_version: 0.3.7
+
+stages:
+- stage: InitialTests
+ jobs:
+ - job: WindowsFast
+ pool:
+ vmImage: 'VS2017-Win2016'
+ strategy:
+ matrix:
+ Python36-64bit-fast:
+ PYTHON_VERSION: '3.6'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: fast
+ BITS: 64
+ steps:
+ - template: azure-steps-windows.yml
+
+- stage: ComprehensiveTests
+ jobs:
+ - job: Linux_Python_36_32bit_full_with_asserts
+ pool:
+ vmImage: 'ubuntu-16.04'
+ steps:
+ - script: |
+ docker pull i386/ubuntu:bionic
+ docker run -v $(pwd):/numpy i386/ubuntu:bionic /bin/bash -c "cd numpy && \
+ apt-get -y update && \
+ apt-get -y install python3.6-dev python3-pip locales python3-certifi && \
+ locale-gen fr_FR && update-locale && \
+ apt-get -y install gfortran-5 wget && \
+ target=\$(python3 tools/openblas_support.py) && \
+ cp -r \$target/usr/local/lib/* /usr/lib && \
+ cp \$target/usr/local/include/* /usr/include && \
+ python3 -m pip install --user --upgrade pip setuptools && \
+ python3 -m pip install --user -r test_requirements.txt && \
+ python3 -m pip install . && \
+ F77=gfortran-5 F90=gfortran-5 \
+ CFLAGS='-UNDEBUG -std=c99' python3 runtests.py -n --debug-info --mode=full -- -rsx --junitxml=junit/test-results.xml && \
+ python3 tools/openblas_support.py --check_version $(OpenBLAS_version)"
+ displayName: 'Run 32-bit Ubuntu Docker Build / Tests'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python 3.6-32 bit full Linux'
+ - job: macOS
+ pool:
+ # NOTE: at time of writing, there is a danger
+ # that using an invalid vmIMage string for macOS
+ # image silently redirects to a Windows build on Azure;
+ # for now, use the only image name officially present in
+ # the docs even though i.e., numba uses another in their
+ # azure config for mac os -- Microsoft has indicated
+ # they will patch this issue
+ vmImage: macOS-10.13
+ steps:
+ # the @0 refers to the (major) version of the *task* on Microsoft's
+ # end, not the order in the build matrix nor anything to do
+ # with version of Python selected
+ - task: UsePythonVersion@0
+ inputs:
+ versionSpec: '3.6'
+ addToPath: true
+ architecture: 'x64'
+ # NOTE: do we have a compelling reason to use older / newer
+ # versions of Xcode toolchain for testing?
+ - script: /bin/bash -c "sudo xcode-select -s /Applications/Xcode_10.app/Contents/Developer"
+ displayName: 'select Xcode version'
+ # NOTE: might be better if we could avoid installing
+ # two C compilers, but with homebrew looks like we're
+ # now stuck getting the full gcc toolchain instead of
+ # just pulling in gfortran
+ - script: |
+ # same version of gfortran as the wheel builds
+ brew install gcc49
+ # manually link critical gfortran libraries
+ ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libgfortran.3.dylib /usr/local/lib/libgfortran.3.dylib
+ ln -s /usr/local/Cellar/gcc@4.9/4.9.4_1/lib/gcc/4.9/libquadmath.0.dylib /usr/local/lib/libquadmath.0.dylib
+ # manually symlink gfortran-4.9 to plain gfortran
+ # for f2py
+ ln -s /usr/local/bin/gfortran-4.9 /usr/local/bin/gfortran
+ displayName: 'make gfortran available on mac os vm'
+ # use the pre-built openblas binary that most closely
+ # matches our MacOS wheel builds -- currently based
+ # primarily on file size / name details
+ - script: |
+ target=$(python tools/openblas_support.py)
+ # manually link to appropriate system paths
+ cp $target/usr/local/lib/* /usr/local/lib/
+ cp $target/usr/local/include/* /usr/local/include/
+ displayName: 'install pre-built openblas'
+ - script: python -m pip install --upgrade pip setuptools wheel
+ displayName: 'Install tools'
+ - script: |
+ python -m pip install -r test_requirements.txt
+ python -m pip install vulture docutils sphinx==2.2.0 numpydoc
+ displayName: 'Install dependencies; some are optional to avoid test skips'
+ - script: /bin/bash -c "! vulture . --min-confidence 100 --exclude doc/,numpy/distutils/ | grep 'unreachable'"
+ displayName: 'Check for unreachable code paths in Python modules'
+ # prefer usage of clang over gcc proper
+ # to match likely scenario on many user mac machines
+ - script: python setup.py build -j 4 build_src --verbose-cfg install
+ displayName: 'Build NumPy'
+ env:
+ BLAS: None
+ LAPACK: None
+ ATLAS: None
+ ACCELERATE: None
+ CC: /usr/bin/clang
+ # wait until after dev build of NumPy to pip
+ # install matplotlib to avoid pip install of older numpy
+ - script: python -m pip install matplotlib
+ displayName: 'Install matplotlib before refguide run'
+ - script: python runtests.py -g --refguide-check
+ displayName: 'Run Refuide Check'
+ - script: python runtests.py -n --mode=full -- -rsx --junitxml=junit/test-results.xml
+ displayName: 'Run Full NumPy Test Suite'
+ - bash: python tools/openblas_support.py --check_version $(OpenBLAS_version)
+ displayName: 'Verify OpenBLAS version'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python 3.6 64-bit full Mac OS'
+ - job: Windows
+ pool:
+ vmImage: 'VS2017-Win2016'
+ strategy:
+ maxParallel: 6
+ matrix:
+ Python36-32bit-fast:
+ PYTHON_VERSION: '3.6'
+ PYTHON_ARCH: 'x86'
+ TEST_MODE: fast
+ BITS: 32
+ Python37-32bit-fast:
+ PYTHON_VERSION: '3.7'
+ PYTHON_ARCH: 'x86'
+ TEST_MODE: fast
+ BITS: 32
+ Python35-64bit-full:
+ PYTHON_VERSION: '3.5'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: full
+ BITS: 64
+ Python36-64bit-full:
+ PYTHON_VERSION: '3.6'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: full
+ BITS: 64
+ Python37-64bit-full:
+ PYTHON_VERSION: '3.7'
+ PYTHON_ARCH: 'x64'
+ TEST_MODE: full
+ BITS: 64
+ steps:
+ - template: azure-steps-windows.yml
+ - job: Linux_PyPy3
+ pool:
+ vmIMage: 'ubuntu-16.04'
+ steps:
+ - script: source tools/pypy-test.sh
+ displayName: 'Run PyPy3 Build / Tests'
+ - task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ testRunTitle: 'Publish test results for PyPy3'
+ failTaskOnFailedTests: true
diff --git a/azure-steps-windows.yml b/azure-steps-windows.yml
new file mode 100644
index 000000000..26d7a667d
--- /dev/null
+++ b/azure-steps-windows.yml
@@ -0,0 +1,56 @@
+steps:
+- task: UsePythonVersion@0
+ inputs:
+ versionSpec: $(PYTHON_VERSION)
+ addToPath: true
+ architecture: $(PYTHON_ARCH)
+- script: python -m pip install --upgrade pip setuptools wheel
+ displayName: 'Install tools'
+- script: python -m pip install -r test_requirements.txt
+ displayName: 'Install dependencies; some are optional to avoid test skips'
+- powershell: |
+ $pyversion = python -c "from __future__ import print_function; import sys; print(sys.version.split()[0])"
+ Write-Host "Python Version: $pyversion"
+ $target = "C:\\hostedtoolcache\\windows\\Python\\$pyversion\\$(PYTHON_ARCH)\\lib\\openblas.a"
+ Write-Host "target path: $target"
+ $openblas = python tools/openblas_support.py
+ cp $openblas $target
+ displayName: 'Download / Install OpenBLAS'
+
+- powershell: |
+ choco install -y mingw --forcex86 --force --version=5.3.0
+ displayName: 'Install 32-bit mingw for 32-bit builds'
+ condition: eq(variables['BITS'], 32)
+# NOTE: for Windows builds it seems much more tractable to use runtests.py
+# vs. manual setup.py and then runtests.py for testing only
+- powershell: |
+ If ($(BITS) -eq 32) {
+ $env:CFLAGS = "-m32"
+ $env:LDFLAGS = "-m32"
+ $env:PATH = "C:\\tools\\mingw32\\bin;" + $env:PATH
+ refreshenv
+ }
+ python -c "from tools import openblas_support; openblas_support.make_init('numpy')"
+ pip wheel -v -v -v --wheel-dir=dist .
+
+ ls dist -r | Foreach-Object {
+ pip install $_.FullName
+ }
+ displayName: 'Build NumPy'
+- bash: |
+ pushd . && cd .. && target=$(python -c "import numpy, os; print(os.path.abspath(os.path.join(os.path.dirname(numpy.__file__), '.libs')))") && popd
+ pip download -d destination --only-binary --no-deps numpy==1.14
+ cd destination && unzip numpy*.whl && cp numpy/.libs/*.dll $target
+ ls $target
+ displayName: 'Add extraneous & older DLL to numpy/.libs to probe DLL handling robustness'
+ condition: eq(variables['PYTHON_VERSION'], '3.6')
+- script: pushd . && cd .. && python -c "from ctypes import windll; windll.kernel32.SetDefaultDllDirectories(0x00000800); import numpy" && popd
+ displayName: 'For gh-12667; Windows DLL resolution'
+- script: python runtests.py -n --show-build-log --mode=$(TEST_MODE) -- -rsx --junitxml=junit/test-results.xml
+ displayName: 'Run NumPy Test Suite'
+- task: PublishTestResults@2
+ condition: succeededOrFailed()
+ inputs:
+ testResultsFiles: '**/test-*.xml'
+ failTaskOnFailedTests: true
+ testRunTitle: 'Publish test results for Python $(PYTHON_VERSION) $(BITS)-bit $(TEST_MODE) Windows' \ No newline at end of file
diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json
index 653b48a08..45da9533f 100644
--- a/benchmarks/asv.conf.json
+++ b/benchmarks/asv.conf.json
@@ -35,7 +35,7 @@
// The Pythons you'd like to test against. If not provided, defaults
// to the current version of Python used to run `asv`.
- "pythons": ["2.7"],
+ "pythons": ["3.6"],
// The matrix of dependencies to test. Each key is the name of a
// package (in PyPI) and the values are version numbers. An empty
diff --git a/benchmarks/benchmarks/bench_avx.py b/benchmarks/benchmarks/bench_avx.py
new file mode 100644
index 000000000..f7b524e43
--- /dev/null
+++ b/benchmarks/benchmarks/bench_avx.py
@@ -0,0 +1,34 @@
+from __future__ import absolute_import, division, print_function
+
+from .common import Benchmark
+
+import numpy as np
+
+avx_ufuncs = ['sqrt',
+ 'absolute',
+ 'reciprocal',
+ 'square',
+ 'rint',
+ 'floor',
+ 'ceil' ,
+ 'trunc']
+stride = [1, 2, 4]
+dtype = ['f', 'd']
+
+class AVX_UFunc(Benchmark):
+ params = [avx_ufuncs, stride, dtype]
+ param_names = ['avx_based_ufunc', 'stride', 'dtype']
+ timeout = 10
+
+ def setup(self, ufuncname, stride, dtype):
+ np.seterr(all='ignore')
+ try:
+ self.f = getattr(np, ufuncname)
+ except AttributeError:
+ raise NotImplementedError()
+ N = 10000
+ self.arr = np.ones(stride*N, dtype)
+
+ def time_ufunc(self, ufuncname, stride, dtype):
+ self.f(self.arr[::stride])
+
diff --git a/benchmarks/benchmarks/bench_core.py b/benchmarks/benchmarks/bench_core.py
index 26cffcab1..f7ce61b8f 100644
--- a/benchmarks/benchmarks/bench_core.py
+++ b/benchmarks/benchmarks/bench_core.py
@@ -10,6 +10,7 @@ class Core(Benchmark):
self.l100 = range(100)
self.l50 = range(50)
self.l = [np.arange(1000), np.arange(1000)]
+ self.l_view = [memoryview(a) for a in self.l]
self.l10x10 = np.ones((10, 10))
def time_array_1(self):
@@ -27,6 +28,9 @@ class Core(Benchmark):
def time_array_l(self):
np.array(self.l)
+ def time_array_l_view(self):
+ np.array(self.l_view)
+
def time_vstack_l(self):
np.vstack(self.l)
@@ -97,8 +101,8 @@ class Temporaries(Benchmark):
class CorrConv(Benchmark):
- params = [[50, 1000, 1e5],
- [10, 100, 1000, 1e4],
+ params = [[50, 1000, int(1e5)],
+ [10, 100, 1000, int(1e4)],
['valid', 'same', 'full']]
param_names = ['size1', 'size2', 'mode']
@@ -162,12 +166,18 @@ class UnpackBits(Benchmark):
def time_unpackbits(self):
np.unpackbits(self.d)
+ def time_unpackbits_little(self):
+ np.unpackbits(self.d, bitorder="little")
+
def time_unpackbits_axis0(self):
np.unpackbits(self.d2, axis=0)
def time_unpackbits_axis1(self):
np.unpackbits(self.d2, axis=1)
+ def time_unpackbits_axis1_little(self):
+ np.unpackbits(self.d2, bitorder="little", axis=1)
+
class Indices(Benchmark):
def time_indices(self):
diff --git a/benchmarks/benchmarks/bench_function_base.py b/benchmarks/benchmarks/bench_function_base.py
index 9ef03262b..2170c4fc4 100644
--- a/benchmarks/benchmarks/bench_function_base.py
+++ b/benchmarks/benchmarks/bench_function_base.py
@@ -95,36 +95,165 @@ class Select(Benchmark):
np.select(self.cond_large, ([self.d, self.e] * 10))
-class Sort(Benchmark):
- def setup(self):
- self.e = np.arange(10000, dtype=np.float32)
- self.o = np.arange(10001, dtype=np.float32)
- np.random.seed(25)
- np.random.shuffle(self.o)
- # quicksort implementations can have issues with equal elements
- self.equal = np.ones(10000)
- self.many_equal = np.sort(np.arange(10000) % 10)
-
- def time_sort(self):
- np.sort(self.e)
+def memoize(f):
+ _memoized = {}
+ def wrapped(*args):
+ if args not in _memoized:
+ _memoized[args] = f(*args)
+
+ return _memoized[args].copy()
+
+ return f
+
+
+class SortGenerator(object):
+ # The size of the unsorted area in the "random unsorted area"
+ # benchmarks
+ AREA_SIZE = 100
+ # The size of the "partially ordered" sub-arrays
+ BUBBLE_SIZE = 100
+
+ @staticmethod
+ @memoize
+ def random(size, dtype):
+ """
+ Returns a randomly-shuffled array.
+ """
+ arr = np.arange(size, dtype=dtype)
+ np.random.shuffle(arr)
+ return arr
+
+ @staticmethod
+ @memoize
+ def ordered(size, dtype):
+ """
+ Returns an ordered array.
+ """
+ return np.arange(size, dtype=dtype)
+
+ @staticmethod
+ @memoize
+ def reversed(size, dtype):
+ """
+ Returns an array that's in descending order.
+ """
+ return np.arange(size-1, -1, -1, dtype=dtype)
+
+ @staticmethod
+ @memoize
+ def uniform(size, dtype):
+ """
+ Returns an array that has the same value everywhere.
+ """
+ return np.ones(size, dtype=dtype)
+
+ @staticmethod
+ @memoize
+ def swapped_pair(size, dtype, swap_frac):
+ """
+ Returns an ordered array, but one that has ``swap_frac * size``
+ pairs swapped.
+ """
+ a = np.arange(size, dtype=dtype)
+ for _ in range(int(size * swap_frac)):
+ x, y = np.random.randint(0, size, 2)
+ a[x], a[y] = a[y], a[x]
+ return a
+
+ @staticmethod
+ @memoize
+ def sorted_block(size, dtype, block_size):
+ """
+ Returns an array with blocks that are all sorted.
+ """
+ a = np.arange(size, dtype=dtype)
+ b = []
+ if size < block_size:
+ return a
+ block_num = size // block_size
+ for i in range(block_num):
+ b.extend(a[i::block_num])
+ return np.array(b)
+
+ @classmethod
+ @memoize
+ def random_unsorted_area(cls, size, dtype, frac, area_size=None):
+ """
+ This type of array has random unsorted areas such that they
+ compose the fraction ``frac`` of the original array.
+ """
+ if area_size is None:
+ area_size = cls.AREA_SIZE
+
+ area_num = int(size * frac / area_size)
+ a = np.arange(size, dtype=dtype)
+ for _ in range(area_num):
+ start = np.random.randint(size-area_size)
+ end = start + area_size
+ np.random.shuffle(a[start:end])
+ return a
+
+ @classmethod
+ @memoize
+ def random_bubble(cls, size, dtype, bubble_num, bubble_size=None):
+ """
+ This type of array has ``bubble_num`` random unsorted areas.
+ """
+ if bubble_size is None:
+ bubble_size = cls.BUBBLE_SIZE
+ frac = bubble_size * bubble_num / size
+
+ return cls.random_unsorted_area(size, dtype, frac, bubble_size)
- def time_sort_random(self):
- np.sort(self.o)
- def time_sort_inplace(self):
- self.e.sort()
-
- def time_sort_equal(self):
- self.equal.sort()
-
- def time_sort_many_equal(self):
- self.many_equal.sort()
-
- def time_argsort(self):
- self.e.argsort()
-
- def time_argsort_random(self):
- self.o.argsort()
+class Sort(Benchmark):
+ """
+ This benchmark tests sorting performance with several
+ different types of arrays that are likely to appear in
+ real-world applications.
+ """
+ params = [
+ # In NumPy 1.17 and newer, 'merge' can be one of several
+ # stable sorts, it isn't necessarily merge sort.
+ ['quick', 'merge', 'heap'],
+ ['float64', 'int64', 'int16'],
+ [
+ ('random',),
+ ('ordered',),
+ ('reversed',),
+ ('uniform',),
+ ('sorted_block', 10),
+ ('sorted_block', 100),
+ ('sorted_block', 1000),
+ # ('swapped_pair', 0.01),
+ # ('swapped_pair', 0.1),
+ # ('swapped_pair', 0.5),
+ # ('random_unsorted_area', 0.5),
+ # ('random_unsorted_area', 0.1),
+ # ('random_unsorted_area', 0.01),
+ # ('random_bubble', 1),
+ # ('random_bubble', 5),
+ # ('random_bubble', 10),
+ ],
+ ]
+ param_names = ['kind', 'dtype', 'array_type']
+
+ # The size of the benchmarked arrays.
+ ARRAY_SIZE = 10000
+
+ def setup(self, kind, dtype, array_type):
+ np.random.seed(1234)
+ array_class = array_type[0]
+ self.arr = getattr(SortGenerator, array_class)(self.ARRAY_SIZE, dtype, *array_type[1:])
+
+ def time_sort(self, kind, dtype, array_type):
+ # Using np.sort(...) instead of arr.sort(...) because it makes a copy.
+ # This is important because the data is prepared once per benchmark, but
+ # used across multiple runs.
+ np.sort(self.arr, kind=kind)
+
+ def time_argsort(self, kind, dtype, array_type):
+ np.argsort(self.arr, kind=kind)
class SortWorst(Benchmark):
@@ -140,7 +269,7 @@ class SortWorst(Benchmark):
def time_sort_worst(self):
np.sort(self.worst)
- # Retain old benchmark name for backward compatability
+ # Retain old benchmark name for backward compatibility
time_sort_worst.benchmark_name = "bench_function_base.Sort.time_sort_worst"
diff --git a/benchmarks/benchmarks/bench_import.py b/benchmarks/benchmarks/bench_import.py
new file mode 100644
index 000000000..83edecafe
--- /dev/null
+++ b/benchmarks/benchmarks/bench_import.py
@@ -0,0 +1,36 @@
+from __future__ import absolute_import, division, print_function
+
+from subprocess import call
+from sys import executable
+from timeit import default_timer
+
+from .common import Benchmark
+
+
class Import(Benchmark):
    """Measure the wall-clock cost of importing numpy and selected
    submodules in a fresh interpreter process."""

    # Subprocess start-up dominates, so time with a wall-clock timer.
    timer = default_timer

    def execute(self, command):
        # Spawn a brand-new interpreter so no module cache from a
        # previous run can hide the import cost.
        call([executable, '-c', command])

    def time_numpy(self):
        self.execute('import numpy')

    def time_numpy_inspect(self):
        # How much would be saved if numpy avoided importing inspect?
        self.execute('import numpy, inspect')

    def time_fft(self):
        self.execute('from numpy import fft')

    def time_linalg(self):
        self.execute('from numpy import linalg')

    def time_ma(self):
        self.execute('from numpy import ma')

    def time_matlib(self):
        self.execute('from numpy import matlib')

    def time_random(self):
        self.execute('from numpy import random')
diff --git a/benchmarks/benchmarks/bench_io.py b/benchmarks/benchmarks/bench_io.py
index 879f9b69e..439cd422f 100644
--- a/benchmarks/benchmarks/bench_io.py
+++ b/benchmarks/benchmarks/bench_io.py
@@ -66,7 +66,8 @@ class Savez(Benchmark):
self.squares = get_squares()
def time_vb_savez_squares(self):
- np.savez('tmp.npz', self.squares)
+ np.savez('tmp.npz', **self.squares)
+
class LoadtxtCSVComments(Benchmark):
# benchmarks for np.loadtxt comment handling
diff --git a/benchmarks/benchmarks/bench_lib.py b/benchmarks/benchmarks/bench_lib.py
index e6c91a27c..f65a96dad 100644
--- a/benchmarks/benchmarks/bench_lib.py
+++ b/benchmarks/benchmarks/bench_lib.py
@@ -9,20 +9,109 @@ import numpy as np
class Pad(Benchmark):
- """Benchmarks for `numpy.pad`."""
+ """Benchmarks for `numpy.pad`.
+
+ When benchmarking the pad function it is useful to cover scenarios where
+ the ratio between the size of the input array and the output array differs
+ significantly (original area vs. padded area). This makes it possible to
+ evaluate which scenario a padding algorithm is optimized for. Furthermore,
+ involving a large range of array sizes ensures that the effects of
+ CPU-bound caching are visible.
+
+ The table below shows the sizes of the arrays involved in this benchmark:
+
+ +-----------------+----------+-----------+-----------+-----------------+
+ | shape | original | padded: 1 | padded: 8 | padded: (0, 32) |
+ +=================+==========+===========+===========+=================+
+ | (2 ** 22,) | 32 MiB | 32.0 MiB | 32.0 MiB | 32.0 MiB |
+ +-----------------+----------+-----------+-----------+-----------------+
+ | (1024, 1024) | 8 MiB | 8.03 MiB | 8.25 MiB | 8.51 MiB |
+ +-----------------+----------+-----------+-----------+-----------------+
+ | (256, 128, 1) | 256 KiB | 786 KiB | 5.08 MiB | 11.6 MiB |
+ +-----------------+----------+-----------+-----------+-----------------+
+ | (4, 4, 4, 4) | 2 KiB | 10.1 KiB | 1.22 MiB | 12.8 MiB |
+ +-----------------+----------+-----------+-----------+-----------------+
+ | (1, 1, 1, 1, 1) | 8 B | 1.90 MiB | 10.8 MiB | 299 MiB |
+ +-----------------+----------+-----------+-----------+-----------------+
+ """
param_names = ["shape", "pad_width", "mode"]
params = [
- [(1000,), (10, 100), (10, 10, 10)],
- [1, 3, (0, 5)],
+ # Shape of the input arrays
+ [(2 ** 22,), (1024, 1024), (256, 128, 1),
+ (4, 4, 4, 4), (1, 1, 1, 1, 1)],
+ # Tested pad widths
+ [1, 8, (0, 32)],
+ # Tested modes: mean, median, minimum & maximum use the same code path
+ # reflect & symmetric share a lot of their code path
["constant", "edge", "linear_ramp", "mean", "reflect", "wrap"],
]
def setup(self, shape, pad_width, mode):
- # avoid np.zeros or np.empty's lazy allocation.
- # np.full causes pagefaults to occur during setup
- # instead of during the benchmark
- self.array = np.full(shape, 0)
+ # Make sure to fill the array to make the OS page fault
+ # in the setup phase and not the timed phase
+ self.array = np.full(shape, fill_value=1, dtype=np.float64)
def time_pad(self, shape, pad_width, mode):
np.pad(self.array, pad_width, mode)
+
class Nan(Benchmark):
    """Benchmarks for the nan-ignoring reductions (``np.nan*``)."""

    param_names = ["array_size", "percent_nans"]
    params = [
        # Lengths of the 1D input arrays.
        [200, int(2e5)],
        # Approximate percentage of np.nan entries in each array.
        [0, 0.1, 2., 50., 90.],
    ]

    def setup(self, array_size, percent_nans):
        np.random.seed(123)
        # Uniform samples fall below `percent_nans / 100` with exactly
        # that probability, so overwriting them with nan yields roughly
        # the requested nan fraction at random positions.
        values = np.random.uniform(size=array_size)
        values[values < percent_nans / 100.] = np.nan
        self.arr = values

    def time_nanmin(self, array_size, percent_nans):
        np.nanmin(self.arr)

    def time_nanmax(self, array_size, percent_nans):
        np.nanmax(self.arr)

    def time_nanargmin(self, array_size, percent_nans):
        np.nanargmin(self.arr)

    def time_nanargmax(self, array_size, percent_nans):
        np.nanargmax(self.arr)

    def time_nansum(self, array_size, percent_nans):
        np.nansum(self.arr)

    def time_nanprod(self, array_size, percent_nans):
        np.nanprod(self.arr)

    def time_nancumsum(self, array_size, percent_nans):
        np.nancumsum(self.arr)

    def time_nancumprod(self, array_size, percent_nans):
        np.nancumprod(self.arr)

    def time_nanmean(self, array_size, percent_nans):
        np.nanmean(self.arr)

    def time_nanvar(self, array_size, percent_nans):
        np.nanvar(self.arr)

    def time_nanstd(self, array_size, percent_nans):
        np.nanstd(self.arr)

    def time_nanmedian(self, array_size, percent_nans):
        np.nanmedian(self.arr)

    def time_nanquantile(self, array_size, percent_nans):
        np.nanquantile(self.arr, q=0.2)

    def time_nanpercentile(self, array_size, percent_nans):
        np.nanpercentile(self.arr, q=50)
diff --git a/benchmarks/benchmarks/bench_linalg.py b/benchmarks/benchmarks/bench_linalg.py
index a65d510be..5c44162a2 100644
--- a/benchmarks/benchmarks/bench_linalg.py
+++ b/benchmarks/benchmarks/bench_linalg.py
@@ -106,4 +106,4 @@ class Lstsq(Benchmark):
self.b = get_indexes_rand()[:100].astype(np.float64)
def time_numpy_linalg_lstsq_a__b_float64(self):
- np.linalg.lstsq(self.a, self.b)
+ np.linalg.lstsq(self.a, self.b, rcond=-1)
diff --git a/benchmarks/benchmarks/bench_overrides.py b/benchmarks/benchmarks/bench_overrides.py
index 2cb94c95c..58572d07d 100644
--- a/benchmarks/benchmarks/bench_overrides.py
+++ b/benchmarks/benchmarks/bench_overrides.py
@@ -2,7 +2,15 @@ from __future__ import absolute_import, division, print_function
from .common import Benchmark
-from numpy.core.overrides import array_function_dispatch
+try:
+ from numpy.core.overrides import array_function_dispatch
+except ImportError:
+ # Don't fail at import time with old Numpy versions
+ def array_function_dispatch(*args, **kwargs):
+ def wrap(*args, **kwargs):
+ return None
+ return wrap
+
import numpy as np
@@ -16,10 +24,10 @@ def mock_broadcast_to(array, shape, subok=False):
def _concatenate_dispatcher(arrays, axis=None, out=None):
- for array in arrays:
- yield array
if out is not None:
- yield out
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
@array_function_dispatch(_concatenate_dispatcher)
diff --git a/benchmarks/benchmarks/bench_random.py b/benchmarks/benchmarks/bench_random.py
index 9d84d83d3..c52b463e5 100644
--- a/benchmarks/benchmarks/bench_random.py
+++ b/benchmarks/benchmarks/bench_random.py
@@ -4,6 +4,13 @@ from .common import Benchmark
import numpy as np
+from numpy.random import RandomState
+
+try:
+ from numpy.random import Generator
+except ImportError:
+ pass
+
class Random(Benchmark):
params = ['normal', 'uniform', 'weibull 1', 'binomial 10 0.5',
@@ -69,14 +76,113 @@ class Randint_dtype(Benchmark):
class Permutation(Benchmark):
def setup(self):
self.n = 10000
- self.a_1d = np.random.random_sample(self.n)
- self.a_2d = np.random.random_sample((self.n, 2))
-
+ self.a_1d = np.random.random(self.n)
+ self.a_2d = np.random.random((self.n, 2))
+
def time_permutation_1d(self):
np.random.permutation(self.a_1d)
def time_permutation_2d(self):
- np.random.permutation(self.a_2d)
+ np.random.permutation(self.a_2d)
def time_permutation_int(self):
np.random.permutation(self.n)
+
+nom_size = 100000
+
class RNG(Benchmark):
    """Compare the new ``Generator`` bit generators against the legacy
    ``RandomState`` interface (the 'numpy' parameter value)."""

    param_names = ['rng']
    params = ['PCG64', 'MT19937', 'Philox', 'SFC64', 'numpy']

    def setup(self, bitgen):
        if bitgen == 'numpy':
            self.rg = np.random.RandomState()
        else:
            self.rg = Generator(getattr(np.random, bitgen)())
        # Warm up / force any lazy initialization outside the timed region.
        self.rg.random()
        self.int32info = np.iinfo(np.int32)
        self.uint32info = np.iinfo(np.uint32)
        self.uint64info = np.iinfo(np.uint64)

    def time_raw(self, bitgen):
        if bitgen == 'numpy':
            self.rg.random_integers(self.int32info.max, size=nom_size)
        else:
            self.rg.integers(self.int32info.max, size=nom_size, endpoint=True)

    def time_32bit(self, bitgen):
        # low/high instead of min/max to avoid shadowing the builtins.
        low, high = self.uint32info.min, self.uint32info.max
        if bitgen == 'numpy':
            self.rg.randint(low, high + 1, nom_size, dtype=np.uint32)
        else:
            self.rg.integers(low, high + 1, nom_size, dtype=np.uint32)

    def time_64bit(self, bitgen):
        low, high = self.uint64info.min, self.uint64info.max
        if bitgen == 'numpy':
            self.rg.randint(low, high + 1, nom_size, dtype=np.uint64)
        else:
            self.rg.integers(low, high + 1, nom_size, dtype=np.uint64)

    def time_normal_zig(self, bitgen):
        self.rg.standard_normal(nom_size)
+
class Bounded(Benchmark):
    """Benchmarks for bounded integer generation across output dtypes.

    The chosen upper bounds exercise the best, worst and typical cases of
    the masking/rejection step in the legacy RandomState implementation.
    """
    u8 = np.uint8
    u16 = np.uint16
    u32 = np.uint32
    u64 = np.uint64
    param_names = ['rng', 'dt_max']
    params = [['PCG64', 'MT19937', 'Philox', 'SFC64', 'numpy'],
              [[u8, 95],
               [u8, 64],  # Worst case for legacy
               [u8, 127],  # Best case for legacy
               [u16, 95],
               [u16, 1024],  # Worst case for legacy
               [u16, 1535],  # Typ. avg. case for legacy
               [u16, 2047],  # Best case for legacy
               [u32, 1024],  # Worst case for legacy
               [u32, 1535],  # Typ. avg. case for legacy
               [u32, 2047],  # Best case for legacy
               [u64, 95],
               [u64, 1024],  # Worst case for legacy
               [u64, 1535],  # Typ. avg. case for legacy
               [u64, 2047],  # Best case for legacy
               ]]

    def setup(self, bitgen, args):
        if bitgen == 'numpy':
            self.rg = np.random.RandomState()
        else:
            self.rg = Generator(getattr(np.random, bitgen)())
        # Warm up the generator outside the timed region.
        self.rg.random()

    def time_bounded(self, bitgen, args):
        """
        Timer for bounded integer generation (any of the dtypes above,
        not only 8-bit).

        Parameters (packed as args)
        ---------------------------
        dt : {uint8, uint16, uint32, uint64}
            output dtype
        upper : int
            Upper bound for the range. Lower is always 0. Must fit in dt.
        """
        # 'upper' rather than 'max' to avoid shadowing the builtin.
        dt, upper = args
        if bitgen == 'numpy':
            self.rg.randint(0, upper + 1, nom_size, dtype=dt)
        else:
            self.rg.integers(0, upper + 1, nom_size, dtype=dt)
+
class Choice(Benchmark):
    """Sampling 1000 items without replacement: legacy
    ``np.random.choice`` vs. the new ``Generator.choice``."""

    # Population sizes to sample from.
    params = [1e3, 1e6, 1e8]

    def setup(self, v):
        self.pool = np.arange(v)
        self.generator = np.random.default_rng()

    def time_legacy_choice(self, v):
        np.random.choice(self.pool, 1000, replace=False)

    def time_choice(self, v):
        self.generator.choice(self.pool, 1000, replace=False)
diff --git a/benchmarks/benchmarks/bench_records.py b/benchmarks/benchmarks/bench_records.py
new file mode 100644
index 000000000..41a6dd775
--- /dev/null
+++ b/benchmarks/benchmarks/bench_records.py
@@ -0,0 +1,43 @@
+from __future__ import absolute_import, division, print_function
+import os
+
+from .common import Benchmark
+
+import numpy as np
+
+
class Records(Benchmark):
    """Benchmarks for ``np.core.records`` constructors with many fields,
    comparing explicit dtypes against format lists/strings."""

    def setup(self):
        self.l50 = np.arange(1000)
        self.fields_number = 10000
        # The same column repeated, so every field has identical format.
        self.arrays = [self.l50 for _ in range(self.fields_number)]
        self.formats = [self.l50.dtype.str for _ in range(self.fields_number)]
        self.formats_str = ','.join(self.formats)
        self.dtype_ = np.dtype(
            [
                ('field_{}'.format(i), self.l50.dtype.str)
                for i in range(self.fields_number)
            ]
        )
        # tobytes() replaces the deprecated tostring() alias; the produced
        # buffer is byte-identical.
        self.buffer = self.l50.tobytes() * self.fields_number

    def time_fromarrays_w_dtype(self):
        np.core.records.fromarrays(self.arrays, dtype=self.dtype_)

    def time_fromarrays_wo_dtype(self):
        np.core.records.fromarrays(self.arrays)

    def time_fromarrays_formats_as_list(self):
        np.core.records.fromarrays(self.arrays, formats=self.formats)

    def time_fromarrays_formats_as_string(self):
        np.core.records.fromarrays(self.arrays, formats=self.formats_str)

    def time_fromstring_w_dtype(self):
        np.core.records.fromstring(self.buffer, dtype=self.dtype_)

    def time_fromstring_formats_as_list(self):
        np.core.records.fromstring(self.buffer, formats=self.formats)

    def time_fromstring_formats_as_string(self):
        np.core.records.fromstring(self.buffer, formats=self.formats_str)
diff --git a/benchmarks/benchmarks/bench_reduce.py b/benchmarks/benchmarks/bench_reduce.py
index ffc148cd2..0043d5357 100644
--- a/benchmarks/benchmarks/bench_reduce.py
+++ b/benchmarks/benchmarks/bench_reduce.py
@@ -32,7 +32,7 @@ class AnyAll(Benchmark):
# avoid np.zeros's lazy allocation that would
# cause page faults during benchmark
self.zeros = np.full(100000, 0, bool)
- self.ones = np.full(100000, 0, bool)
+ self.ones = np.full(100000, 1, bool)
def time_all_fast(self):
self.zeros.all()
diff --git a/benchmarks/benchmarks/bench_shape_base.py b/benchmarks/benchmarks/bench_shape_base.py
index e48ea0adb..187b923cd 100644
--- a/benchmarks/benchmarks/bench_shape_base.py
+++ b/benchmarks/benchmarks/bench_shape_base.py
@@ -88,10 +88,18 @@ class Block2D(Benchmark):
class Block3D(Benchmark):
- params = [1, 10, 100]
- param_names = ['size']
-
- def setup(self, n):
+ """This benchmark concatenates an array of size ``(5n)^3``"""
+ # Having copy as a `mode` of the block3D
+ # allows us to directly compare the benchmark of block
+ # to that of a direct memory copy into new buffers with
+ # the ASV framework.
+ # block and copy will be plotted on the same graph
+ # as opposed to being displayed as separate benchmarks
+ params = [[1, 10, 100],
+ ['block', 'copy']]
+ param_names = ['n', 'mode']
+
+ def setup(self, n, mode):
# Slow setup method: hence separated from the others above
self.a000 = np.ones((2 * n, 2 * n, 2 * n), int) * 1
@@ -105,8 +113,7 @@ class Block3D(Benchmark):
self.a111 = np.ones((3 * n, 3 * n, 3 * n), int) * 8
- def time_3d(self, n):
- np.block([
+ self.block = [
[
[self.a000, self.a001],
[self.a010, self.a011],
@@ -115,7 +122,17 @@ class Block3D(Benchmark):
[self.a100, self.a101],
[self.a110, self.a111],
]
- ])
+ ]
+ self.arr_list = [a
+ for two_d in self.block
+ for one_d in two_d
+ for a in one_d]
+
+ def time_3d(self, n, mode):
+ if mode == 'block':
+ np.block(self.block)
+ else: # mode == 'copy'
+ [arr.copy() for arr in self.arr_list]
# Retain old benchmark name for backward compat
time_3d.benchmark_name = "bench_shape_base.Block.time_3d"
diff --git a/benchmarks/benchmarks/bench_ufunc.py b/benchmarks/benchmarks/bench_ufunc.py
index a7e385f70..62e70782d 100644
--- a/benchmarks/benchmarks/bench_ufunc.py
+++ b/benchmarks/benchmarks/bench_ufunc.py
@@ -15,7 +15,7 @@ ufuncs = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
'isinf', 'isnan', 'isnat', 'lcm', 'ldexp', 'left_shift', 'less',
'less_equal', 'log', 'log10', 'log1p', 'log2', 'logaddexp',
'logaddexp2', 'logical_and', 'logical_not', 'logical_or',
- 'logical_xor', 'maximum', 'minimum', 'mod', 'modf', 'multiply',
+ 'logical_xor', 'matmul', 'maximum', 'minimum', 'mod', 'modf', 'multiply',
'negative', 'nextafter', 'not_equal', 'positive', 'power',
'rad2deg', 'radians', 'reciprocal', 'remainder', 'right_shift',
'rint', 'sign', 'signbit', 'sin', 'sinh', 'spacing', 'sqrt',
diff --git a/changelog/13829.enhancement.rst b/changelog/13829.enhancement.rst
new file mode 100644
index 000000000..ede1b2a53
--- /dev/null
+++ b/changelog/13829.enhancement.rst
@@ -0,0 +1,6 @@
+Add ``axis`` argument for ``random.permutation`` and ``random.shuffle``
+-----------------------------------------------------------------------
+
+Previously the ``random.permutation`` and ``random.shuffle`` functions
+could only shuffle an array along the first axis; they now have a
+new argument ``axis`` which allows shuffling along a specified axis.
diff --git a/doc/CAPI.rst.txt b/doc/CAPI.rst.txt
deleted file mode 100644
index ccee0fdb6..000000000
--- a/doc/CAPI.rst.txt
+++ /dev/null
@@ -1,320 +0,0 @@
-===============
-C-API for NumPy
-===============
-
-:Author: Travis Oliphant
-:Discussions to: `numpy-discussion@python.org`__
-:Created: October 2005
-
-__ https://scipy.org/scipylib/mailing-lists.html
-
-The C API of NumPy is (mostly) backward compatible with Numeric.
-
-There are a few non-standard Numeric usages (that were not really part
-of the API) that will need to be changed:
-
-* If you used any of the function pointers in the ``PyArray_Descr``
- structure you will have to modify your usage of those. First,
- the pointers are all under the member named ``f``. So ``descr->cast``
- is now ``descr->f->cast``. In addition, the
- casting functions have eliminated the strides argument (use
- ``PyArray_CastTo`` if you need strided casting). All functions have
- one or two ``PyArrayObject *`` arguments at the end. This allows the
- flexible arrays and mis-behaved arrays to be handled.
-
-* The ``descr->zero`` and ``descr->one`` constants have been replaced with
- function calls, ``PyArray_Zero``, and ``PyArray_One`` (be sure to read the
- code and free the resulting memory if you use these calls).
-
-* If you passed ``array->dimensions`` and ``array->strides`` around
- to functions, you will need to fix some code. These are now
- ``npy_intp*`` pointers. On 32-bit systems there won't be a problem.
- However, on 64-bit systems, you will need to make changes to avoid
- errors and segfaults.
-
-
-The header files ``arrayobject.h`` and ``ufuncobject.h`` contain many defines
-that you may find useful. The files ``__ufunc_api.h`` and
-``__multiarray_api.h`` contain the available C-API function calls with
-their function signatures.
-
-All of these headers are installed to
-``<YOUR_PYTHON_LOCATION>/site-packages/numpy/core/include``
-
-
-Getting arrays in C-code
-=========================
-
-All new arrays can be created using ``PyArray_NewFromDescr``. A simple interface
-equivalent to ``PyArray_FromDims`` is ``PyArray_SimpleNew(nd, dims, typenum)``
-and to ``PyArray_FromDimsAndData`` is
-``PyArray_SimpleNewFromData(nd, dims, typenum, data)``.
-
-This is a very flexible function.
-
-::
-
- PyObject * PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr,
- int nd, npy_intp *dims,
- npy_intp *strides, char *data,
- int flags, PyObject *obj);
-
-``subtype`` : ``PyTypeObject *``
- The subtype that should be created (either pass in
- ``&PyArray_Type``, or ``obj->ob_type``,
- where ``obj`` is an instance of a subtype (or subclass) of
- ``PyArray_Type``).
-
-``descr`` : ``PyArray_Descr *``
- The type descriptor for the array. This is a Python object (this
- function steals a reference to it). The easiest way to get one is
- using ``PyArray_DescrFromType(<typenum>)``. If you want to use a
- flexible size array, then you need to use
- ``PyArray_DescrNewFromType(<flexible typenum>)`` and set its ``elsize``
- parameter to the desired size. The typenum in both of these cases
- is one of the ``PyArray_XXXX`` enumerated types.
-
-``nd`` : ``int``
- The number of dimensions (<``MAX_DIMS``)
-
-``*dims`` : ``npy_intp *``
- A pointer to the size in each dimension. Information will be
- copied from here.
-
-``*strides`` : ``npy_intp *``
- The strides this array should have. For new arrays created by this
- routine, this should be ``NULL``. If you pass in memory for this array
- to use, then you can pass in the strides information as well
- (otherwise it will be created for you and default to C-contiguous
- or Fortran contiguous). Any strides will be copied into the array
- structure. Do not pass in bad strides information!!!!
-
- ``PyArray_CheckStrides(...)`` can help but you must call it if you are
- unsure. You cannot pass in strides information when data is ``NULL``
- and this routine is creating its own memory.
-
-``*data`` : ``char *``
- ``NULL`` for creating brand-new memory. If you want this array to wrap
- another memory area, then pass the pointer here. You are
- responsible for deleting the memory in that case, but do not do so
- until the new array object has been deleted. The best way to
- handle that is to get the memory from another Python object,
- ``INCREF`` that Python object after passing it's data pointer to this
- routine, and set the ``->base`` member of the returned array to the
- Python object. *You are responsible for* setting ``PyArray_BASE(ret)``
- to the base object. Failure to do so will create a memory leak.
-
- If you pass in a data buffer, the ``flags`` argument will be the flags
- of the new array. If you create a new array, a non-zero flags
- argument indicates that you want the array to be in Fortran order.
-
-``flags`` : ``int``
- Either the flags showing how to interpret the data buffer passed
- in, or if a new array is created, nonzero to indicate a Fortran
- order array. See below for an explanation of the flags.
-
-``obj`` : ``PyObject *``
- If subtypes is ``&PyArray_Type``, this argument is
- ignored. Otherwise, the ``__array_finalize__`` method of the subtype
- is called (if present) and passed this object. This is usually an
- array of the type to be created (so the ``__array_finalize__`` method
- must handle an array argument. But, it can be anything...)
-
-Note: The returned array object will be uninitialized unless the type is
-``PyArray_OBJECT`` in which case the memory will be set to ``NULL``.
-
-``PyArray_SimpleNew(nd, dims, typenum)`` is a drop-in replacement for
-``PyArray_FromDims`` (except it takes ``npy_intp*`` dims instead of ``int*`` dims
-which matters on 64-bit systems) and it does not initialize the memory
-to zero.
-
-``PyArray_SimpleNew`` is just a macro for ``PyArray_New`` with default arguments.
-Use ``PyArray_FILLWBYTE(arr, 0)`` to fill with zeros.
-
-The ``PyArray_FromDims`` and family of functions are still available and
-are loose wrappers around this function. These functions still take
-``int *`` arguments. This should be fine on 32-bit systems, but on 64-bit
-systems you may run into trouble if you frequently passed
-``PyArray_FromDims`` the dimensions member of the old ``PyArrayObject`` structure
-because ``sizeof(npy_intp) != sizeof(int)``.
-
-
-Getting an arrayobject from an arbitrary Python object
-======================================================
-
-``PyArray_FromAny(...)``
-
-This function replaces ``PyArray_ContiguousFromObject`` and friends (those
-function calls still remain but they are loose wrappers around the
-``PyArray_FromAny`` call).
-
-::
-
- static PyObject *
- PyArray_FromAny(PyObject *op, PyArray_Descr *dtype, int min_depth,
- int max_depth, int requires, PyObject *context)
-
-
-``op`` : ``PyObject *``
- The Python object to "convert" to an array object
-
-``dtype`` : ``PyArray_Descr *``
- The desired data-type descriptor. This can be ``NULL``, if the
- descriptor should be determined by the object. Unless ``FORCECAST`` is
- present in ``flags``, this call will generate an error if the data
- type cannot be safely obtained from the object.
-
-``min_depth`` : ``int``
- The minimum depth of array needed or 0 if doesn't matter
-
-``max_depth`` : ``int``
- The maximum depth of array allowed or 0 if doesn't matter
-
-``requires`` : ``int``
- A flag indicating the "requirements" of the returned array. These
- are the usual ndarray flags (see `NDArray flags`_ below). In
- addition, there are three flags used only for the ``FromAny``
- family of functions:
-
- - ``ENSURECOPY``: always copy the array. Returned arrays always
- have ``CONTIGUOUS``, ``ALIGNED``, and ``WRITEABLE`` set.
- - ``ENSUREARRAY``: ensure the returned array is an ndarray.
- - ``FORCECAST``: cause a cast to occur regardless of whether or
- not it is safe.
-
-``context`` : ``PyObject *``
- If the Python object ``op`` is not a numpy array, but has an
- ``__array__`` method, context is passed as the second argument to
- that method (the first is the typecode). Almost always this
- parameter is ``NULL``.
-
-
-``PyArray_ContiguousFromAny(op, typenum, min_depth, max_depth)`` is
-equivalent to ``PyArray_ContiguousFromObject(...)`` (which is still
-available), except it will return the subclass if op is already a
-subclass of the ndarray. The ``ContiguousFromObject`` version will
-always return an ndarray.
-
-Passing Data Type information to C-code
-=======================================
-
-All datatypes are handled using the ``PyArray_Descr *`` structure.
-This structure can be obtained from a Python object using
-``PyArray_DescrConverter`` and ``PyArray_DescrConverter2``. The former
-returns the default ``PyArray_LONG`` descriptor when the input object
-is None, while the latter returns ``NULL`` when the input object is ``None``.
-
-See the ``arraymethods.c`` and ``multiarraymodule.c`` files for many
-examples of usage.
-
-Getting at the structure of the array.
---------------------------------------
-
-You should use the ``#defines`` provided to access array structure portions:
-
-- ``PyArray_DATA(obj)`` : returns a ``void *`` to the array data
-- ``PyArray_BYTES(obj)`` : return a ``char *`` to the array data
-- ``PyArray_ITEMSIZE(obj)``
-- ``PyArray_NDIM(obj)``
-- ``PyArray_DIMS(obj)``
-- ``PyArray_DIM(obj, n)``
-- ``PyArray_STRIDES(obj)``
-- ``PyArray_STRIDE(obj,n)``
-- ``PyArray_DESCR(obj)``
-- ``PyArray_BASE(obj)``
-
-see more in ``arrayobject.h``
-
-
-NDArray Flags
-=============
-
-The ``flags`` attribute of the ``PyArrayObject`` structure contains important
-information about the memory used by the array (pointed to by the data member)
-This flags information must be kept accurate or strange results and even
-segfaults may result.
-
-There are 6 (binary) flags that describe the memory area used by the
-data buffer. These constants are defined in ``arrayobject.h`` and
-determine the bit-position of the flag. Python exposes a nice attribute-
-based interface as well as a dictionary-like interface for getting
-(and, if appropriate, setting) these flags.
-
-Memory areas of all kinds can be pointed to by an ndarray, necessitating
-these flags. If you get an arbitrary ``PyArrayObject`` in C-code,
-you need to be aware of the flags that are set.
-If you need to guarantee a certain kind of array
-(like ``NPY_CONTIGUOUS`` and ``NPY_BEHAVED``), then pass these requirements into the
-PyArray_FromAny function.
-
-
-``NPY_CONTIGUOUS``
- True if the array is (C-style) contiguous in memory.
-``NPY_FORTRAN``
- True if the array is (Fortran-style) contiguous in memory.
-
-Notice that contiguous 1-d arrays are always both ``NPY_FORTRAN`` contiguous
-and C contiguous. Both of these flags can be checked and are convenience
-flags only as whether or not an array is ``NPY_CONTIGUOUS`` or ``NPY_FORTRAN``
-can be determined by the ``strides``, ``dimensions``, and ``itemsize``
-attributes.
-
-``NPY_OWNDATA``
- True if the array owns the memory (it will try and free it using
- ``PyDataMem_FREE()`` on deallocation --- so it better really own it).
-
-These three flags facilitate using a data pointer that is a memory-mapped
-array, or part of some larger record array. But, they may have other uses...
-
-``NPY_ALIGNED``
- True if the data buffer is aligned for the type and the strides
- are multiples of the alignment factor as well. This can be
- checked.
-
-``NPY_WRITEABLE``
- True only if the data buffer can be "written" to.
-
-``NPY_WRITEBACKIFCOPY``
- This is a special flag that is set if this array represents a copy
- made because a user required certain flags in ``PyArray_FromAny`` and
- a copy had to be made of some other array (and the user asked for
- this flag to be set in such a situation). The base attribute then
- points to the "misbehaved" array (which is set read_only). If you use
- this flag, you are must call ``PyArray_ResolveWritebackIfCopy`` before
- deallocating this array (i.e. before calling ``Py_DECREF`` the last time)
- which will write the data contents back to the "misbehaved" array (casting
- if necessary) and will reset the "misbehaved" array to ``WRITEABLE``. If
- the "misbehaved" array was not ``WRITEABLE`` to begin with then
- ``PyArray_FromAny`` would have returned an error because ``WRITEBACKIFCOPY``
- would not have been possible. In error conditions, call
- ``PyArray_DiscardWritebackIfCopy`` to throw away the scratch buffer, then
- ``Py_DECREF`` or ``Py_XDECREF``.
-
-``NPY_UPDATEIFCOPY``
- Similar to ``NPY_WRITEBACKIFCOPY``, but deprecated since it copied the
- contents back when the array is deallocated, which is not explicit and
- relies on refcount semantics. Refcount semantics are unreliable on
- alternative implementations of python such as PyPy.
-
-``PyArray_UpdateFlags(obj, flags)`` will update the ``obj->flags`` for
-``flags`` which can be any of ``NPY_CONTIGUOUS``, ``NPY_FORTRAN``, ``NPY_ALIGNED``, or
-``NPY_WRITEABLE``.
-
-Some useful combinations of these flags:
-
-- ``NPY_BEHAVED = NPY_ALIGNED | NPY_WRITEABLE``
-- ``NPY_CARRAY = NPY_DEFAULT = NPY_CONTIGUOUS | NPY_BEHAVED``
-- ``NPY_CARRAY_RO = NPY_CONTIGUOUS | NPY_ALIGNED``
-- ``NPY_FARRAY = NPY_FORTRAN | NPY_BEHAVED``
-- ``NPY_FARRAY_RO = NPY_FORTRAN | NPY_ALIGNED``
-
-The macro ``PyArray_CHECKFLAGS(obj, flags)`` can test any combination of flags.
-There are several default combinations defined as macros already
-(see ``arrayobject.h``)
-
-In particular, there are ``ISBEHAVED``, ``ISBEHAVED_RO``, ``ISCARRAY``
-and ``ISFARRAY`` macros that also check to make sure the array is in
-native byte order (as determined) by the data-type descriptor.
-
-There are more C-API enhancements which you can discover in the code,
-or buy the book (http://www.trelgol.com)
diff --git a/doc/C_STYLE_GUIDE.rst.txt b/doc/C_STYLE_GUIDE.rst.txt
index a5726f16f..07f4b99df 100644
--- a/doc/C_STYLE_GUIDE.rst.txt
+++ b/doc/C_STYLE_GUIDE.rst.txt
@@ -10,9 +10,6 @@ to achieve uniformity. Because the NumPy conventions are very close to
those in PEP-0007, that PEP is used as a template below with the NumPy
additions and variations in the appropriate spots.
-NumPy modified PEP-0007
-=======================
-
Introduction
------------
@@ -31,10 +28,7 @@ Two good reasons to break a particular rule:
C dialect
---------
-* Use ANSI/ISO standard C (the 1989 version of the standard).
- This means, amongst many other things, that all declarations
- must be at the top of a block (not necessarily at the top of
- function).
+* Use C99 (that is, the standard defined by ISO/IEC 9899:1999).
* Don't use GCC extensions (e.g. don't write multi-line strings
without trailing backslashes). Preferably break long strings
@@ -49,9 +43,6 @@ C dialect
* All function declarations and definitions must use full
prototypes (i.e. specify the types of all arguments).
-* Do not use C++ style // one line comments, they aren't portable.
- Note: this will change with the proposed transition to C++.
-
* No compiler warnings with major compilers (gcc, VC++, a few others).
Note: NumPy still produces compiler warnings that need to be addressed.
@@ -138,7 +129,7 @@ Code lay-out
the open paren, no spaces inside the parens, no spaces before
commas, one space after each comma.
-* Always put spaces around assignment, Boolean and comparison
+* Always put spaces around the assignment, Boolean and comparison
operators. In expressions using a lot of operators, add spaces
around the outermost (lowest priority) operators.
@@ -179,12 +170,12 @@ Code lay-out
Trailing comments should be used sparingly. Instead of ::
- if (yes) {/* Success! */
+ if (yes) { // Success!
do ::
if (yes) {
- /* Success! */
+ // Success!
* All functions and global variables should be declared static
when they aren't needed outside the current compilation unit.
@@ -201,7 +192,7 @@ Naming conventions
In the future the names should be of the form ``Npy*_PublicFunction``,
where the star is something appropriate.
-* Public Macros should have a NPY_ prefix and then use upper case,
+* Public Macros should have a ``NPY_`` prefix and then use upper case,
for example, ``NPY_DOUBLE``.
* Private functions should be lower case with underscores, for example:
diff --git a/doc/DISTUTILS.rst.txt b/doc/DISTUTILS.rst.txt
index c027afff2..bcef82500 100644
--- a/doc/DISTUTILS.rst.txt
+++ b/doc/DISTUTILS.rst.txt
@@ -243,7 +243,7 @@ in writing setup scripts:
after processing all source generators, no extension module will
be built. This is the recommended way to conditionally define
extension modules. Source generator functions are called by the
- ``build_src`` command of ``numpy.distutils``.
+ ``build_src`` sub-command of ``numpy.distutils``.
For example, here is a typical source generator function::
@@ -297,11 +297,182 @@ in writing setup scripts:
+ ``config.get_info(*names)`` ---
-Template files
---------------
-XXX: Describe how files with extensions ``.f.src``, ``.pyf.src``,
-``.c.src``, etc. are pre-processed by the ``build_src`` command.
+.. _templating:
+
+Conversion of ``.src`` files using Templates
+--------------------------------------------
+
+NumPy distutils supports automatic conversion of source files named
+<somefile>.src. This facility can be used to maintain very similar
+code blocks requiring only simple changes between blocks. During the
+build phase of setup, if a template file named <somefile>.src is
+encountered, a new file named <somefile> is constructed from the
+template and placed in the build directory to be used instead. Two
+forms of template conversion are supported. The first form occurs for
+files named <file>.ext.src where ext is a recognized Fortran
+extension (f, f90, f95, f77, for, ftn, pyf). The second form is used
+for all other cases.
+
+.. index::
+ single: code generation
+
+Fortran files
+-------------
+
+This template converter will replicate all **function** and
+**subroutine** blocks in the file with names that contain '<...>'
+according to the rules in '<...>'. The number of comma-separated words
+in '<...>' determines the number of times the block is repeated. What
+these words are indicates what that repeat rule, '<...>', should be
+replaced with in each block. All of the repeat rules in a block must
+contain the same number of comma-separated words indicating the number
+of times that block should be repeated. If the word in the repeat rule
+needs a comma, leftarrow, or rightarrow, then prepend it with a
+backslash ' \'. If a word in the repeat rule matches ' \\<index>' then
+it will be replaced with the <index>-th word in the same repeat
+specification. There are two forms for the repeat rule: named and
+short.
+
+Named repeat rule
+^^^^^^^^^^^^^^^^^
+
+A named repeat rule is useful when the same set of repeats must be
+used several times in a block. It is specified using <rule1=item1,
+item2, item3,..., itemN>, where N is the number of times the block
+should be repeated. On each repeat of the block, the entire
+expression, '<...>' will be replaced first with item1, and then with
+item2, and so forth until N repeats are accomplished. Once a named
+repeat specification has been introduced, the same repeat rule may be
+used **in the current block** by referring only to the name
+(i.e. <rule1>).
+
+
+Short repeat rule
+^^^^^^^^^^^^^^^^^
+
+A short repeat rule looks like <item1, item2, item3, ..., itemN>. The
+rule specifies that the entire expression, '<...>' should be replaced
+first with item1, and then with item2, and so forth until N repeats
+are accomplished.
+
+
+Pre-defined names
+^^^^^^^^^^^^^^^^^
+
+The following predefined named repeat rules are available:
+
+- <prefix=s,d,c,z>
+
+- <_c=s,d,c,z>
+
+- <_t=real, double precision, complex, double complex>
+
+- <ftype=real, double precision, complex, double complex>
+
+- <ctype=float, double, complex_float, complex_double>
+
+- <ftypereal=float, double precision, \\0, \\1>
+
+- <ctypereal=float, double, \\0, \\1>
+
+
+Other files
+------------
+
+Non-Fortran files use a separate syntax for defining template blocks
+that should be repeated using a variable expansion similar to the
+named repeat rules of the Fortran-specific repeats.
+
+NumPy Distutils preprocesses C source files (extension: :file:`.c.src`) written
+in a custom templating language to generate C code. The :c:data:`@` symbol is
+used to wrap macro-style variables to empower a string substitution mechanism
+that might describe (for instance) a set of data types.
+
+The template language blocks are delimited by :c:data:`/**begin repeat`
+and :c:data:`/**end repeat**/` lines, which may also be nested using
+consecutively numbered delimiting lines such as :c:data:`/**begin repeat1`
+and :c:data:`/**end repeat1**/`:
+
+1. "/\**begin repeat" on a line by itself marks the beginning of
+a segment that should be repeated.
+
+2. Named variable expansions are defined using ``#name=item1, item2, item3,
+..., itemN#`` and placed on successive lines. These variables are
+replaced in each repeat block with the corresponding word. All named
+variables in the same repeat block must define the same number of
+words.
+
+3. In specifying the repeat rule for a named variable, ``item*N`` is
+shorthand for ``item, item, ..., item`` repeated N times. In addition,
+parenthesis in combination with \*N can be used for grouping several
+items that should be repeated. Thus, #name=(item1, item2)*4# is
+equivalent to #name=item1, item2, item1, item2, item1, item2, item1,
+item2#
+
+4. "\*/" on a line by itself marks the end of the variable expansion
+naming. The next line is the first line that will be repeated using
+the named rules.
+
+5. Inside the block to be repeated, the variables that should be expanded
+are specified as ``@name@``
+
+6. "/\**end repeat**/" on a line by itself marks the previous line
+as the last line of the block to be repeated.
+
+7. A loop in the NumPy C source code may have a ``@TYPE@`` variable, targeted
+for string substitution, which is preprocessed to a number of otherwise
+identical loops with several strings such as INT, LONG, UINT, ULONG. The
+``@TYPE@`` style syntax thus reduces code duplication and maintenance burden by
+mimicking languages that have generic type support.
+
+The above rules may be clearer in the following template source example:
+
+.. code-block:: NumPyC
+ :linenos:
+ :emphasize-lines: 3, 13, 29, 31
+
+ /* TIMEDELTA to non-float types */
+
+ /**begin repeat
+ *
+ * #TOTYPE = BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, DATETIME,
+ * TIMEDELTA#
+ * #totype = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
+ * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_datetime, npy_timedelta#
+ */
+
+ /**begin repeat1
+ *
+ * #FROMTYPE = TIMEDELTA#
+ * #fromtype = npy_timedelta#
+ */
+ static void
+ @FROMTYPE@_to_@TOTYPE@(void *input, void *output, npy_intp n,
+ void *NPY_UNUSED(aip), void *NPY_UNUSED(aop))
+ {
+ const @fromtype@ *ip = input;
+ @totype@ *op = output;
+
+ while (n--) {
+ *op++ = (@totype@)*ip++;
+ }
+ }
+ /**end repeat1**/
+
+ /**end repeat**/
+
+The preprocessing of generically typed C source files (whether in NumPy
+proper or in any third party package using NumPy Distutils) is performed
+by `conv_template.py`_.
+The type specific C files generated (extension: .c)
+by these modules during the build process are ready to be compiled. This
+form of generic typing is also supported for C header files (preprocessed
+to produce .h files).
+
+.. _conv_template.py: https://github.com/numpy/numpy/blob/master/numpy/distutils/conv_template.py
Useful functions in ``numpy.distutils.misc_util``
-------------------------------------------------
@@ -427,7 +598,7 @@ Extra features in NumPy Distutils
'''''''''''''''''''''''''''''''''
Specifying config_fc options for libraries in setup.py script
-------------------------------------------------------------
+-------------------------------------------------------------
It is possible to specify config_fc options in setup.py scripts.
For example, using
diff --git a/doc/HOWTO_RELEASE.rst.txt b/doc/HOWTO_RELEASE.rst.txt
index a6a8fe8ab..4b485c8b9 100644
--- a/doc/HOWTO_RELEASE.rst.txt
+++ b/doc/HOWTO_RELEASE.rst.txt
@@ -5,13 +5,13 @@ Current build and release info
==============================
The current info on building and releasing NumPy and SciPy is scattered in
-several places. It should be summarized in one place, updated and where
+several places. It should be summarized in one place, updated, and where
necessary described in more detail. The sections below list all places where
useful info can be found.
Source tree
-----------
-* INSTALL.txt
+* INSTALL.rst.txt
* release.sh
* pavement.py
@@ -37,8 +37,8 @@ Supported platforms and versions
================================
Python 2.7 and >=3.4 are the currently supported versions when building from
-source. We test numpy against all these versions every time we merge code to
-trunk. Binary installers may be available for a subset of these versions (see
+source. We test NumPy against all these versions every time we merge code to
+master. Binary installers may be available for a subset of these versions (see
below).
OS X
@@ -54,7 +54,7 @@ Windows
-------
We build 32- and 64-bit wheels for Python 2.7, 3.4, 3.5 on Windows. Windows
-XP, Vista, 7, 8 and 10 are supported. We build numpy using the MSVC compilers
+XP, Vista, 7, 8 and 10 are supported. We build NumPy using the MSVC compilers
on Appveyor, but we are hoping to update to a `mingw-w64 toolchain
<https://mingwpy.github.io>`_. The Windows wheels use ATLAS for BLAS / LAPACK.
@@ -62,7 +62,7 @@ Linux
-----
We build and ship `manylinux1 <https://www.python.org/dev/peps/pep-0513>`_
-wheels for numpy. Many Linux distributions include their own binary builds
+wheels for NumPy. Many Linux distributions include their own binary builds
of NumPy.
BSD / Solaris
@@ -93,7 +93,7 @@ each platform. At the moment this means:
* Manylinux1 wheels use the gcc provided on the Manylinux docker images.
You will need Cython for building the binaries. Cython compiles the ``.pyx``
-files in the numpy distribution to ``.c`` files.
+files in the NumPy distribution to ``.c`` files.
Building source archives and wheels
-----------------------------------
@@ -130,9 +130,9 @@ Uploading to PyPI
Generating author/pr lists
--------------------------
-You will need an personal access token
+You will need a personal access token
`<https://help.github.com/articles/creating-a-personal-access-token-for-the-command-line/>`_
-so that scripts can access the github numpy repository
+so that scripts can access the github NumPy repository.
* gitpython (pip)
* pygithub (pip)
@@ -182,8 +182,8 @@ After a date is set, create a new maintenance/x.y.z branch, add new empty
release notes for the next version in the master branch and update the Trac
Milestones.
-Make sure current trunk builds a package correctly
---------------------------------------------------
+Make sure current branch builds a package correctly
+---------------------------------------------------
::
git clean -fxd
@@ -191,23 +191,12 @@ Make sure current trunk builds a package correctly
python setup.py sdist
To actually build the binaries after everything is set up correctly, the
-release.sh script can be used. For details of the build process itself it is
+release.sh script can be used. For details of the build process itself, it is
best to read the pavement.py script.
.. note:: The following steps are repeated for the beta(s), release
candidates(s) and the final release.
-Check that docs can be built
-----------------------------
-Do::
-
- cd doc/
- make dist
-
-to check that the documentation is in a buildable state. See
-doc/HOWTO_BUILD_DOCS.rst.txt for more details and for how to update
-https://docs.scipy.org.
-
Check deprecations
------------------
Before the release branch is made, it should be checked that all deprecated
@@ -233,7 +222,7 @@ There are three steps to the process.
2. If the C_API_VERSION in the first step has changed, or if the hash of
the API has changed, the cversions.txt file needs to be updated. To check
- the hash, run the script numpy/core/cversions.py and note the api hash that
+ the hash, run the script numpy/core/cversions.py and note the API hash that
is printed. If that hash does not match the last hash in
numpy/core/code_generators/cversions.txt the hash has changed. Using both
the appropriate C_API_VERSION and hash, add a new entry to cversions.txt.
@@ -244,7 +233,7 @@ There are three steps to the process.
definitive.
If steps 1 and 2 are done correctly, compiling the release should not give
- a warning "API mismatch detect at the beginning of the build.
+ a warning "API mismatch detected at the beginning of the build".
3. The numpy/core/include/numpy/numpyconfig.h will need a new
NPY_X_Y_API_VERSION macro, where X and Y are the major and minor version
@@ -257,12 +246,19 @@ updated for a major release.
Check the release notes
-----------------------
-Check that the release notes are up-to-date.
+Use `towncrier`_ to build the release note, copy it to the proper name, and
+commit the changes. This will remove all the fragments from ``changelog/*.rst``
+and add ``doc/release/latest-note.rst`` which must be renamed with the proper
+version number::
+
+ python -mtowncrier --version "Numpy 1.11.0"
+ git mv doc/release/latest-note.rst doc/release/1.11.0-notes.rst
+ git commit -m"Create release note"
-Write or update the release notes in a file named for the release, such as
-``doc/release/1.11.0-notes.rst``.
+Check that the release notes are up-to-date.
-Mention at least the following:
+Update the release notes with a Highlights section. Mention some of the
+following:
- major new features
- deprecated and removed features
@@ -270,8 +266,7 @@ Mention at least the following:
- for SciPy, supported NumPy version(s)
- outlook for the near future
-Also make sure that as soon as the branch is made, there is a new release
-notes file in trunk for the next release.
+.. _towncrier: https://github.com/hawkowl/towncrier
Update the release status and create a release "tag"
----------------------------------------------------
@@ -318,10 +313,10 @@ The ``-s`` flag makes a PGP (usually GPG) signed tag. Please do sign the
release tags.
The release tag should have the release number in the annotation (tag
-message). Unfortunately the name of a tag can be changed without breaking the
+message). Unfortunately, the name of a tag can be changed without breaking the
signature, the contents of the message cannot.
-See : https://github.com/scipy/scipy/issues/4919 for a discussion of signing
+See: https://github.com/scipy/scipy/issues/4919 for a discussion of signing
release tags, and https://keyring.debian.org/creating-key.html for instructions
on creating a GPG key if you do not have one.
@@ -383,14 +378,24 @@ Build the changelog and notes for upload with::
paver write_release_and_log
-The tar-files and binary releases for distribution should be uploaded to SourceForge,
-together with the Release Notes and the Changelog. Uploading can be done
-through a web interface or, more efficiently, through scp/sftp/rsync as
-described in the SourceForge
-`upload guide <https://sourceforge.net/apps/trac/sourceforge/wiki/Release%20files%20for%20download>`_ (dead link).
-For example::
+Build and archive documentation
+-------------------------------
+Do::
+
+ cd doc/
+ make dist
+
+to check that the documentation is in a buildable state. Then, after tagging,
+create an archive of the documentation in the numpy/doc repo::
- scp <filename> <username>,numpy@frs.sourceforge.net:/home/frs/project/n/nu/numpy/NumPy/<releasedir>/
+ # This checks out github.com/numpy/doc and adds (``git add``) the
+ # documentation to the checked out repo.
+ make merge-doc
+ # Now edit the ``index.html`` file in the repo to reflect the new content,
+ # and commit the changes
+ git -C dist/merge commit -am "Add documentation for <version>"
+ # Push to numpy/doc repo
+ git -C dist/merge push
Update PyPI
-----------
@@ -443,28 +448,6 @@ you released you can push the tag and release commit up to github::
where ``upstream`` points to the main https://github.com/numpy/numpy.git
repository.
-Update docs.scipy.org
----------------------
-
-All documentation for a release can be updated on https://docs.scipy.org/ with:
-
- make dist
- make upload USERNAME=<yourname> RELEASE=1.11.0
-
-Note that ``<username>`` must have SSH credentials on the server. If you don't
-have those, ask someone who does (the list currently includes @rgommers,
-@juliantaylor and @pv).
-
-Also rebuild and upload ``docs.scipy.org`` front page, if the release
-series is a new one. The front page sources have their own repo:
-https://github.com/scipy/docs.scipy.org. Do the following:
-
-- Update ``index.rst`` for the new version.
-- ``make dist``
-- Check that the built documentation is OK.
-- ``touch output-is-fine``
-- ``make upload USERNAME=<username> RELEASE=1.x.y``
-
Update scipy.org
----------------
@@ -479,9 +462,9 @@ Announce to the lists
The release should be announced on the mailing lists of
NumPy and SciPy, to python-announce, and possibly also those of
-Matplotlib,IPython and/or Pygame.
+Matplotlib, IPython and/or Pygame.
-During the beta/RC phase an explicit request for testing the binaries with
+During the beta/RC phase, an explicit request for testing the binaries with
several other libraries (SciPy/Matplotlib/Pygame) should be posted on the
mailing list.
@@ -497,5 +480,5 @@ After the final release is announced, a few administrative tasks are left to be
done:
- Forward port changes in the release branch to release notes and release
- scripts, if any, to trunk.
+ scripts, if any, to master branch.
- Update the Milestones in Trac.
diff --git a/doc/Makefile b/doc/Makefile
index 667dbef29..3c32cb811 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -1,23 +1,35 @@
# Makefile for Sphinx documentation
#
-PYVER = 3.6
+# PYVER needs to be major.minor, just "3" doesn't work - it will result in
+# issues with the amendments to PYTHONPATH and install paths (see DIST_VARS).
+
+# Use explicit "version_info" indexing since make cannot handle colon characters, and
+# evaluate it now to allow easier debugging when printing the variable
+
+PYVER:=$(shell python3 -c 'from sys import version_info as v; print("{0}.{1}".format(v[0], v[1]))')
PYTHON = python$(PYVER)
# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = LANG=C sphinx-build
-PAPER =
+SPHINXOPTS ?=
+SPHINXBUILD ?= LANG=C sphinx-build
+PAPER ?=
+# For merging a documentation archive into a git checkout of numpy/doc
+# Turn a tag like v1.18.0 into 1.18
+# Use sed -n -e 's/pattern/match/p' to return a blank value if no match
+TAG ?= $(shell git describe --tag | sed -n -e's,v\([1-9]\.[0-9]*\)\.[0-9].*,\1,p')
FILES=
# Internal variables.
PAPEROPT_a4 = -D latex_paper_size=a4
PAPEROPT_letter = -D latex_paper_size=letter
-ALLSPHINXOPTS = -d build/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source
+ALLSPHINXOPTS = -WT --keep-going -d build/doctrees $(PAPEROPT_$(PAPER)) \
+ $(SPHINXOPTS) source
.PHONY: help clean html web pickle htmlhelp latex changes linkcheck \
- dist dist-build gitwash-update
+ dist dist-build gitwash-update version-check html-build latex-build \
+ merge-doc
#------------------------------------------------------------------------------
@@ -33,9 +45,11 @@ help:
@echo " dist PYVER=... to make a distribution-ready tree"
@echo " gitwash-update GITWASH=path/to/gitwash update gitwash developer docs"
@echo " upload USERNAME=... RELEASE=... to upload built docs to docs.scipy.org"
+ @echo " merge-doc TAG=... to clone numpy/doc and archive documentation into it"
clean:
- -rm -rf build/* source/reference/generated
+ -rm -rf build/*
+ find . -name generated -type d -prune -exec rm -rf "{}" ";"
gitwash-update:
rm -rf source/dev/gitwash
@@ -58,19 +72,40 @@ gitwash-update:
#
-INSTALL_DIR = $(CURDIR)/build/inst-dist/
+INSTALL_DIR = $(CURDIR)/build/inst-dist
INSTALL_PPH = $(INSTALL_DIR)/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/site-packages:$(INSTALL_DIR)/lib/python$(PYVER)/dist-packages:$(INSTALL_DIR)/local/lib/python$(PYVER)/dist-packages
UPLOAD_DIR=/srv/docs_scipy_org/doc/numpy-$(RELEASE)
-DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)" SPHINXOPTS="$(SPHINXOPTS)"
+DIST_VARS=SPHINXBUILD="LANG=C PYTHONPATH=$(INSTALL_PPH) python$(PYVER) `which sphinx-build`" PYTHON="PYTHONPATH=$(INSTALL_PPH) python$(PYVER)"
+
+NUMPYVER:=$(shell $(PYTHON) -c "import numpy; print(numpy.version.git_revision[:10])" 2>/dev/null)
+GITVER ?= $(shell cd ..; $(PYTHON) -c "from setup import git_version; \
+ print(git_version()[:10])")
+
+version-check:
+ifeq "$(GITVER)" "Unknown"
+ # @echo sdist build with unlabeled sources
+else ifeq ("", "$(NUMPYVER)")
+ @echo numpy not found, cannot build documentation without successful \"import numpy\"
+ @exit 1
+else ifneq ($(NUMPYVER),$(GITVER))
+ @echo installed numpy $(NUMPYVER) != current repo git version \'$(GITVER)\'
+ @echo use '"make dist"' or '"GITVER=$(NUMPYVER) make $(MAKECMDGOALS) ..."'
+ @exit 1
+else
+ # for testing
+ # @echo installed numpy $(NUMPYVER) matches git version $(GITVER); exit 1
+endif
-dist:
+
+dist: build/dist.tar.gz
+
+build/dist.tar.gz:
make $(DIST_VARS) real-dist
-real-dist: dist-build html html-scipyorg
- test -d build/latex || make latex
+real-dist: dist-build html-build html-scipyorg
+ test -d build/latex || make latex-build
make -C build/latex all-pdf
- -test -d build/htmlhelp || make htmlhelp-build
-rm -rf build/dist
cp -r build/html-scipyorg build/dist
cd build/html && zip -9r ../dist/numpy-html.zip .
@@ -86,7 +121,7 @@ dist-build:
install -d $(subst :, ,$(INSTALL_PPH))
$(PYTHON) `which easy_install` --prefix=$(INSTALL_DIR) ../dist/*.egg
-upload:
+upload: build/dist.tar.gz
# SSH must be correctly configured for this to work.
# Assumes that ``make dist`` was already run
# Example usage: ``make upload USERNAME=rgommers RELEASE=1.10.1``
@@ -103,6 +138,32 @@ upload:
ssh $(USERNAME)@docs.scipy.org rm $(UPLOAD_DIR)/dist.tar.gz
ssh $(USERNAME)@docs.scipy.org ln -snf numpy-$(RELEASE) /srv/docs_scipy_org/doc/numpy
+
+merge-doc: build/dist.tar.gz
+ifeq "$(TAG)" ""
+ echo tag "$(TAG)" not of the form 1.18;
+ exit 1;
+endif
+ @# Only clone if the directory does not exist
+ @if ! test -d build/merge; then \
+ git clone https://github.com/numpy/doc build/merge; \
+ fi;
+ @# Remove any old content and copy in the new, add it to git
+ -rm -rf build/merge/$(TAG)/*
+ -mkdir -p build/merge/$(TAG)
+ @# -C changes working directory
+ tar -C build/merge/$(TAG) -xf build/dist.tar.gz
+ git -C build/merge add $(TAG)
+ @# For now, the user must do this. If it is onerous, automate it and change
+ @# the instructions in doc/HOWTO_RELEASE.rst.txt
+ @echo " "
+ @echo New documentation archive added to ./build/merge.
+ @echo Now add/modify the appropriate section after
+ @echo " <!-- insert here -->"
+ @echo in build/merge/index.html,
+ @echo then \"git commit\", \"git push\"
+
+
#------------------------------------------------------------------------------
# Basic Sphinx generation rules for different formats
#------------------------------------------------------------------------------
@@ -112,7 +173,8 @@ build/generate-stamp: $(wildcard source/reference/*.rst)
mkdir -p build
touch build/generate-stamp
-html: generate
+html: version-check html-build
+html-build: generate
mkdir -p build/html build/doctrees
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) build/html $(FILES)
$(PYTHON) postprocess.py html build/html/*.html
@@ -125,7 +187,7 @@ html-scipyorg:
@echo
@echo "Build finished. The HTML pages are in build/html."
-pickle: generate
+pickle: generate version-check
mkdir -p build/pickle build/doctrees
$(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) build/pickle $(FILES)
@echo
@@ -135,7 +197,7 @@ pickle: generate
web: pickle
-htmlhelp: generate
+htmlhelp: generate version-check
mkdir -p build/htmlhelp build/doctrees
$(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) build/htmlhelp $(FILES)
@echo
@@ -146,11 +208,12 @@ htmlhelp-build: htmlhelp build/htmlhelp/numpy.chm
%.chm: %.hhp
-hhc.exe $^
-qthelp: generate
+qthelp: generate version-check
mkdir -p build/qthelp build/doctrees
$(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) build/qthelp $(FILES)
-latex: generate
+latex: version-check latex-build
+latex-build: generate
mkdir -p build/latex build/doctrees
$(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) build/latex $(FILES)
$(PYTHON) postprocess.py tex build/latex/*.tex
@@ -160,18 +223,18 @@ latex: generate
@echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \
"run these through (pdf)latex."
-coverage: build
+coverage: build version-check
mkdir -p build/coverage build/doctrees
$(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) build/coverage $(FILES)
@echo "Coverage finished; see c.txt and python.txt in build/coverage"
-changes: generate
+changes: generate version-check
mkdir -p build/changes build/doctrees
$(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) build/changes $(FILES)
@echo
@echo "The overview file is in build/changes."
-linkcheck: generate
+linkcheck: generate version-check
mkdir -p build/linkcheck build/doctrees
$(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) build/linkcheck $(FILES)
@echo
diff --git a/doc/Py3K.rst.txt b/doc/Py3K.rst.txt
index f78b9e5db..b23536ca5 100644
--- a/doc/Py3K.rst.txt
+++ b/doc/Py3K.rst.txt
@@ -812,20 +812,20 @@ Types with tp_as_sequence defined
PySequenceMethods in py3k are binary compatible with py2k, but some of the
slots have gone away. I suspect this means some functions need redefining so
-the semantics of the slots needs to be checked.
-
-PySequenceMethods foo_sequence_methods = {
- (lenfunc)0, /* sq_length */
- (binaryfunc)0, /* sq_concat */
- (ssizeargfunc)0, /* sq_repeat */
- (ssizeargfunc)0, /* sq_item */
- (void *)0, /* nee sq_slice */
- (ssizeobjargproc)0, /* sq_ass_item */
- (void *)0, /* nee sq_ass_slice */
- (objobjproc)0, /* sq_contains */
- (binaryfunc)0, /* sq_inplace_concat */
- (ssizeargfunc)0 /* sq_inplace_repeat */
-};
+the semantics of the slots needs to be checked::
+
+ PySequenceMethods foo_sequence_methods = {
+ (lenfunc)0, /* sq_length */
+ (binaryfunc)0, /* sq_concat */
+ (ssizeargfunc)0, /* sq_repeat */
+ (ssizeargfunc)0, /* sq_item */
+ (void *)0, /* nee sq_slice */
+ (ssizeobjargproc)0, /* sq_ass_item */
+ (void *)0, /* nee sq_ass_slice */
+ (objobjproc)0, /* sq_contains */
+ (binaryfunc)0, /* sq_inplace_concat */
+ (ssizeargfunc)0 /* sq_inplace_repeat */
+ };
PyMappingMethods
@@ -840,13 +840,13 @@ Types with tp_as_mapping defined
* multiarray/arrayobject.c
PyMappingMethods in py3k look to be the same as in py2k. The semantics
-of the slots needs to be checked.
+of the slots needs to be checked::
-PyMappingMethods foo_mapping_methods = {
- (lenfunc)0, /* mp_length */
- (binaryfunc)0, /* mp_subscript */
- (objobjargproc)0 /* mp_ass_subscript */
-};
+ PyMappingMethods foo_mapping_methods = {
+ (lenfunc)0, /* mp_length */
+ (binaryfunc)0, /* mp_subscript */
+ (objobjargproc)0 /* mp_ass_subscript */
+ };
PyFile
diff --git a/doc/RELEASE_WALKTHROUGH.rst.txt b/doc/RELEASE_WALKTHROUGH.rst.txt
index c3400194c..0a761e350 100644
--- a/doc/RELEASE_WALKTHROUGH.rst.txt
+++ b/doc/RELEASE_WALKTHROUGH.rst.txt
@@ -6,6 +6,11 @@ replace 1.14.5 by the correct version.
Release Walkthrough
====================
+Note that in the code snippets below, ``upstream`` refers to the root repository on
+github and ``origin`` to a fork in your personal account. You may need to make adjustments
+if you have not forked the repository but simply cloned it locally. You can
+also edit ``.git/config`` and add ``upstream`` if it isn't already present.
+
Backport Pull Requests
----------------------
@@ -33,6 +38,11 @@ to the maintenance branch, and later will be forward ported to master.
Finish the Release Note
-----------------------
+.. note::
+
+ This has changed now that we use ``towncrier``. See the instructions for
+ creating the release note in ``doc/release/upcoming_changes/README.rst``.
+
Fill out the release note ``doc/release/1.14.5-notes.rst`` calling out
significant changes.
@@ -46,7 +56,7 @@ repository::
$ git checkout maintenance/1.14.x
$ git pull upstream maintenance/1.14.x
$ git submodule update
- $ git clean -xdf
+ $ git clean -xdfq
Edit pavement.py and setup.py as detailed in HOWTO_RELEASE::
@@ -55,7 +65,7 @@ Edit pavement.py and setup.py as detailed in HOWTO_RELEASE::
Sanity check::
- $ python runtests.py -m "full"
+ $ python runtests.py -m "full" # NumPy < 1.17 only
$ python3 runtests.py -m "full"
Push this release directly onto the end of the maintenance branch. This
@@ -73,7 +83,7 @@ Paver is used to build the source releases. It will create the ``release`` and
``release/installers`` directories and put the ``*.zip`` and ``*.tar.gz``
source releases in the latter. ::
- $ cython --version # check that you have the correct cython version
+ $ python3 -m cython --version # check for correct cython version
$ paver sdist # sdist will do a git clean -xdf, so we omit that
@@ -86,7 +96,7 @@ commit. This can take a while. The numpy-wheels repository is cloned from
may have been accessed and changed by someone else and a push will fail::
$ cd ../numpy-wheels
- $ git pull origin master
+ $ git pull upstream master
$ git branch <new version> # only when starting new numpy version
$ git checkout v1.14.x # v1.14.x already existed for the 1.14.4 release
@@ -94,9 +104,9 @@ Edit the ``.travis.yml`` and ``.appveyor.yml`` files to make sure they have the
correct version, and put in the commit hash for the ``REL`` commit created
above for ``BUILD_COMMIT``, see the _example from `v1.14.3`::
- $ gvim .travis.yml appveyor.yml
+ $ gvim .travis.yml .appveyor.yml
$ git commit -a
- $ git push origin HEAD
+ $ git push upstream HEAD
Now wait. If you get nervous at the amount of time taken -- the builds can take
several hours-- you can check the build progress by following the links
@@ -121,7 +131,7 @@ download all the wheels to the ``../numpy/release/installers`` directory and
upload later using ``twine``::
$ cd ../terryfy
- $ git pull origin master
+ $ git pull upstream master
$ CDN_URL=https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com
$ NPY_WHLS=../numpy/release/installers
$ ./wheel-uploader -u $CDN_URL -n -v -w $NPY_WHLS -t win numpy 1.14.5
@@ -134,10 +144,11 @@ environment.
Generate the README files
-------------------------
-This needs to be done after all installers are present, but before the pavement
-file is updated for continued development.
+This needs to be done after all installers are downloaded, but before the pavement
+file is updated for continued development::
- $ paver write_release_and_log
+ $ cd ../numpy
+ $ paver write_release
Tag the release
@@ -145,7 +156,7 @@ Tag the release
Once the wheels have been built and downloaded without errors, go back to your
numpy repository in the maintenance branch and tag the ``REL`` commit, signing
-it with your gpg key, and build the source distribution archives::
+it with your gpg key::
$ git tag -s v1.14.5
@@ -157,8 +168,8 @@ push the tag upstream::
$ git push upstream v1.14.5
-We wait until this point to push the tag because it is very difficult to change
-the tag after it has been pushed.
+We wait until this point to push the tag because it is public and should not
+be changed after it has been pushed.
Reset the maintenance branch into a development state
@@ -168,6 +179,19 @@ Add another ``REL`` commit to the numpy maintenance branch, which resets the
``ISRELEASED`` flag to ``False`` and increments the version counter::
$ gvim pavement.py setup.py
+
+Create release notes for next release and edit them to set the version::
+
+ $ cp doc/release/template.rst doc/release/1.14.6-notes.rst
+ $ gvim doc/release/1.14.6-notes.rst
+ $ git add doc/release/1.14.6-notes.rst
+
+Add new release notes to the documentation release list::
+
+ $ gvim doc/source/release.rst
+
+Commit the result::
+
$ git commit -a -m"REL: prepare 1.14.x for further development"
$ git push upstream maintenance/1.14.x
@@ -176,7 +200,9 @@ Upload to PyPI
--------------
Upload to PyPI using ``twine``. A recent version of ``twine`` is needed
-after recent PyPI changes, version ``1.11.0`` was used here. ::
+after recent PyPI changes, version ``1.11.0`` was used here.
+
+.. code-block:: sh
$ cd ../numpy
$ twine upload release/installers/*.whl
@@ -206,28 +232,39 @@ add files, using an editable text window and as binary uploads.
- Hit the ``{Publish,Update} release`` button at the bottom.
-Upload documents to docs.scipy.org
-----------------------------------
+Upload documents to numpy.org
+-----------------------------
This step is only needed for final releases and can be skipped for
-pre-releases. You will also need upload permission for the document server, if
-you do not have permission ping Pauli Virtanen or Ralf Gommers to generate and
-upload the documentation. Otherwise::
+pre-releases. ``make merge-doc`` clones the ``numpy/doc`` repo into
+``doc/build/merge`` and updates it with the new documentation::
$ pushd doc
$ make dist
- $ make upload USERNAME=<yourname> RELEASE=v1.14.5
+ $ make merge-doc
$ popd
-If the release series is a new one, you will need to rebuild and upload the
-``docs.scipy.org`` front page::
+If the release series is a new one, you will need to add a new section to the
+``doc/build/merge/index.html`` front page just after the "insert here" comment::
+
+ $ gvim doc/build/merge/index.html +/'insert here'
- $ cd ../docs.scipy.org
- $ gvim index.rst
+Otherwise, only the ``zip`` and ``pdf`` links should be updated with the
+new tag name::
-Note: there is discussion about moving the docs to github. This section will be
-updated when/if that happens.
+ $ gvim doc/build/merge/index.html +/'tag v1.14'
+You can "test run" the new documentation in a browser to make sure the links
+work::
+
+ $ firefox doc/build/merge/index.html
+
+Once everything seems satisfactory, commit and upload the changes::
+
+ $ pushd doc/build/merge
+ $ git commit -am"Add documentation for v1.14.5"
+ $ git push
+ $ popd
Announce the release on scipy.org
---------------------------------
@@ -250,8 +287,9 @@ Announce to mailing lists
The release should be announced on the numpy-discussion, scipy-devel,
scipy-user, and python-announce-list mailing lists. Look at previous
-announcements for the basic template. The contributor and PR lists
-are the same as generated for the release notes above.
+announcements for the basic template. The contributor and PR lists are the same
+as generated for the release notes above. If you crosspost, make sure that
+python-announce-list is BCC so that replies will not be sent to that list.
Post-Release Tasks
diff --git a/doc/TESTS.rst.txt b/doc/TESTS.rst.txt
index 5fe0be1f1..14cb28df8 100644
--- a/doc/TESTS.rst.txt
+++ b/doc/TESTS.rst.txt
@@ -37,10 +37,9 @@ or from the command line::
$ python runtests.py
-SciPy uses the testing framework from NumPy (specifically
-:ref:`numpy-testing`), so all the SciPy examples shown here are also
-applicable to NumPy. NumPy's full test suite can be run as
-follows::
+SciPy uses the testing framework from :mod:`numpy.testing`, so all
+the SciPy examples shown here are also applicable to NumPy. NumPy's full test
+suite can be run as follows::
>>> import numpy
>>> numpy.test()
@@ -120,15 +119,6 @@ that makes it hard to identify the test from the output of running the test
suite with ``verbose=2`` (or similar verbosity setting). Use plain comments
(``#``) if necessary.
-Sometimes it is convenient to run ``test_yyy.py`` by itself, so we add
-
-::
-
- if __name__ == "__main__":
- run_module_suite()
-
-at the bottom.
-
Labeling tests
--------------
@@ -331,35 +321,33 @@ Known failures & skipping tests
Sometimes you might want to skip a test or mark it as a known failure,
such as when the test suite is being written before the code it's
meant to test, or if a test only fails on a particular architecture.
-The decorators from numpy.testing.dec can be used to do this.
To skip a test, simply use ``skipif``::
- from numpy.testing import dec
+ import pytest
- @dec.skipif(SkipMyTest, "Skipping this test because...")
+ @pytest.mark.skipif(SkipMyTest, reason="Skipping this test because...")
def test_something(foo):
...
The test is marked as skipped if ``SkipMyTest`` evaluates to nonzero,
and the message in verbose test output is the second argument given to
``skipif``. Similarly, a test can be marked as a known failure by
-using ``knownfailureif``::
+using ``xfail``::
- from numpy.testing import dec
+ import pytest
- @dec.knownfailureif(MyTestFails, "This test is known to fail because...")
+ @pytest.mark.xfail(MyTestFails, reason="This test is known to fail because...")
def test_something_else(foo):
...
Of course, a test can be unconditionally skipped or marked as a known
-failure by passing ``True`` as the first argument to ``skipif`` or
-``knownfailureif``, respectively.
+failure by using ``skip`` or ``xfail`` without argument, respectively.
A total of the number of skipped and known failing tests is displayed
at the end of the test run. Skipped tests are marked as ``'S'`` in
the test results (or ``'SKIPPED'`` for ``verbose > 1``), and known
-failing tests are marked as ``'K'`` (or ``'KNOWN'`` if ``verbose >
+failing tests are marked as ``'x'`` (or ``'XFAIL'`` if ``verbose >
1``).
Tests on random data
diff --git a/doc/cdoc/numpyfilter.py b/doc/cdoc/numpyfilter.py
index 614c50771..0ec50697e 100755
--- a/doc/cdoc/numpyfilter.py
+++ b/doc/cdoc/numpyfilter.py
@@ -14,10 +14,7 @@ import os
import textwrap
import optparse
-if sys.version_info[0] >= 3:
- import pickle
-else:
- import cPickle as pickle
+from numpy.core.numeric import pickle
CACHE_FILE = 'build/rst-cache.pck'
diff --git a/doc/changelog/1.15.0-changelog.rst b/doc/changelog/1.15.0-changelog.rst
index b76b9699a..4e3d3680b 100644
--- a/doc/changelog/1.15.0-changelog.rst
+++ b/doc/changelog/1.15.0-changelog.rst
@@ -374,7 +374,7 @@ A total of 438 pull requests were merged for this release.
* `#10778 <https://github.com/numpy/numpy/pull/10778>`__: BUG: test, fix for missing flags['WRITEBACKIFCOPY'] key
* `#10781 <https://github.com/numpy/numpy/pull/10781>`__: ENH: NEP index builder
* `#10785 <https://github.com/numpy/numpy/pull/10785>`__: DOC: Fixed author name in reference to book
-* `#10786 <https://github.com/numpy/numpy/pull/10786>`__: ENH: Add "stablesort" option to inp.sort as an alias for "mergesort".
+* `#10786 <https://github.com/numpy/numpy/pull/10786>`__: ENH: Add "stable" option to np.sort as an alias for "mergesort".
* `#10790 <https://github.com/numpy/numpy/pull/10790>`__: TST: Various fixes prior to switching to pytest
* `#10795 <https://github.com/numpy/numpy/pull/10795>`__: BUG: Allow spaces in output string of einsum
* `#10796 <https://github.com/numpy/numpy/pull/10796>`__: BUG: fix wrong inplace vectorization on overlapping arguments
diff --git a/doc/changelog/1.15.3-changelog.rst b/doc/changelog/1.15.3-changelog.rst
new file mode 100644
index 000000000..9e03df454
--- /dev/null
+++ b/doc/changelog/1.15.3-changelog.rst
@@ -0,0 +1,32 @@
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jeroen Demeyer
+* Kevin Sheppard
+* Matthew Bowden +
+* Matti Picus
+* Tyler Reddy
+
+Pull requests merged
+====================
+
+A total of 12 pull requests were merged for this release.
+
+* `#12080 <https://github.com/numpy/numpy/pull/12080>`__: MAINT: Blacklist some MSVC complex functions.
+* `#12083 <https://github.com/numpy/numpy/pull/12083>`__: TST: Add azure CI testing to 1.15.x branch.
+* `#12084 <https://github.com/numpy/numpy/pull/12084>`__: BUG: test_path() now uses Path.resolve()
+* `#12085 <https://github.com/numpy/numpy/pull/12085>`__: TST, MAINT: Fix some failing tests on azure-pipelines mac and...
+* `#12187 <https://github.com/numpy/numpy/pull/12187>`__: BUG: Fix memory leak in mapping.c
+* `#12188 <https://github.com/numpy/numpy/pull/12188>`__: BUG: Allow boolean subtract in histogram
+* `#12189 <https://github.com/numpy/numpy/pull/12189>`__: BUG: Fix in-place permutation
+* `#12190 <https://github.com/numpy/numpy/pull/12190>`__: BUG: limit default for get_num_build_jobs() to 8
+* `#12191 <https://github.com/numpy/numpy/pull/12191>`__: BUG: OBJECT_to_* should check for errors
+* `#12192 <https://github.com/numpy/numpy/pull/12192>`__: DOC: Prepare for NumPy 1.15.3 release.
+* `#12237 <https://github.com/numpy/numpy/pull/12237>`__: BUG: Fix MaskedArray fill_value type conversion.
+* `#12238 <https://github.com/numpy/numpy/pull/12238>`__: TST: Backport azure-pipeline testing fixes for Mac
diff --git a/doc/changelog/1.15.4-changelog.rst b/doc/changelog/1.15.4-changelog.rst
new file mode 100644
index 000000000..fbe71f4ae
--- /dev/null
+++ b/doc/changelog/1.15.4-changelog.rst
@@ -0,0 +1,21 @@
+
+Contributors
+============
+
+A total of 4 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Matti Picus
+* Sebastian Berg
+* bbbbbbbbba +
+
+Pull requests merged
+====================
+
+A total of 4 pull requests were merged for this release.
+
+* `#12296 <https://github.com/numpy/numpy/pull/12296>`__: BUG: Dealloc cached buffer info (#12249)
+* `#12297 <https://github.com/numpy/numpy/pull/12297>`__: BUG: Fix fill value in masked array '==' and '!=' ops.
+* `#12307 <https://github.com/numpy/numpy/pull/12307>`__: DOC: Correct the default value of `optimize` in `numpy.einsum`
+* `#12320 <https://github.com/numpy/numpy/pull/12320>`__: REL: Prepare for the NumPy 1.15.4 release
diff --git a/doc/changelog/1.16.0-changelog.rst b/doc/changelog/1.16.0-changelog.rst
new file mode 100644
index 000000000..8aca5e643
--- /dev/null
+++ b/doc/changelog/1.16.0-changelog.rst
@@ -0,0 +1,616 @@
+
+Contributors
+============
+
+A total of 113 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alan Fontenot +
+* Allan Haldane
+* Alon Hershenhorn +
+* Alyssa Quek +
+* Andreas Nussbaumer +
+* Anner +
+* Anthony Sottile +
+* Antony Lee
+* Ayappan P +
+* Bas van Schaik +
+* C.A.M. Gerlach +
+* Charles Harris
+* Chris Billington
+* Christian Clauss
+* Christoph Gohlke
+* Christopher Pezley +
+* Daniel B Allan +
+* Daniel Smith
+* Dawid Zych +
+* Derek Kim +
+* Dima Pasechnik +
+* Edgar Giovanni Lepe +
+* Elena Mokeeva +
+* Elliott Sales de Andrade +
+* Emil Hessman +
+* Eric Larson
+* Eric Schles +
+* Eric Wieser
+* Giulio Benetti +
+* Guillaume Gautier +
+* Guo Ci
+* Heath Henley +
+* Isuru Fernando +
+* J. Lewis Muir +
+* Jack Vreeken +
+* Jaime Fernandez
+* James Bourbeau
+* Jeff VanOss
+* Jeffrey Yancey +
+* Jeremy Chen +
+* Jeremy Manning +
+* Jeroen Demeyer
+* John Darbyshire +
+* John Kirkham
+* John Zwinck
+* Jonas Jensen +
+* Joscha Reimer +
+* Juan Azcarreta +
+* Julian Taylor
+* Kevin Sheppard
+* Krzysztof Chomski +
+* Kyle Sunden
+* Lars Grüter
+* Lilian Besson +
+* MSeifert04
+* Mark Harfouche
+* Marten van Kerkwijk
+* Martin Thoma
+* Matt Harrigan +
+* Matthew Bowden +
+* Matthew Brett
+* Matthias Bussonnier
+* Matti Picus
+* Max Aifer +
+* Michael Hirsch, Ph.D +
+* Michael James Jamie Schnaitter +
+* MichaelSaah +
+* Mike Toews
+* Minkyu Lee +
+* Mircea Akos Bruma +
+* Mircea-Akos Brumă +
+* Moshe Looks +
+* Muhammad Kasim +
+* Nathaniel J. Smith
+* Nikita Titov +
+* Paul Müller +
+* Paul van Mulbregt
+* Pauli Virtanen
+* Pierre Glaser +
+* Pim de Haan
+* Ralf Gommers
+* Robert Kern
+* Robin Aggleton +
+* Rohit Pandey +
+* Roman Yurchak +
+* Ryan Soklaski
+* Sebastian Berg
+* Sho Nakamura +
+* Simon Gibbons
+* Stan Seibert +
+* Stefan Otte
+* Stefan van der Walt
+* Stephan Hoyer
+* Stuart Archibald
+* Taylor Smith +
+* Tim Felgentreff +
+* Tim Swast +
+* Tim Teichmann +
+* Toshiki Kataoka
+* Travis Oliphant
+* Tyler Reddy
+* Uddeshya Singh +
+* Warren Weckesser
+* Weitang Li +
+* Wenjamin Petrenko +
+* William D. Irons
+* Yannick Jadoul +
+* Yaroslav Halchenko
+* Yug Khanna +
+* Yuji Kanagawa +
+* Yukun Guo +
+* @ankokumoyashi +
+* @lerbuke +
+
+Pull requests merged
+====================
+
+A total of 490 pull requests were merged for this release.
+
+* `#6256 <https://github.com/numpy/numpy/pull/6256>`__: NEP: Add proposal for oindex and vindex.
+* `#6377 <https://github.com/numpy/numpy/pull/6377>`__: BUG: define "uint-alignment", fixes complex64 alignment
+* `#8206 <https://github.com/numpy/numpy/pull/8206>`__: ENH: add padding options to diff
+* `#8923 <https://github.com/numpy/numpy/pull/8923>`__: ENH: Add 'stone' estimator to np.histogram
+* `#8955 <https://github.com/numpy/numpy/pull/8955>`__: ENH: Allow ufunc.identity to be any python object
+* `#9022 <https://github.com/numpy/numpy/pull/9022>`__: BUG: don't silence `__array_wrap__` errors in `ufunc.reduce`
+* `#10551 <https://github.com/numpy/numpy/pull/10551>`__: BUG: memmap close files when it shouldn't, load leaves them open...
+* `#10602 <https://github.com/numpy/numpy/pull/10602>`__: MAINT: Move dtype string functions to python
+* `#10704 <https://github.com/numpy/numpy/pull/10704>`__: NEP 15: Merging multiarray and umath
+* `#10797 <https://github.com/numpy/numpy/pull/10797>`__: DEP: Updated `unravel_index()` to support `shape` kwarg
+* `#10915 <https://github.com/numpy/numpy/pull/10915>`__: ENH: implement nep 0015: merge multiarray and umath
+* `#10998 <https://github.com/numpy/numpy/pull/10998>`__: DOC: removed spurious FIXME comment in number.c
+* `#11002 <https://github.com/numpy/numpy/pull/11002>`__: MAINT: add clearer message to assist users with failed builds.
+* `#11016 <https://github.com/numpy/numpy/pull/11016>`__: ENH: Add AARCH32 support.
+* `#11084 <https://github.com/numpy/numpy/pull/11084>`__: DOC: link to TESTS.rst.txt testing guidelines, tweak testing...
+* `#11119 <https://github.com/numpy/numpy/pull/11119>`__: ENH: Chain exceptions to give better error messages for invalid...
+* `#11175 <https://github.com/numpy/numpy/pull/11175>`__: ENH: Generalized ufunc signature expansion for frozen and flexible...
+* `#11197 <https://github.com/numpy/numpy/pull/11197>`__: BUG/ENH: Removed non-standard scaling of the covariance matrix...
+* `#11234 <https://github.com/numpy/numpy/pull/11234>`__: DOC: Update einsum docs
+* `#11282 <https://github.com/numpy/numpy/pull/11282>`__: MAINT: move comparison operator special-handling out of ufunc...
+* `#11297 <https://github.com/numpy/numpy/pull/11297>`__: NEP: Expansion of gufunc signatures.
+* `#11299 <https://github.com/numpy/numpy/pull/11299>`__: BUG: Prevent crashes on 0-length structured void scalars
+* `#11303 <https://github.com/numpy/numpy/pull/11303>`__: DOC: revision of NEP-18 (`__array_function__`)
+* `#11312 <https://github.com/numpy/numpy/pull/11312>`__: WIP: DOC: slightly tweak the directions to create a release
+* `#11318 <https://github.com/numpy/numpy/pull/11318>`__: REL: Setup master for 1.16 development.
+* `#11323 <https://github.com/numpy/numpy/pull/11323>`__: DEP: Actually deprecate the normed argument to histogram
+* `#11324 <https://github.com/numpy/numpy/pull/11324>`__: MAINT: Don't use dtype strings when the dtypes themselves can...
+* `#11326 <https://github.com/numpy/numpy/pull/11326>`__: DOC: Update master after NumPy 1.14.5 release.
+* `#11328 <https://github.com/numpy/numpy/pull/11328>`__: MAINT: Misc numeric cleanup
+* `#11335 <https://github.com/numpy/numpy/pull/11335>`__: DOC: Change array lengths/entries in `broadcast_arrays` example...
+* `#11336 <https://github.com/numpy/numpy/pull/11336>`__: BUG: decref in failure path; replace `PyObject_Type` by `Py_TYPE`
+* `#11338 <https://github.com/numpy/numpy/pull/11338>`__: MAINT: Ensure ufunc override call each class only once, plus...
+* `#11340 <https://github.com/numpy/numpy/pull/11340>`__: BUG: sctypeDict['f8'] randomly points to double or longdouble...
+* `#11345 <https://github.com/numpy/numpy/pull/11345>`__: BUG/ENH: Einsum optimization path updates and bug fixes.
+* `#11347 <https://github.com/numpy/numpy/pull/11347>`__: DOC: Silence many sphinx warnings
+* `#11348 <https://github.com/numpy/numpy/pull/11348>`__: ENH: Improve support for pathlib.Path objects in load functions
+* `#11349 <https://github.com/numpy/numpy/pull/11349>`__: DOC: document new functions
+* `#11351 <https://github.com/numpy/numpy/pull/11351>`__: MAINT: Improve speed of ufunc kwargs parsing
+* `#11353 <https://github.com/numpy/numpy/pull/11353>`__: DOC, MAINT: HTTP -> HTTPS, and other linkrot fixes
+* `#11356 <https://github.com/numpy/numpy/pull/11356>`__: NEP: Update NEP 19: RNG Policy
+* `#11357 <https://github.com/numpy/numpy/pull/11357>`__: MAINT: Add new `_test.c` files and `benchmarks/html` to `gitignore`
+* `#11365 <https://github.com/numpy/numpy/pull/11365>`__: BUG: add missing NpyIter_Close in einsum
+* `#11366 <https://github.com/numpy/numpy/pull/11366>`__: BUG/TST: String indexing should just fail, not emit a futurewarning
+* `#11371 <https://github.com/numpy/numpy/pull/11371>`__: DOC: Clarify requirement that histogram bins are monotonic.
+* `#11373 <https://github.com/numpy/numpy/pull/11373>`__: TST: Show that histogramdd's normed argument is histogram's density
+* `#11374 <https://github.com/numpy/numpy/pull/11374>`__: WIP: additional revision for NEP-18 (`__array_function__`)
+* `#11376 <https://github.com/numpy/numpy/pull/11376>`__: ENH: Remove NpyIter_Close
+* `#11379 <https://github.com/numpy/numpy/pull/11379>`__: BUG: changed hardcoded axis to 0 for checking indices
+* `#11382 <https://github.com/numpy/numpy/pull/11382>`__: DEP: deprecate undocumented, unused dtype type dicts
+* `#11383 <https://github.com/numpy/numpy/pull/11383>`__: ENH: Allow size=0 in numpy.random.choice
+* `#11385 <https://github.com/numpy/numpy/pull/11385>`__: BUG: Make scalar.squeeze accept axis arg
+* `#11390 <https://github.com/numpy/numpy/pull/11390>`__: REL,MAINT: Update numpyconfig.h for 1.15.
+* `#11391 <https://github.com/numpy/numpy/pull/11391>`__: MAINT: Update mailmap
+* `#11396 <https://github.com/numpy/numpy/pull/11396>`__: TST: Added regression test for #11395
+* `#11405 <https://github.com/numpy/numpy/pull/11405>`__: BUG: Ensure comparisons on scalar strings pass without warning.
+* `#11406 <https://github.com/numpy/numpy/pull/11406>`__: BUG: Ensure out is returned in einsum.
+* `#11409 <https://github.com/numpy/numpy/pull/11409>`__: DOC: Update testing section of README.
+* `#11414 <https://github.com/numpy/numpy/pull/11414>`__: DOC: major revision of NEP 21, advanced indexing
+* `#11422 <https://github.com/numpy/numpy/pull/11422>`__: BENCH: Add benchmarks for np.loadtxt reading from CSV format...
+* `#11424 <https://github.com/numpy/numpy/pull/11424>`__: ENH: Allow use of svd on empty arrays
+* `#11425 <https://github.com/numpy/numpy/pull/11425>`__: DOC: Clear up confusion between np.where(cond) and np.where(cond,...
+* `#11428 <https://github.com/numpy/numpy/pull/11428>`__: BUG: Fix incorrect deprecation logic for histogram(normed=...)...
+* `#11429 <https://github.com/numpy/numpy/pull/11429>`__: NEP: accept NEP 20 partially (frozen, flexible, but not broadcastable...
+* `#11432 <https://github.com/numpy/numpy/pull/11432>`__: MAINT: Refactor differences between cblas_matrixproduct and PyArray_MatrixProduct2
+* `#11434 <https://github.com/numpy/numpy/pull/11434>`__: MAINT: add PyPI classifier for Python 3.7
+* `#11436 <https://github.com/numpy/numpy/pull/11436>`__: DOC: Document average return type
+* `#11440 <https://github.com/numpy/numpy/pull/11440>`__: BUG: fix interpolation with inf and NaN present
+* `#11444 <https://github.com/numpy/numpy/pull/11444>`__: DOC: Fix documentation for fromfunction
+* `#11449 <https://github.com/numpy/numpy/pull/11449>`__: BUG: Revert #10229 to fix DLL loads on Windows.
+* `#11450 <https://github.com/numpy/numpy/pull/11450>`__: MAINT/DEP: properly implement `ndarray.__pos__`
+* `#11453 <https://github.com/numpy/numpy/pull/11453>`__: BENCH: add ufunc argument parsing benchmarks.
+* `#11455 <https://github.com/numpy/numpy/pull/11455>`__: BENCH: belated addition of lcm, gcd to ufunc benchmark.
+* `#11459 <https://github.com/numpy/numpy/pull/11459>`__: NEP: Add some text to NEP 0 to clarify how a NEP is accepted
+* `#11461 <https://github.com/numpy/numpy/pull/11461>`__: MAINT: Add discussion link to NEP 15
+* `#11462 <https://github.com/numpy/numpy/pull/11462>`__: Add NEP 22, a high level overview for the duck array work
+* `#11463 <https://github.com/numpy/numpy/pull/11463>`__: MAINT: Produce a more readable repr of argument packs in benchmark
+* `#11464 <https://github.com/numpy/numpy/pull/11464>`__: BUG: Don't convert inputs to `np.float64` in digitize
+* `#11468 <https://github.com/numpy/numpy/pull/11468>`__: BUG: Advanced indexing assignment incorrectly took 1-D fastpath
+* `#11470 <https://github.com/numpy/numpy/pull/11470>`__: BLD: Don't leave the build task running if runtests.py is interrupted
+* `#11471 <https://github.com/numpy/numpy/pull/11471>`__: MAINT: Remove python-side docstrings from add_newdocs.
+* `#11472 <https://github.com/numpy/numpy/pull/11472>`__: DOC: include NEP number on each NEP page
+* `#11473 <https://github.com/numpy/numpy/pull/11473>`__: MAINT: Move pytesttester outside of np.testing, to avoid creating...
+* `#11474 <https://github.com/numpy/numpy/pull/11474>`__: MAINT: Move add_newdocs into core, since it only adds docs to...
+* `#11479 <https://github.com/numpy/numpy/pull/11479>`__: BUG: Fix #define for ppc64 and ppc64le
+* `#11480 <https://github.com/numpy/numpy/pull/11480>`__: MAINT: move ufunc override code to umath and multiarray as much...
+* `#11482 <https://github.com/numpy/numpy/pull/11482>`__: DOC: Include warning in np.resize() docs
+* `#11484 <https://github.com/numpy/numpy/pull/11484>`__: BUG: Increase required cython version on python 3.7
+* `#11487 <https://github.com/numpy/numpy/pull/11487>`__: DOC: extend sanity check message
+* `#11488 <https://github.com/numpy/numpy/pull/11488>`__: NEP: clarify bugfix policy for legacy RandomState.
+* `#11501 <https://github.com/numpy/numpy/pull/11501>`__: MAINT: Tidy cython invocation
+* `#11503 <https://github.com/numpy/numpy/pull/11503>`__: MAINT: improve error message for isposinf and isneginf on complex...
+* `#11512 <https://github.com/numpy/numpy/pull/11512>`__: DOC: Add templates for issues and PRs
+* `#11514 <https://github.com/numpy/numpy/pull/11514>`__: Prefer the same-python cython to the on-PATH cython
+* `#11515 <https://github.com/numpy/numpy/pull/11515>`__: BUG: decref of field title caused segfault
+* `#11518 <https://github.com/numpy/numpy/pull/11518>`__: MAINT: Speed up normalize_axis_tuple by about 30%
+* `#11522 <https://github.com/numpy/numpy/pull/11522>`__: BUG: fix np.load() of empty .npz file
+* `#11525 <https://github.com/numpy/numpy/pull/11525>`__: MAINT: Append `*FLAGS` instead of overriding
+* `#11526 <https://github.com/numpy/numpy/pull/11526>`__: ENH: add multi-field assignment helpers in np.lib.recfunctions
+* `#11527 <https://github.com/numpy/numpy/pull/11527>`__: DOC: Note that method is the polar form of Box-Muller.
+* `#11528 <https://github.com/numpy/numpy/pull/11528>`__: ENH: Add support for ipython latex printing to polynomial
+* `#11531 <https://github.com/numpy/numpy/pull/11531>`__: ENH: Add density argument to histogramdd.
+* `#11533 <https://github.com/numpy/numpy/pull/11533>`__: DOC: Fixed example code for cheb2poly and poly2cheb (see #11519)
+* `#11534 <https://github.com/numpy/numpy/pull/11534>`__: DOC: Minor improvements to np.concatenate docstring
+* `#11535 <https://github.com/numpy/numpy/pull/11535>`__: MAINT: Improve memory usage in PEP3118 format parsing
+* `#11553 <https://github.com/numpy/numpy/pull/11553>`__: DOC: Tiny typo on numpy/reference/arrays.dtypes.html
+* `#11556 <https://github.com/numpy/numpy/pull/11556>`__: BUG: Make assert_string_equal check str equality simply without...
+* `#11559 <https://github.com/numpy/numpy/pull/11559>`__: NEP: accept nep 0015
+* `#11560 <https://github.com/numpy/numpy/pull/11560>`__: NEP: accept nep 0019
+* `#11562 <https://github.com/numpy/numpy/pull/11562>`__: DOC: update release notes for LDFLAGS append behavior (gh-11525).
+* `#11565 <https://github.com/numpy/numpy/pull/11565>`__: MAINT: convert the doctests for polynomial to regular tests
+* `#11566 <https://github.com/numpy/numpy/pull/11566>`__: BLD: Do not use gcc warnings flags when 'gcc' is actually clang.
+* `#11567 <https://github.com/numpy/numpy/pull/11567>`__: TST: Integrate codecov testing
+* `#11568 <https://github.com/numpy/numpy/pull/11568>`__: BLD: Modify cpu detection and printing to get working aarch64...
+* `#11571 <https://github.com/numpy/numpy/pull/11571>`__: DOC: Updated array2string description
+* `#11572 <https://github.com/numpy/numpy/pull/11572>`__: DOC: Updated Slice Description
+* `#11573 <https://github.com/numpy/numpy/pull/11573>`__: TST: add broadcast_arrays() kwarg unit test for TypeError
+* `#11580 <https://github.com/numpy/numpy/pull/11580>`__: MAINT: refactor ufunc iter operand flags handling
+* `#11591 <https://github.com/numpy/numpy/pull/11591>`__: MAINT: update runtests.py node id example for pytest usage
+* `#11592 <https://github.com/numpy/numpy/pull/11592>`__: DOC: add Stefan van der Walt to Steering Council
+* `#11593 <https://github.com/numpy/numpy/pull/11593>`__: ENH: handle empty matrices in qr decomposition
+* `#11594 <https://github.com/numpy/numpy/pull/11594>`__: ENH: support for empty matrices in linalg.lstsq
+* `#11595 <https://github.com/numpy/numpy/pull/11595>`__: BUG:warn on Nan in minimum,maximum for scalars, float16
+* `#11596 <https://github.com/numpy/numpy/pull/11596>`__: NEP: backwards compatibility and deprecation policy
+* `#11598 <https://github.com/numpy/numpy/pull/11598>`__: TST: Add Python 3.7 to CI testing
+* `#11601 <https://github.com/numpy/numpy/pull/11601>`__: BUG: Make np.array([[1], 2]) and np.array([1, [2]]) behave in...
+* `#11606 <https://github.com/numpy/numpy/pull/11606>`__: DOC: Post 1.15.0 release updates for master.
+* `#11607 <https://github.com/numpy/numpy/pull/11607>`__: DOC: minor clarification and typo fix to NEP 21 (outer indexing).
+* `#11610 <https://github.com/numpy/numpy/pull/11610>`__: TST: including C source line coverage for CI / codecov
+* `#11611 <https://github.com/numpy/numpy/pull/11611>`__: NEP: Add roadmap section and subdocuments to NEPs
+* `#11613 <https://github.com/numpy/numpy/pull/11613>`__: BUG: have geometric() raise ValueError on p=0
+* `#11615 <https://github.com/numpy/numpy/pull/11615>`__: BUG: Clip uses wrong memory order in output
+* `#11616 <https://github.com/numpy/numpy/pull/11616>`__: DOC: add a brief note on "Protocols for methods" to NEP 18
+* `#11621 <https://github.com/numpy/numpy/pull/11621>`__: DOC: Use "real symmetric" rather than "symmetric" in ``eigh``...
+* `#11626 <https://github.com/numpy/numpy/pull/11626>`__: DOC: Show plot in meshgrid example.
+* `#11630 <https://github.com/numpy/numpy/pull/11630>`__: DOC: Include the versionadded to the isnat documentation.
+* `#11634 <https://github.com/numpy/numpy/pull/11634>`__: MAINT: Filter Cython warnings in `__init__.py`
+* `#11637 <https://github.com/numpy/numpy/pull/11637>`__: ENH: np.angle: Remove unnecessary multiplication, and allow subclasses...
+* `#11638 <https://github.com/numpy/numpy/pull/11638>`__: ENH: Make expand_dims work on subclasses
+* `#11642 <https://github.com/numpy/numpy/pull/11642>`__: BUG: Fixes for unicode field names in Python 2
+* `#11643 <https://github.com/numpy/numpy/pull/11643>`__: DOC: Insert up to date link to Spyder website in Dev Env doc...
+* `#11644 <https://github.com/numpy/numpy/pull/11644>`__: BUG: Fix doc source links to unwrap decorators
+* `#11652 <https://github.com/numpy/numpy/pull/11652>`__: BUG: Ensure singleton dimensions are not dropped when converting...
+* `#11660 <https://github.com/numpy/numpy/pull/11660>`__: ENH: Add Nan warnings for maximum, minimum on more dtypes
+* `#11669 <https://github.com/numpy/numpy/pull/11669>`__: BUG: Fix regression in `void_getitem`
+* `#11670 <https://github.com/numpy/numpy/pull/11670>`__: MAINT: trivially refactor mapped indexing
+* `#11673 <https://github.com/numpy/numpy/pull/11673>`__: DOC: Add geomspace to "See also" of linspace
+* `#11679 <https://github.com/numpy/numpy/pull/11679>`__: TST: ignore setup.py files for codecov reports
+* `#11688 <https://github.com/numpy/numpy/pull/11688>`__: DOC: Update broadcasting doc with current exception details
+* `#11691 <https://github.com/numpy/numpy/pull/11691>`__: BUG: Make matrix_power again work for object arrays.
+* `#11692 <https://github.com/numpy/numpy/pull/11692>`__: MAINT: Remove duplicate code.
+* `#11693 <https://github.com/numpy/numpy/pull/11693>`__: NEP: Mark NEP 18 as accepted
+* `#11694 <https://github.com/numpy/numpy/pull/11694>`__: BUG: Fix pickle and memoryview for datetime64, timedelta64 scalars
+* `#11695 <https://github.com/numpy/numpy/pull/11695>`__: BUG: Add missing PyErr_NoMemory after failing malloc
+* `#11703 <https://github.com/numpy/numpy/pull/11703>`__: MAINT: Remove np.pkgload, which seems to be unusable anyway
+* `#11708 <https://github.com/numpy/numpy/pull/11708>`__: BUG: Fix regression in np.loadtxt for bz2 text files in Python...
+* `#11710 <https://github.com/numpy/numpy/pull/11710>`__: BUG: Check for compiler used in env['CC'], then config_vars['CC']
+* `#11711 <https://github.com/numpy/numpy/pull/11711>`__: BUG: Fix undefined functions on big-endian systems.
+* `#11715 <https://github.com/numpy/numpy/pull/11715>`__: TST: Fix urlopen stubbing.
+* `#11717 <https://github.com/numpy/numpy/pull/11717>`__: MAINT: Make einsum optimize default to False.
+* `#11718 <https://github.com/numpy/numpy/pull/11718>`__: BUG: Revert use of `console_scripts`.
+* `#11722 <https://github.com/numpy/numpy/pull/11722>`__: MAINT: Remove duplicate docstring and correct location of `__all__`...
+* `#11725 <https://github.com/numpy/numpy/pull/11725>`__: BUG: Fix Fortran kind detection for aarch64 & s390x.
+* `#11727 <https://github.com/numpy/numpy/pull/11727>`__: BUG: Fix printing of longdouble on ppc64le.
+* `#11729 <https://github.com/numpy/numpy/pull/11729>`__: DOC: fix capitalization of kilojoules
+* `#11731 <https://github.com/numpy/numpy/pull/11731>`__: DOC: fix typo in vectorize docstring
+* `#11733 <https://github.com/numpy/numpy/pull/11733>`__: DOC: recommend polynomial.Polynomial over np.polyfit
+* `#11735 <https://github.com/numpy/numpy/pull/11735>`__: BUG: Fix test sensitive to platform byte order.
+* `#11738 <https://github.com/numpy/numpy/pull/11738>`__: TST, MAINT: add lgtm.yml to tweak LGTM.com analysis
+* `#11739 <https://github.com/numpy/numpy/pull/11739>`__: BUG: disallow setting flag to writeable after fromstring, frombuffer
+* `#11740 <https://github.com/numpy/numpy/pull/11740>`__: BUG: Deprecation triggers segfault
+* `#11742 <https://github.com/numpy/numpy/pull/11742>`__: DOC: Reduce warnings and cleanup redundant c-api documentation
+* `#11745 <https://github.com/numpy/numpy/pull/11745>`__: DOC: Small docstring fixes for old polyfit.
+* `#11754 <https://github.com/numpy/numpy/pull/11754>`__: BUG: check return value of `_buffer_format_string`
+* `#11755 <https://github.com/numpy/numpy/pull/11755>`__: MAINT: Fix typos in random.hypergeometric's notes
+* `#11756 <https://github.com/numpy/numpy/pull/11756>`__: MAINT: Make assert_array_compare more generic.
+* `#11765 <https://github.com/numpy/numpy/pull/11765>`__: DOC: Move documentation from `help(ndarray.ctypes)` to `help(some_array.ctypes)`
+* `#11771 <https://github.com/numpy/numpy/pull/11771>`__: BUG: Make `random.shuffle` work on 1-D instances of `ndarray`...
+* `#11774 <https://github.com/numpy/numpy/pull/11774>`__: BUG: Fix regression in intersect1d.
+* `#11778 <https://github.com/numpy/numpy/pull/11778>`__: BUG: Avoid signed overflow in histogram
+* `#11783 <https://github.com/numpy/numpy/pull/11783>`__: MAINT: check `_append_char` return value
+* `#11784 <https://github.com/numpy/numpy/pull/11784>`__: MAINT: reformat line spacing before test methods
+* `#11797 <https://github.com/numpy/numpy/pull/11797>`__: DOC: Update docs after 1.15.1 release.
+* `#11800 <https://github.com/numpy/numpy/pull/11800>`__: DOC: document use when f2py is not in the PATH
+* `#11802 <https://github.com/numpy/numpy/pull/11802>`__: ENH: Use entry_points to install the f2py scripts.
+* `#11805 <https://github.com/numpy/numpy/pull/11805>`__: BUG: add type cast check for ediff1d
+* `#11806 <https://github.com/numpy/numpy/pull/11806>`__: DOC: Polybase augmented assignment notes
+* `#11812 <https://github.com/numpy/numpy/pull/11812>`__: DOC: edit setup.py docstring that is displayed on PyPI.
+* `#11813 <https://github.com/numpy/numpy/pull/11813>`__: BUG: fix array_split incorrect behavior with array size bigger...
+* `#11814 <https://github.com/numpy/numpy/pull/11814>`__: DOC, MAINT: Fixes for errstate() and README.md documentation.
+* `#11817 <https://github.com/numpy/numpy/pull/11817>`__: DOC: add examples and extend existing docs for polynomial subclasses
+* `#11818 <https://github.com/numpy/numpy/pull/11818>`__: TST: add missing tests for all polynomial subclass pow fns.
+* `#11823 <https://github.com/numpy/numpy/pull/11823>`__: TST: add test for array2string unexpected kwarg
+* `#11830 <https://github.com/numpy/numpy/pull/11830>`__: MAINT: reduce void type repr code duplication
+* `#11834 <https://github.com/numpy/numpy/pull/11834>`__: MAINT, DOC: Replace 'an' by 'a' in some docstrings.
+* `#11837 <https://github.com/numpy/numpy/pull/11837>`__: DOC: Make clear the connection between numpy types and C types
+* `#11840 <https://github.com/numpy/numpy/pull/11840>`__: BUG: Let 0-D arrays of Python timedelta convert to np.timedelta64.
+* `#11843 <https://github.com/numpy/numpy/pull/11843>`__: MAINT: remove surviving, unused, list comprehension
+* `#11849 <https://github.com/numpy/numpy/pull/11849>`__: TST: reorder duplicate mem_overlap.c compile
+* `#11850 <https://github.com/numpy/numpy/pull/11850>`__: DOC: add comment to remove fn after python 2 support is dropped
+* `#11852 <https://github.com/numpy/numpy/pull/11852>`__: BUG: timedelta64 now accepts NumPy ints
+* `#11858 <https://github.com/numpy/numpy/pull/11858>`__: DOC: add docstrings for numeric types
+* `#11862 <https://github.com/numpy/numpy/pull/11862>`__: BUG: Re-add `_ones_like` to numpy.core.umath.
+* `#11864 <https://github.com/numpy/numpy/pull/11864>`__: TST: Update travis testing to use latest virtualenv.
+* `#11865 <https://github.com/numpy/numpy/pull/11865>`__: DOC: add a Code of Conduct document.
+* `#11866 <https://github.com/numpy/numpy/pull/11866>`__: TST: Drop Python 3.4 testing
+* `#11868 <https://github.com/numpy/numpy/pull/11868>`__: MAINT: include benchmarks, complete docs, dev tool files in sdist.
+* `#11870 <https://github.com/numpy/numpy/pull/11870>`__: MAINT: dtype(unicode) should raise TypeError on failure
+* `#11874 <https://github.com/numpy/numpy/pull/11874>`__: BENCH: split out slow setup method in bench_shape_base.Block
+* `#11877 <https://github.com/numpy/numpy/pull/11877>`__: BUG: Fix memory leak in pyfragments.swg
+* `#11880 <https://github.com/numpy/numpy/pull/11880>`__: BUG: The multiarray/ufunc merge broke old wheels.
+* `#11882 <https://github.com/numpy/numpy/pull/11882>`__: DOC: Recommend the use of `np.ndim` over `np.isscalar`, and explain...
+* `#11889 <https://github.com/numpy/numpy/pull/11889>`__: BENCH: Split bench_function_base.Sort into Sort and SortWorst.
+* `#11891 <https://github.com/numpy/numpy/pull/11891>`__: MAINT: remove exec_command() from build_ext
+* `#11892 <https://github.com/numpy/numpy/pull/11892>`__: TST: Parametrize PEP3118 scalar tests.
+* `#11893 <https://github.com/numpy/numpy/pull/11893>`__: TST: Fix duplicated test name.
+* `#11894 <https://github.com/numpy/numpy/pull/11894>`__: TST: Parametrize f2py tests.
+* `#11895 <https://github.com/numpy/numpy/pull/11895>`__: TST: Parametrize some linalg tests over types.
+* `#11896 <https://github.com/numpy/numpy/pull/11896>`__: BUG: Fix matrix PendingDeprecationWarning suppression for pytest...
+* `#11898 <https://github.com/numpy/numpy/pull/11898>`__: MAINT: remove exec_command usage from ccompiler.py
+* `#11899 <https://github.com/numpy/numpy/pull/11899>`__: MAINT: remove exec_command from system_info.py
+* `#11900 <https://github.com/numpy/numpy/pull/11900>`__: MAINT: remove exec_command from gnu.py
+* `#11901 <https://github.com/numpy/numpy/pull/11901>`__: MAINT: remove exec_command usage in ibm.py
+* `#11904 <https://github.com/numpy/numpy/pull/11904>`__: Use pytest for some already-parametrized core tests
+* `#11905 <https://github.com/numpy/numpy/pull/11905>`__: TST: Start testing with "-std=c99" on travisCI.
+* `#11906 <https://github.com/numpy/numpy/pull/11906>`__: TST: add shippable ARMv8 to CI
+* `#11907 <https://github.com/numpy/numpy/pull/11907>`__: Link HOWTO_DOCUMENT to specific section on docstrings
+* `#11909 <https://github.com/numpy/numpy/pull/11909>`__: MAINT: flake8 cleanups
+* `#11910 <https://github.com/numpy/numpy/pull/11910>`__: MAINT: test, refactor design of recursive closures
+* `#11912 <https://github.com/numpy/numpy/pull/11912>`__: DOC: dtype offset and itemsize is limited by range of C int
+* `#11914 <https://github.com/numpy/numpy/pull/11914>`__: DOC: Clarify difference between PySequence_GETITEM, PyArray_GETITEM
+* `#11916 <https://github.com/numpy/numpy/pull/11916>`__: DEP: deprecate np.set_numeric_ops and friends
+* `#11920 <https://github.com/numpy/numpy/pull/11920>`__: TST: Fix 'def' test_numerictypes.py::TestSctypeDict to 'class'...
+* `#11921 <https://github.com/numpy/numpy/pull/11921>`__: MAINT: Don't rely on `__name__` in bitname - use the information...
+* `#11922 <https://github.com/numpy/numpy/pull/11922>`__: TST: Add tests for maximum_sctype
+* `#11929 <https://github.com/numpy/numpy/pull/11929>`__: DOC: #defining -> #define / Added a short explanation for Numeric
+* `#11930 <https://github.com/numpy/numpy/pull/11930>`__: DOC: fix scipy-sphinx-theme license path
+* `#11932 <https://github.com/numpy/numpy/pull/11932>`__: MAINT: Move `np.dtype.name.__get__` to python
+* `#11933 <https://github.com/numpy/numpy/pull/11933>`__: TST: Fix unit tests that used to call unittest.TestCase.fail
+* `#11934 <https://github.com/numpy/numpy/pull/11934>`__: NEP: Revert "NEP: Mark NEP 18 as accepted"
+* `#11935 <https://github.com/numpy/numpy/pull/11935>`__: MAINT: remove usage of exec_command in config.py
+* `#11937 <https://github.com/numpy/numpy/pull/11937>`__: MAINT: remove exec_command() from f2py init
+* `#11941 <https://github.com/numpy/numpy/pull/11941>`__: BUG: Ensure einsum(optimize=True) dispatches tensordot deterministically
+* `#11943 <https://github.com/numpy/numpy/pull/11943>`__: DOC: Add warning/clarification about backwards compat in NEP-18
+* `#11948 <https://github.com/numpy/numpy/pull/11948>`__: DEP: finish making all comparisons to NaT false
+* `#11949 <https://github.com/numpy/numpy/pull/11949>`__: MAINT: Small tidy-ups to `np.core._dtype`
+* `#11950 <https://github.com/numpy/numpy/pull/11950>`__: MAINT: Extract tangential improvements made in #11175
+* `#11952 <https://github.com/numpy/numpy/pull/11952>`__: MAINT: test NPY_INTERNAL_BUILD only if defined
+* `#11953 <https://github.com/numpy/numpy/pull/11953>`__: TST: codecov.yml improvements
+* `#11957 <https://github.com/numpy/numpy/pull/11957>`__: ENH: mark that large allocations can use huge pages
+* `#11958 <https://github.com/numpy/numpy/pull/11958>`__: TST: Add a test for np.pad where constant_values is an object
+* `#11959 <https://github.com/numpy/numpy/pull/11959>`__: MAINT: Explicitly cause pagefaults to happen before starting...
+* `#11961 <https://github.com/numpy/numpy/pull/11961>`__: TST: Add more tests for np.pad
+* `#11962 <https://github.com/numpy/numpy/pull/11962>`__: ENH: maximum lines of content to be read from numpy.loadtxt
+* `#11965 <https://github.com/numpy/numpy/pull/11965>`__: BENCH: Add a benchmark comparing block to copy in the 3D case
+* `#11966 <https://github.com/numpy/numpy/pull/11966>`__: MAINT: Rewrite shape normalization in pad function
+* `#11967 <https://github.com/numpy/numpy/pull/11967>`__: BUG: fix refcount leak in PyArray_AdaptFlexibleDType
+* `#11971 <https://github.com/numpy/numpy/pull/11971>`__: MAINT: Block algorithm with a single copy per call to `block`
+* `#11973 <https://github.com/numpy/numpy/pull/11973>`__: BUG: fix cached allocations without the GIL
+* `#11976 <https://github.com/numpy/numpy/pull/11976>`__: MAINT/DOC: Show the location of an empty list in np.block
+* `#11979 <https://github.com/numpy/numpy/pull/11979>`__: MAINT: Ensure that a copy of the array is returned when calling...
+* `#11989 <https://github.com/numpy/numpy/pull/11989>`__: BUG: Ensure boolean indexing of subclasses sets base correctly.
+* `#11991 <https://github.com/numpy/numpy/pull/11991>`__: MAINT: speed up `_block` by avoiding a recursive closure
+* `#11996 <https://github.com/numpy/numpy/pull/11996>`__: TST: Parametrize and break apart dtype tests
+* `#11997 <https://github.com/numpy/numpy/pull/11997>`__: MAINT: Extract string helpers to a new private file
+* `#12002 <https://github.com/numpy/numpy/pull/12002>`__: Revert "NEP: Revert "NEP: Mark NEP 18 as accepted""
+* `#12004 <https://github.com/numpy/numpy/pull/12004>`__: BUG: Fix f2py compile function testing.
+* `#12005 <https://github.com/numpy/numpy/pull/12005>`__: ENH: initial implementation of core `__array_function__` machinery
+* `#12008 <https://github.com/numpy/numpy/pull/12008>`__: MAINT: Reassociate `np.cast` with the comment describing it
+* `#12009 <https://github.com/numpy/numpy/pull/12009>`__: MAINT: Eliminate the private `numerictypes._typestr`
+* `#12011 <https://github.com/numpy/numpy/pull/12011>`__: ENH: implementation of array_reduce_ex
+* `#12012 <https://github.com/numpy/numpy/pull/12012>`__: MAINT: Extract the crazy number of type aliases to their own...
+* `#12014 <https://github.com/numpy/numpy/pull/12014>`__: TST: prefer pytest.skip() over SkipTest
+* `#12015 <https://github.com/numpy/numpy/pull/12015>`__: TST: improve warnings parallel test safety
+* `#12017 <https://github.com/numpy/numpy/pull/12017>`__: NEP: add 3 missing data NEPs rescued from 2011-2012
+* `#12018 <https://github.com/numpy/numpy/pull/12018>`__: MAINT: Simplify parts of `_type_aliases`
+* `#12019 <https://github.com/numpy/numpy/pull/12019>`__: DOC: MAINT: address comments @eric-wieser on NEP 24-26 PR.
+* `#12020 <https://github.com/numpy/numpy/pull/12020>`__: TST: Add tests for np.sctype2char
+* `#12021 <https://github.com/numpy/numpy/pull/12021>`__: DOC: Post NumPy 1.15.2 release updates.[ci skip]
+* `#12024 <https://github.com/numpy/numpy/pull/12024>`__: MAINT: Normalize axes the normal way in fftpack.py
+* `#12027 <https://github.com/numpy/numpy/pull/12027>`__: DOC: Add docstrings for abstract types in scalar type hierarchy
+* `#12030 <https://github.com/numpy/numpy/pull/12030>`__: DOC: use "import numpy as np" style
+* `#12032 <https://github.com/numpy/numpy/pull/12032>`__: BUG: check return value from PyArray_PromoteTypes
+* `#12033 <https://github.com/numpy/numpy/pull/12033>`__: TST: Mark check for f2py script xfail.
+* `#12034 <https://github.com/numpy/numpy/pull/12034>`__: MAINT: Add version deprecated to some deprecation messages.
+* `#12035 <https://github.com/numpy/numpy/pull/12035>`__: BUG: Fix memory leak in PY3K buffer code.
+* `#12041 <https://github.com/numpy/numpy/pull/12041>`__: MAINT: remove duplicate imports
+* `#12042 <https://github.com/numpy/numpy/pull/12042>`__: MAINT: cleanup and better document core/overrides.py
+* `#12045 <https://github.com/numpy/numpy/pull/12045>`__: BUG: fix memory leak of buffer format string
+* `#12048 <https://github.com/numpy/numpy/pull/12048>`__: BLD: pin sphinx to 1.7.9
+* `#12051 <https://github.com/numpy/numpy/pull/12051>`__: TST: add macos azure testing to CI
+* `#12054 <https://github.com/numpy/numpy/pull/12054>`__: MAINT: avoid modifying mutable default values
+* `#12056 <https://github.com/numpy/numpy/pull/12056>`__: MAINT: The crackfortran function is called with an extra argument
+* `#12057 <https://github.com/numpy/numpy/pull/12057>`__: MAINT: remove unused imports
+* `#12058 <https://github.com/numpy/numpy/pull/12058>`__: MAINT: remove redundant assignment
+* `#12060 <https://github.com/numpy/numpy/pull/12060>`__: MAINT: remove unused stdlib imports
+* `#12061 <https://github.com/numpy/numpy/pull/12061>`__: MAINT: remove redundant imports
+* `#12062 <https://github.com/numpy/numpy/pull/12062>`__: BUG: `OBJECT_to_*` should check for errors
+* `#12064 <https://github.com/numpy/numpy/pull/12064>`__: MAINT: delay initialization of getlimits (circular imports)
+* `#12072 <https://github.com/numpy/numpy/pull/12072>`__: BUG: test_path() now uses Path.resolve()
+* `#12073 <https://github.com/numpy/numpy/pull/12073>`__: MAINT Avoid some memory copies in numpy.polynomial.hermite
+* `#12079 <https://github.com/numpy/numpy/pull/12079>`__: MAINT: Blacklist some MSVC complex functions.
+* `#12081 <https://github.com/numpy/numpy/pull/12081>`__: TST: add Windows test matrix to Azure CI
+* `#12082 <https://github.com/numpy/numpy/pull/12082>`__: TST: Add Python 3.5 to Azure windows CI.
+* `#12088 <https://github.com/numpy/numpy/pull/12088>`__: BUG: limit default for get_num_build_jobs() to 8
+* `#12089 <https://github.com/numpy/numpy/pull/12089>`__: BUG: Fix in-place permutation
+* `#12090 <https://github.com/numpy/numpy/pull/12090>`__: TST, MAINT: Update pickling tests by making them loop over all...
+* `#12091 <https://github.com/numpy/numpy/pull/12091>`__: TST: Install pickle5 for CI testing with python 3.6/7
+* `#12093 <https://github.com/numpy/numpy/pull/12093>`__: Provide information about what kind is actually not integer kind
+* `#12099 <https://github.com/numpy/numpy/pull/12099>`__: ENH: Validate dispatcher functions in array_function_dispatch
+* `#12102 <https://github.com/numpy/numpy/pull/12102>`__: TST: improve coverage of nd_grid
+* `#12103 <https://github.com/numpy/numpy/pull/12103>`__: MAINT: Add azure-pipeline status badge to README.md
+* `#12106 <https://github.com/numpy/numpy/pull/12106>`__: TST, MAINT: Skip some f2py tests on Mac.
+* `#12108 <https://github.com/numpy/numpy/pull/12108>`__: BUG: Allow boolean subtract in histogram
+* `#12109 <https://github.com/numpy/numpy/pull/12109>`__: TST: add unit test for issctype
+* `#12112 <https://github.com/numpy/numpy/pull/12112>`__: ENH: check getfield arguments to prevent invalid memory access
+* `#12115 <https://github.com/numpy/numpy/pull/12115>`__: ENH: `__array_function__` support for most of `numpy.core`
+* `#12116 <https://github.com/numpy/numpy/pull/12116>`__: ENH: `__array_function__` support for `np.lib`, part 1/2
+* `#12117 <https://github.com/numpy/numpy/pull/12117>`__: ENH: `__array_function__` support for `np.fft` and `np.linalg`
+* `#12119 <https://github.com/numpy/numpy/pull/12119>`__: ENH: `__array_function__` support for `np.lib`, part 2/2
+* `#12120 <https://github.com/numpy/numpy/pull/12120>`__: ENH: add timedelta modulus operator support (mm)
+* `#12121 <https://github.com/numpy/numpy/pull/12121>`__: MAINT: Clarify the error message for resize failure
+* `#12123 <https://github.com/numpy/numpy/pull/12123>`__: DEP: deprecate asscalar
+* `#12124 <https://github.com/numpy/numpy/pull/12124>`__: BUG: refactor float error status to support Alpine linux
+* `#12125 <https://github.com/numpy/numpy/pull/12125>`__: TST: expand cases in test_issctype()
+* `#12127 <https://github.com/numpy/numpy/pull/12127>`__: BUG: Fix memory leak in mapping.c
+* `#12131 <https://github.com/numpy/numpy/pull/12131>`__: BUG: fix PyDataType_ISBOOL
+* `#12133 <https://github.com/numpy/numpy/pull/12133>`__: MAINT, TST refactor pickle imports and tests
+* `#12134 <https://github.com/numpy/numpy/pull/12134>`__: DOC: Remove duplicated sentence in numpy.multiply
+* `#12137 <https://github.com/numpy/numpy/pull/12137>`__: TST: error tests for fill_diagonal()
+* `#12138 <https://github.com/numpy/numpy/pull/12138>`__: TST: error tests for diag_indices_from()
+* `#12140 <https://github.com/numpy/numpy/pull/12140>`__: DOC: fixups for NEP-18 based on the implementation
+* `#12141 <https://github.com/numpy/numpy/pull/12141>`__: DOC: minor tweak to CoC (update NumFOCUS contact address).
+* `#12145 <https://github.com/numpy/numpy/pull/12145>`__: MAINT: Update ndarrayobject.h `__cplusplus` block.
+* `#12146 <https://github.com/numpy/numpy/pull/12146>`__: MAINT: Fix typo in comment
+* `#12147 <https://github.com/numpy/numpy/pull/12147>`__: MAINT: Move duplicated type_reso_error code into a helper function
+* `#12148 <https://github.com/numpy/numpy/pull/12148>`__: DOC: document NEP-18 overrides in release notes
+* `#12151 <https://github.com/numpy/numpy/pull/12151>`__: TST: byte_bounds contiguity handling
+* `#12153 <https://github.com/numpy/numpy/pull/12153>`__: DOC, TST: cover setdiff1d assume_unique
+* `#12154 <https://github.com/numpy/numpy/pull/12154>`__: ENH: `__array_function__` for `np.core.defchararray`
+* `#12155 <https://github.com/numpy/numpy/pull/12155>`__: MAINT: Define Py_SETREF for pre-3.5.2 python and use in code
+* `#12157 <https://github.com/numpy/numpy/pull/12157>`__: ENH: Add support for third-party path-like objects by backporting...
+* `#12159 <https://github.com/numpy/numpy/pull/12159>`__: MAINT: remove unused nd_grid `__len__`.
+* `#12163 <https://github.com/numpy/numpy/pull/12163>`__: ENH: `__array_function__` for `np.einsum` and `np.block`
+* `#12165 <https://github.com/numpy/numpy/pull/12165>`__: Mark NEP 22 as accepted, and add "Informational" NEPs to NEP...
+* `#12166 <https://github.com/numpy/numpy/pull/12166>`__: NEP: Add zero-rank arrays historical info NEP
+* `#12173 <https://github.com/numpy/numpy/pull/12173>`__: NEP: add notes about updates to NEP-18
+* `#12174 <https://github.com/numpy/numpy/pull/12174>`__: NEP 16 abstract arrays: rebased and marked as "Withdrawn"
+* `#12175 <https://github.com/numpy/numpy/pull/12175>`__: ENH: `__array_function__` for multiarray functions
+* `#12176 <https://github.com/numpy/numpy/pull/12176>`__: TST: add test for weighted histogram mismatch
+* `#12177 <https://github.com/numpy/numpy/pull/12177>`__: MAINT: remove unused `_assertSquareness()`
+* `#12179 <https://github.com/numpy/numpy/pull/12179>`__: MAINT: Move `_kind_to_stem` to `np.core._dtype`, so that it can...
+* `#12180 <https://github.com/numpy/numpy/pull/12180>`__: NEP: change toc titles, cross reference, mark 16 superseded
+* `#12181 <https://github.com/numpy/numpy/pull/12181>`__: MAINT: fix deprecation message typo for np.sum
+* `#12185 <https://github.com/numpy/numpy/pull/12185>`__: TST: test multi_dot with 2 arrays
+* `#12199 <https://github.com/numpy/numpy/pull/12199>`__: TST: add Azure CI triggers
+* `#12209 <https://github.com/numpy/numpy/pull/12209>`__: Delay import of distutils.msvccompiler to avoid warning on non-Windows.
+* `#12211 <https://github.com/numpy/numpy/pull/12211>`__: DOC: Clarify the examples for argmax and argmin
+* `#12212 <https://github.com/numpy/numpy/pull/12212>`__: MAINT: `ndarray.__repr__` should not rely on `__array_function__`
+* `#12214 <https://github.com/numpy/numpy/pull/12214>`__: TST: add test for tensorinv()
+* `#12215 <https://github.com/numpy/numpy/pull/12215>`__: TST: test dims match on lstsq()
+* `#12216 <https://github.com/numpy/numpy/pull/12216>`__: TST: test invalid histogram range
+* `#12217 <https://github.com/numpy/numpy/pull/12217>`__: TST: test histogram bins dims
+* `#12219 <https://github.com/numpy/numpy/pull/12219>`__: ENH: make matmul into a ufunc
+* `#12222 <https://github.com/numpy/numpy/pull/12222>`__: TST: unit tests for column_stack.
+* `#12224 <https://github.com/numpy/numpy/pull/12224>`__: BUG: Fix MaskedArray fill_value type conversion.
+* `#12229 <https://github.com/numpy/numpy/pull/12229>`__: MAINT: Fix typo in comment
+* `#12236 <https://github.com/numpy/numpy/pull/12236>`__: BUG: maximum, minimum no longer emit warnings on NAN
+* `#12240 <https://github.com/numpy/numpy/pull/12240>`__: BUG: Fix crash in repr of void subclasses
+* `#12241 <https://github.com/numpy/numpy/pull/12241>`__: TST: arg handling tests in histogramdd
+* `#12243 <https://github.com/numpy/numpy/pull/12243>`__: BUG: Fix misleading assert message in assert_almost_equal #12200
+* `#12245 <https://github.com/numpy/numpy/pull/12245>`__: TST: tests for sort_complex()
+* `#12246 <https://github.com/numpy/numpy/pull/12246>`__: DOC: Update docs after NumPy 1.15.3 release.
+* `#12249 <https://github.com/numpy/numpy/pull/12249>`__: BUG: Dealloc cached buffer info
+* `#12250 <https://github.com/numpy/numpy/pull/12250>`__: DOC: add missing docs
+* `#12251 <https://github.com/numpy/numpy/pull/12251>`__: MAINT: improved error message when no `__array_function__` implementation...
+* `#12254 <https://github.com/numpy/numpy/pull/12254>`__: MAINT: Move ctype -> dtype conversion to python
+* `#12257 <https://github.com/numpy/numpy/pull/12257>`__: BUG: Fix fill value in masked array '==' and '!=' ops.
+* `#12259 <https://github.com/numpy/numpy/pull/12259>`__: TST: simplify how the different code paths for block are tested.
+* `#12265 <https://github.com/numpy/numpy/pull/12265>`__: BUG: Revert linspace import for concatenation funcs
+* `#12266 <https://github.com/numpy/numpy/pull/12266>`__: BUG: Avoid SystemErrors by checking the return value of PyPrint
+* `#12268 <https://github.com/numpy/numpy/pull/12268>`__: DOC: add broadcasting article from scipy old-wiki
+* `#12270 <https://github.com/numpy/numpy/pull/12270>`__: MAINT: set `__module__` for more `array_function_dispatch` uses
+* `#12276 <https://github.com/numpy/numpy/pull/12276>`__: MAINT: remove unused parse_index()
+* `#12279 <https://github.com/numpy/numpy/pull/12279>`__: NEP: tweak and mark NEP 0027 as final
+* `#12280 <https://github.com/numpy/numpy/pull/12280>`__: DEP: deprecate passing a generator to stack functions
+* `#12281 <https://github.com/numpy/numpy/pull/12281>`__: NEP: revise note for NEP 27
+* `#12285 <https://github.com/numpy/numpy/pull/12285>`__: ENH: array does not need to be writable to use as input to take
+* `#12286 <https://github.com/numpy/numpy/pull/12286>`__: ENH: Do not emit compiler warning if forcing old API
+* `#12288 <https://github.com/numpy/numpy/pull/12288>`__: BUILD: force LGTM to use cython>=0.29
+* `#12291 <https://github.com/numpy/numpy/pull/12291>`__: MAINT: `_set_out_array()` syntax fix
+* `#12292 <https://github.com/numpy/numpy/pull/12292>`__: MAINT: removed unused vars in f2py test code
+* `#12299 <https://github.com/numpy/numpy/pull/12299>`__: BUILD: use system python3 in the chroot
+* `#12302 <https://github.com/numpy/numpy/pull/12302>`__: DOC: Update the docstring of asfortranarray and ascontiguousarray
+* `#12306 <https://github.com/numpy/numpy/pull/12306>`__: TST: add 32-bit linux Azure CI job
+* `#12312 <https://github.com/numpy/numpy/pull/12312>`__: MAINT, TST: unreachable Python code paths
+* `#12321 <https://github.com/numpy/numpy/pull/12321>`__: MAINT: Simple speed-ups for getting overloaded types
+* `#12326 <https://github.com/numpy/numpy/pull/12326>`__: DOC: NumPy 1.15.4 post release documentation update.
+* `#12328 <https://github.com/numpy/numpy/pull/12328>`__: MAINT: Allow subclasses in `ndarray.__array_function__`.
+* `#12330 <https://github.com/numpy/numpy/pull/12330>`__: TST: test_tofile_fromfile now uses initialized memory
+* `#12331 <https://github.com/numpy/numpy/pull/12331>`__: DEV: change ASV benchmarks to run on Python 3.6 by default
+* `#12338 <https://github.com/numpy/numpy/pull/12338>`__: DOC: add a docstring for the function 'compare_chararrays' (See...
+* `#12342 <https://github.com/numpy/numpy/pull/12342>`__: BUG: Fix for np.dtype(ctypes.Structure) does not respect _pack_...
+* `#12347 <https://github.com/numpy/numpy/pull/12347>`__: DOC: typo in docstring numpy.random.beta, shape parameters must...
+* `#12349 <https://github.com/numpy/numpy/pull/12349>`__: TST, DOC: store circleci doc artifacts
+* `#12353 <https://github.com/numpy/numpy/pull/12353>`__: BUG: test, fix for threshold='nan'
+* `#12354 <https://github.com/numpy/numpy/pull/12354>`__: BUG: Fix segfault when an error occurs in np.fromfile
+* `#12355 <https://github.com/numpy/numpy/pull/12355>`__: BUG: fix a bug in npy_PyFile_Dup2 where it didn't return immediately...
+* `#12357 <https://github.com/numpy/numpy/pull/12357>`__: MAINT: Cleanup pavement file
+* `#12358 <https://github.com/numpy/numpy/pull/12358>`__: BUG: test, fix loading structured dtypes with padding
+* `#12362 <https://github.com/numpy/numpy/pull/12362>`__: MAINT: disable `__array_function__` dispatch unless environment...
+* `#12363 <https://github.com/numpy/numpy/pull/12363>`__: MAINT: update gfortran RPATH for AIX/Windows non-support.
+* `#12364 <https://github.com/numpy/numpy/pull/12364>`__: NEP: clarify the purpose of "types" in `__array_function__`.
+* `#12366 <https://github.com/numpy/numpy/pull/12366>`__: MAINT: Refactor sorting header file
+* `#12372 <https://github.com/numpy/numpy/pull/12372>`__: BUG: random: Fix handling of a=0 for numpy.random.weibull.
+* `#12373 <https://github.com/numpy/numpy/pull/12373>`__: MAINT: Improve error message for legal but unsupported PEP3118...
+* `#12376 <https://github.com/numpy/numpy/pull/12376>`__: BUG: do not override exception on import failure
+* `#12377 <https://github.com/numpy/numpy/pull/12377>`__: NEP: move nep 15 from accepted to final
+* `#12378 <https://github.com/numpy/numpy/pull/12378>`__: TST: Update complex long double precision tests.
+* `#12380 <https://github.com/numpy/numpy/pull/12380>`__: BUG: Fix for #10533 np.dtype(ctype) does not respect endianness
+* `#12381 <https://github.com/numpy/numpy/pull/12381>`__: BUG: graceful DataSource __del__ when __init__ fails
+* `#12382 <https://github.com/numpy/numpy/pull/12382>`__: ENH: set correct __module__ for objects in numpy's public API
+* `#12388 <https://github.com/numpy/numpy/pull/12388>`__: ENH: allow arrays for start and stop in {lin,log,geom}space
+* `#12390 <https://github.com/numpy/numpy/pull/12390>`__: DEV: remove shim added in 1.4
+* `#12391 <https://github.com/numpy/numpy/pull/12391>`__: DEP: raise on a call to deprecated numpy.lib.function_base.unique
+* `#12392 <https://github.com/numpy/numpy/pull/12392>`__: DOC: Add release notes for ctypes improvements
+* `#12398 <https://github.com/numpy/numpy/pull/12398>`__: BUG: fix possible overlap issues with avx enabled
+* `#12399 <https://github.com/numpy/numpy/pull/12399>`__: DOC: Fix typo in polyint. Fixes #12386.
+* `#12405 <https://github.com/numpy/numpy/pull/12405>`__: ENH: Add support for `np.dtype(ctypes.Union)`
+* `#12407 <https://github.com/numpy/numpy/pull/12407>`__: BUG: Fall back to 'ascii' locale in build (if needed)
+* `#12408 <https://github.com/numpy/numpy/pull/12408>`__: BUG: multifield-view of MaskedArray gets bad fill_value
+* `#12409 <https://github.com/numpy/numpy/pull/12409>`__: MAINT: correct the dtype.descr docstring
+* `#12413 <https://github.com/numpy/numpy/pull/12413>`__: BUG: Do not double-quote arguments to the command line
+* `#12414 <https://github.com/numpy/numpy/pull/12414>`__: MAINT: Update cversion hash.
+* `#12417 <https://github.com/numpy/numpy/pull/12417>`__: BUG: Fix regression on np.dtype(ctypes.c_void_p)
+* `#12419 <https://github.com/numpy/numpy/pull/12419>`__: Fix PyArray_FillFunc function definitions
+* `#12420 <https://github.com/numpy/numpy/pull/12420>`__: gfortran needs -lpthread & -maix64(64 build) in AIX
+* `#12422 <https://github.com/numpy/numpy/pull/12422>`__: MNT: Reword error message about loading pickled data.
+* `#12424 <https://github.com/numpy/numpy/pull/12424>`__: BUG: Fix inconsistent cache keying in ndpointer
+* `#12429 <https://github.com/numpy/numpy/pull/12429>`__: MAINT: Update mailmap for 1.16.0 release.
+* `#12431 <https://github.com/numpy/numpy/pull/12431>`__: BUG/ENH: Fix use of ndpointer in return values
+* `#12437 <https://github.com/numpy/numpy/pull/12437>`__: MAINT: refactor datetime.c_metadata creation
+* `#12439 <https://github.com/numpy/numpy/pull/12439>`__: BUG: test, fix NPY_VISIBILITY_HIDDEN on gcc, which becomes NPY_NO_EXPORT
+* `#12440 <https://github.com/numpy/numpy/pull/12440>`__: BUG: don't override original errors when casting inside np.dot()...
+* `#12443 <https://github.com/numpy/numpy/pull/12443>`__: MAINT Use set literals
+* `#12445 <https://github.com/numpy/numpy/pull/12445>`__: MAINT: Use list and dict comprehension when possible
+* `#12446 <https://github.com/numpy/numpy/pull/12446>`__: MAINT: Fixups to new functions in np.lib.recfunctions
+* `#12447 <https://github.com/numpy/numpy/pull/12447>`__: ENH: add back the multifield copy->view change
+* `#12448 <https://github.com/numpy/numpy/pull/12448>`__: MAINT: Review F401,F841,F842 flake8 errors (unused variables...
+* `#12455 <https://github.com/numpy/numpy/pull/12455>`__: TST: use condition directive for Azure 2.7 check
+* `#12458 <https://github.com/numpy/numpy/pull/12458>`__: MAINT, DOC: fix Azure README badge
+* `#12464 <https://github.com/numpy/numpy/pull/12464>`__: BUG: IndexError for empty list on structured MaskedArray.
+* `#12466 <https://github.com/numpy/numpy/pull/12466>`__: TST: use openblas for Windows CI
+* `#12470 <https://github.com/numpy/numpy/pull/12470>`__: MAINT: remove wrapper functions from numpy.core.multiarray
+* `#12471 <https://github.com/numpy/numpy/pull/12471>`__: ENH: override support for np.linspace and friends
+* `#12474 <https://github.com/numpy/numpy/pull/12474>`__: TST: enable dispatcher test coverage
+* `#12477 <https://github.com/numpy/numpy/pull/12477>`__: DOC: fix example for __call__. See #12451
+* `#12486 <https://github.com/numpy/numpy/pull/12486>`__: DOC: Update copyright year in the license
+* `#12488 <https://github.com/numpy/numpy/pull/12488>`__: ENH: implement matmul on NDArrayOperatorsMixin
+* `#12493 <https://github.com/numpy/numpy/pull/12493>`__: BUG: fix records.fromfile fails to read data >4 GB
+* `#12494 <https://github.com/numpy/numpy/pull/12494>`__: BUG: test, fix matmul, dot for vector array with stride[i]=0
+* `#12498 <https://github.com/numpy/numpy/pull/12498>`__: TST: sync Azure Win openblas
+* `#12501 <https://github.com/numpy/numpy/pull/12501>`__: MAINT: removed word/typo from comment in site.cfg.example
+* `#12556 <https://github.com/numpy/numpy/pull/12556>`__: BUG: only override vector size for avx code for 1.16
+* `#12562 <https://github.com/numpy/numpy/pull/12562>`__: DOC, MAINT: Make `PYVER = 3` in doc/Makefile.
+* `#12563 <https://github.com/numpy/numpy/pull/12563>`__: DOC: more doc updates for structured arrays
+* `#12564 <https://github.com/numpy/numpy/pull/12564>`__: BUG: fix an unsafe PyTuple_GET_ITEM call
+* `#12565 <https://github.com/numpy/numpy/pull/12565>`__: Fix lgtm.com C/C++ build
+* `#12567 <https://github.com/numpy/numpy/pull/12567>`__: BUG: reorder operations for VS2015
+* `#12568 <https://github.com/numpy/numpy/pull/12568>`__: BUG: fix improper use of C-API
+* `#12569 <https://github.com/numpy/numpy/pull/12569>`__: BUG: Make new-lines in compiler error messages print to the console
+* `#12570 <https://github.com/numpy/numpy/pull/12570>`__: MAINT: don't check alignment size=0 arrays (RELAXED_STRIDES)
+* `#12573 <https://github.com/numpy/numpy/pull/12573>`__: BUG: fix refcount issue caused by #12524
+* `#12580 <https://github.com/numpy/numpy/pull/12580>`__: BUG: fix segfault in ctypeslib with obj being collected
+* `#12581 <https://github.com/numpy/numpy/pull/12581>`__: TST: activate shippable maintenance branches
+* `#12582 <https://github.com/numpy/numpy/pull/12582>`__: BUG: fix f2py pep338 execution method
+* `#12587 <https://github.com/numpy/numpy/pull/12587>`__: BUG: Make `arr.ctypes.data` hold a reference to the underlying...
+* `#12588 <https://github.com/numpy/numpy/pull/12588>`__: BUG: check for errors after PyArray_DESCR_REPLACE
+* `#12590 <https://github.com/numpy/numpy/pull/12590>`__: DOC, MAINT: Prepare for 1.16.0rc1 release.
+* `#12603 <https://github.com/numpy/numpy/pull/12603>`__: DOC: Fix markup in 1.16.0 release notes.
+* `#12621 <https://github.com/numpy/numpy/pull/12621>`__: BUG: longdouble with elsize 12 is never uint alignable.
+* `#12622 <https://github.com/numpy/numpy/pull/12622>`__: BUG: Add missing free in ufunc dealloc
+* `#12623 <https://github.com/numpy/numpy/pull/12623>`__: MAINT: add test for 12-byte alignment
+* `#12655 <https://github.com/numpy/numpy/pull/12655>`__: BUG: fix uint alignment asserts in lowlevel loops
+* `#12656 <https://github.com/numpy/numpy/pull/12656>`__: BENCH: don't fail at import time with old Numpy
+* `#12657 <https://github.com/numpy/numpy/pull/12657>`__: DOC: update 2018 -> 2019
+* `#12705 <https://github.com/numpy/numpy/pull/12705>`__: ENH: Better links in documentation
+* `#12706 <https://github.com/numpy/numpy/pull/12706>`__: MAINT: Further fixups to uint alignment checks
+* `#12707 <https://github.com/numpy/numpy/pull/12707>`__: BUG: Add 'sparc' to platforms implementing 16 byte reals.
+* `#12708 <https://github.com/numpy/numpy/pull/12708>`__: TST: Fix endianness in unstructured_to_structured test
+* `#12710 <https://github.com/numpy/numpy/pull/12710>`__: TST: pin Azure brew version for stability.
diff --git a/doc/changelog/1.16.1-changelog.rst b/doc/changelog/1.16.1-changelog.rst
new file mode 100644
index 000000000..30e0e3a24
--- /dev/null
+++ b/doc/changelog/1.16.1-changelog.rst
@@ -0,0 +1,62 @@
+
+Contributors
+============
+
+A total of 16 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Antoine Pitrou
+* Arcesio Castaneda Medina +
+* Charles Harris
+* Chris Markiewicz +
+* Christoph Gohlke
+* Christopher J. Markiewicz +
+* Daniel Hrisca +
+* EelcoPeacs +
+* Eric Wieser
+* Kevin Sheppard
+* Matti Picus
+* OBATA Akio +
+* Ralf Gommers
+* Sebastian Berg
+* Stephan Hoyer
+* Tyler Reddy
+
+Pull requests merged
+====================
+
+A total of 33 pull requests were merged for this release.
+
+* `#12754 <https://github.com/numpy/numpy/pull/12754>`__: BUG: Check paths are unicode, bytes or path-like
+* `#12767 <https://github.com/numpy/numpy/pull/12767>`__: ENH: add mm->q floordiv
+* `#12768 <https://github.com/numpy/numpy/pull/12768>`__: ENH: port np.core.overrides to C for speed
+* `#12769 <https://github.com/numpy/numpy/pull/12769>`__: ENH: Add np.ctypeslib.as_ctypes_type(dtype), improve `np.ctypeslib.as_ctypes`
+* `#12771 <https://github.com/numpy/numpy/pull/12771>`__: BUG: Ensure probabilities are not NaN in choice
+* `#12772 <https://github.com/numpy/numpy/pull/12772>`__: MAINT: add warning to numpy.distutils for LDFLAGS append behavior.
+* `#12773 <https://github.com/numpy/numpy/pull/12773>`__: ENH: add "max difference" messages to np.testing.assert_array_equal...
+* `#12774 <https://github.com/numpy/numpy/pull/12774>`__: BUG: Fix incorrect/missing reference cleanups found using valgrind
+* `#12776 <https://github.com/numpy/numpy/pull/12776>`__: BUG,TST: Remove the misguided `run_command` that wraps subprocess
+* `#12777 <https://github.com/numpy/numpy/pull/12777>`__: DOC, TST: Clean up matplotlib imports
+* `#12781 <https://github.com/numpy/numpy/pull/12781>`__: BUG: Fix reference counting for subarrays containing objects
+* `#12782 <https://github.com/numpy/numpy/pull/12782>`__: BUG: Ensure failing memory allocations are reported
+* `#12784 <https://github.com/numpy/numpy/pull/12784>`__: BUG: Fix leak of void scalar buffer info
+* `#12788 <https://github.com/numpy/numpy/pull/12788>`__: MAINT: Change the order of checking for local file.
+* `#12808 <https://github.com/numpy/numpy/pull/12808>`__: BUG: loosen kwargs requirements in ediff1d
+* `#12809 <https://github.com/numpy/numpy/pull/12809>`__: DOC: clarify the extent of __array_function__ support in NumPy...
+* `#12810 <https://github.com/numpy/numpy/pull/12810>`__: BUG: Check that dtype or formats arguments are not None.
+* `#12811 <https://github.com/numpy/numpy/pull/12811>`__: BUG: fix f2py problem to build wrappers using PGI's Fortran
+* `#12812 <https://github.com/numpy/numpy/pull/12812>`__: BUG: double decref of dtype in failure codepath. Test and fix
+* `#12813 <https://github.com/numpy/numpy/pull/12813>`__: BUG, DOC: test, fix that f2py.compile accepts str and bytes,...
+* `#12816 <https://github.com/numpy/numpy/pull/12816>`__: BUG: resolve writeback in arr_insert failure paths
+* `#12820 <https://github.com/numpy/numpy/pull/12820>`__: ENH: Add mm->qm divmod
+* `#12843 <https://github.com/numpy/numpy/pull/12843>`__: BUG: fix to check before apply `shlex.split`
+* `#12844 <https://github.com/numpy/numpy/pull/12844>`__: BUG: Fix SystemError when pickling datetime64 array with pickle5
+* `#12845 <https://github.com/numpy/numpy/pull/12845>`__: BUG: Fix rounding of denormals in double and float to half casts.
+* `#12868 <https://github.com/numpy/numpy/pull/12868>`__: TEST: pin mingw version
+* `#12869 <https://github.com/numpy/numpy/pull/12869>`__: BUG: ndarrays pickled by 1.16 cannot be loaded by 1.15.4 and...
+* `#12870 <https://github.com/numpy/numpy/pull/12870>`__: BUG: do not Py_DECREF NULL pointer
+* `#12890 <https://github.com/numpy/numpy/pull/12890>`__: ENH: add _dtype_ctype to namespace for freeze analysis
+* `#12891 <https://github.com/numpy/numpy/pull/12891>`__: BUG: fail if old multiarray module detected
+* `#12898 <https://github.com/numpy/numpy/pull/12898>`__: BUG: Do not double-quote arguments passed on to the linker
+* `#12899 <https://github.com/numpy/numpy/pull/12899>`__: BUG: Do not insert extra double quote into preprocessor macros
+* `#12902 <https://github.com/numpy/numpy/pull/12902>`__: DOC: Prepare for 1.16.1 release.
diff --git a/doc/changelog/1.16.2-changelog.rst b/doc/changelog/1.16.2-changelog.rst
new file mode 100644
index 000000000..3cf0cc566
--- /dev/null
+++ b/doc/changelog/1.16.2-changelog.rst
@@ -0,0 +1,25 @@
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Matti Picus
+* Tyler Reddy
+* Tony LaTorre +
+
+Pull requests merged
+====================
+
+A total of 7 pull requests were merged for this release.
+
+* `#12909 <https://github.com/numpy/numpy/pull/12909>`__: TST: fix vmImage dispatch in Azure
+* `#12923 <https://github.com/numpy/numpy/pull/12923>`__: MAINT: remove complicated test of multiarray import failure mode
+* `#13020 <https://github.com/numpy/numpy/pull/13020>`__: BUG: fix signed zero behavior in npy_divmod
+* `#13026 <https://github.com/numpy/numpy/pull/13026>`__: MAINT: Add functions to parse shell-strings in the platform-native...
+* `#13028 <https://github.com/numpy/numpy/pull/13028>`__: BUG: Fix regression in parsing of F90 and F77 environment variables
+* `#13038 <https://github.com/numpy/numpy/pull/13038>`__: BUG: parse shell escaping in extra_compile_args and extra_link_args
+* `#13041 <https://github.com/numpy/numpy/pull/13041>`__: BLD: Windows absolute path DLL loading
diff --git a/doc/changelog/1.16.3-changelog.rst b/doc/changelog/1.16.3-changelog.rst
new file mode 100644
index 000000000..96291c0ae
--- /dev/null
+++ b/doc/changelog/1.16.3-changelog.rst
@@ -0,0 +1,55 @@
+
+Contributors
+============
+
+A total of 16 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Andreas Schwab
+* Bharat Raghunathan +
+* Bran +
+* Charles Harris
+* Eric Wieser
+* Jakub Wilk
+* Kevin Sheppard
+* Marten van Kerkwijk
+* Matti Picus
+* Paul Ivanov
+* Ralf Gommers
+* Sebastian Berg
+* Tyler Reddy
+* Warren Weckesser
+* Yu Feng
+* adeak +
+
+Pull requests merged
+====================
+
+A total of 26 pull requests were merged for this release.
+
+* `#13072 <https://github.com/numpy/numpy/pull/13072>`__: BUG: Fixes to numpy.distutils.Configuration.get_version (#13056)
+* `#13082 <https://github.com/numpy/numpy/pull/13082>`__: BUG: Fix errors in string formatting while producing an error
+* `#13083 <https://github.com/numpy/numpy/pull/13083>`__: BUG: Convert fortran flags in environment variable
+* `#13084 <https://github.com/numpy/numpy/pull/13084>`__: BUG: Remove error-prone borrowed reference handling
+* `#13085 <https://github.com/numpy/numpy/pull/13085>`__: BUG: Add error checks when converting integers to datetime types
+* `#13091 <https://github.com/numpy/numpy/pull/13091>`__: BUG: Remove our patched version of `distutils.split_quoted`
+* `#13141 <https://github.com/numpy/numpy/pull/13141>`__: BUG: Fix testsuite failures on ppc and riscv
+* `#13142 <https://github.com/numpy/numpy/pull/13142>`__: BUG: Fix parameter validity checks in ``random.choice``
+* `#13143 <https://github.com/numpy/numpy/pull/13143>`__: BUG: Ensure linspace works on object input.
+* `#13144 <https://github.com/numpy/numpy/pull/13144>`__: BLD: fix include list for sdist building.
+* `#13145 <https://github.com/numpy/numpy/pull/13145>`__: BUG: __array_interface__ offset was always ignored
+* `#13274 <https://github.com/numpy/numpy/pull/13274>`__: MAINT: f2py: Add a cast to avoid a compiler warning.
+* `#13275 <https://github.com/numpy/numpy/pull/13275>`__: BUG, MAINT: fix reference count error on invalid input to ndarray.flat
+* `#13276 <https://github.com/numpy/numpy/pull/13276>`__: ENH: Cast covariance to double in random mvnormal
+* `#13278 <https://github.com/numpy/numpy/pull/13278>`__: BUG: Fix null pointer dereference in PyArray_DTypeFromObjectHelper
+* `#13339 <https://github.com/numpy/numpy/pull/13339>`__: BUG: Use C call to sysctlbyname for AVX detection on MacOS.
+* `#13340 <https://github.com/numpy/numpy/pull/13340>`__: BUG: Fix crash when calling savetxt on a padded array
+* `#13341 <https://github.com/numpy/numpy/pull/13341>`__: BUG: ufunc.at iteration variable size fix
+* `#13342 <https://github.com/numpy/numpy/pull/13342>`__: DOC: Add as_ctypes_type to the documentation
+* `#13350 <https://github.com/numpy/numpy/pull/13350>`__: BUG: Return the coefficients array directly
+* `#13351 <https://github.com/numpy/numpy/pull/13351>`__: BUG/MAINT: Tidy typeinfo.h and .c
+* `#13359 <https://github.com/numpy/numpy/pull/13359>`__: BUG: Make allow_pickle=False the default for loading
+* `#13360 <https://github.com/numpy/numpy/pull/13360>`__: DOC: fix some doctest failures
+* `#13363 <https://github.com/numpy/numpy/pull/13363>`__: BUG/MAINT: Tidy typeinfo.h and .c
+* `#13381 <https://github.com/numpy/numpy/pull/13381>`__: BLD: address mingw-w64 issue. Follow-up to gh-9977
+* `#13382 <https://github.com/numpy/numpy/pull/13382>`__: REL: Prepare for the NumPy release.
diff --git a/doc/changelog/1.16.4-changelog.rst b/doc/changelog/1.16.4-changelog.rst
new file mode 100644
index 000000000..b32881c37
--- /dev/null
+++ b/doc/changelog/1.16.4-changelog.rst
@@ -0,0 +1,39 @@
+
+Contributors
+============
+
+A total of 10 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Dennis Zollo +
+* Hunter Damron +
+* Jingbei Li +
+* Kevin Sheppard
+* Matti Picus
+* Nicola Soranzo +
+* Sebastian Berg
+* Tyler Reddy
+
+Pull requests merged
+====================
+
+A total of 16 pull requests were merged for this release.
+
+* `#13392 <https://github.com/numpy/numpy/pull/13392>`__: BUG: Some PyPy versions lack PyStructSequence_InitType2.
+* `#13394 <https://github.com/numpy/numpy/pull/13394>`__: MAINT, DEP: Fix deprecated ``assertEquals()``
+* `#13396 <https://github.com/numpy/numpy/pull/13396>`__: BUG: Fix structured_to_unstructured on single-field types (backport)
+* `#13549 <https://github.com/numpy/numpy/pull/13549>`__: BLD: Make CI pass again with pytest 4.5
+* `#13552 <https://github.com/numpy/numpy/pull/13552>`__: TST: Register markers in conftest.py.
+* `#13559 <https://github.com/numpy/numpy/pull/13559>`__: BUG: Removes ValueError for empty kwargs in arraymultiter_new
+* `#13560 <https://github.com/numpy/numpy/pull/13560>`__: BUG: Add TypeError to accepted exceptions in crackfortran.
+* `#13561 <https://github.com/numpy/numpy/pull/13561>`__: BUG: Handle subarrays in descr_to_dtype
+* `#13562 <https://github.com/numpy/numpy/pull/13562>`__: BUG: Protect generators from log(0.0)
+* `#13563 <https://github.com/numpy/numpy/pull/13563>`__: BUG: Always return views from structured_to_unstructured when...
+* `#13564 <https://github.com/numpy/numpy/pull/13564>`__: BUG: Catch stderr when checking compiler version
+* `#13565 <https://github.com/numpy/numpy/pull/13565>`__: BUG: longdouble(int) does not work
+* `#13587 <https://github.com/numpy/numpy/pull/13587>`__: BUG: distutils/system_info.py fix missing subprocess import (#13523)
+* `#13620 <https://github.com/numpy/numpy/pull/13620>`__: BUG,DEP: Fix writeable flag setting for arrays without base
+* `#13641 <https://github.com/numpy/numpy/pull/13641>`__: MAINT: Prepare for the 1.16.4 release.
+* `#13644 <https://github.com/numpy/numpy/pull/13644>`__: BUG: special case object arrays when printing rel-, abs-error
diff --git a/doc/changelog/1.16.5-changelog.rst b/doc/changelog/1.16.5-changelog.rst
new file mode 100644
index 000000000..19374058d
--- /dev/null
+++ b/doc/changelog/1.16.5-changelog.rst
@@ -0,0 +1,54 @@
+
+Contributors
+============
+
+A total of 18 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Shadchin
+* Allan Haldane
+* Bruce Merry +
+* Charles Harris
+* Colin Snyder +
+* Dan Allan +
+* Emile +
+* Eric Wieser
+* Grey Baker +
+* Maksim Shabunin +
+* Marten van Kerkwijk
+* Matti Picus
+* Peter Andreas Entschev +
+* Ralf Gommers
+* Richard Harris +
+* Sebastian Berg
+* Sergei Lebedev +
+* Stephan Hoyer
+
+Pull requests merged
+====================
+
+A total of 23 pull requests were merged for this release.
+
+* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
+* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
+* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
+* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
+* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
+* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject
+* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
+* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
+* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
+* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict constructor
+* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
+* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
+* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
+* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
diff --git a/doc/changelog/1.17.0-changelog.rst b/doc/changelog/1.17.0-changelog.rst
new file mode 100644
index 000000000..debfb6f5b
--- /dev/null
+++ b/doc/changelog/1.17.0-changelog.rst
@@ -0,0 +1,694 @@
+
+Contributors
+============
+
+A total of 150 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Aaron Voelker +
+* Abdur Rehman +
+* Abdur-Rahmaan Janhangeer +
+* Abhinav Sagar +
+* Adam J. Stewart +
+* Adam Orr +
+* Albert Thomas +
+* Alex Watt +
+* Alexander Blinne +
+* Alexander Shadchin
+* Allan Haldane
+* Ander Ustarroz +
+* Andras Deak
+* Andrea Pattori +
+* Andreas Schwab
+* Andrew Naguib +
+* Andy Scholand +
+* Ankit Shukla +
+* Anthony Sottile
+* Antoine Pitrou
+* Antony Lee
+* Arcesio Castaneda Medina +
+* Assem +
+* Bernardt Duvenhage +
+* Bharat Raghunathan +
+* Bharat123rox +
+* Bran +
+* Bruce Merry +
+* Charles Harris
+* Chirag Nighut +
+* Christoph Gohlke
+* Christopher Whelan +
+* Chuanzhu Xu +
+* Colin Snyder +
+* Dan Allan +
+* Daniel Hrisca
+* Daniel Lawrence +
+* Debsankha Manik +
+* Dennis Zollo +
+* Dieter Werthmüller +
+* Dominic Jack +
+* EelcoPeacs +
+* Eric Larson
+* Eric Wieser
+* Fabrice Fontaine +
+* Gary Gurlaskie +
+* Gregory Lee +
+* Gregory R. Lee
+* Guillaume Horel +
+* Hameer Abbasi
+* Haoyu Sun +
+* Harmon +
+* He Jia +
+* Hunter Damron +
+* Ian Sanders +
+* Ilja +
+* Isaac Virshup +
+* Isaiah Norton +
+* Jackie Leng +
+* Jaime Fernandez
+* Jakub Wilk
+* Jan S. (Milania1) +
+* Jarrod Millman
+* Javier Dehesa +
+* Jeremy Lay +
+* Jim Turner +
+* Jingbei Li +
+* Joachim Hereth +
+* Johannes Hampp +
+* John Belmonte +
+* John Kirkham
+* John Law +
+* Jonas Jensen
+* Joseph Fox-Rabinovitz
+* Joseph Martinot-Lagarde
+* Josh Wilson
+* Juan Luis Cano Rodríguez
+* Julian Taylor
+* Jérémie du Boisberranger +
+* Kai Striega +
+* Katharine Hyatt +
+* Kevin Sheppard
+* Kexuan Sun
+* Kiko Correoso +
+* Kriti Singh +
+* Lars Grueter +
+* Luis Pedro Coelho
+* Maksim Shabunin +
+* Manvi07 +
+* Mark Harfouche
+* Marten van Kerkwijk
+* Martin Reinecke +
+* Matthew Brett
+* Matthias Bussonnier
+* Matti Picus
+* Michel Fruchart +
+* Mike Lui +
+* Mike Taves +
+* Min ho Kim +
+* Mircea Akos Bruma
+* Nick Minkyu Lee
+* Nick Papior
+* Nick R. Papior +
+* Nicola Soranzo +
+* Nimish Telang +
+* OBATA Akio +
+* Oleksandr Pavlyk
+* Ori Broda +
+* Paul Ivanov
+* Pauli Virtanen
+* Peter Andreas Entschev +
+* Peter Bell +
+* Pierre de Buyl
+* Piyush Jaipuriayar +
+* Prithvi MK +
+* Raghuveer Devulapalli +
+* Ralf Gommers
+* Richard Harris +
+* Rishabh Chakrabarti +
+* Riya Sharma +
+* Robert Kern
+* Roman Yurchak
+* Ryan Levy +
+* Sebastian Berg
+* Sergei Lebedev +
+* Shekhar Prasad Rajak +
+* Stefan van der Walt
+* Stephan Hoyer
+* Steve Stagg +
+* SuryaChand P +
+* Søren Rasmussen +
+* Thibault Hallouin +
+* Thomas A Caswell
+* Tobias Uelwer +
+* Tony LaTorre +
+* Toshiki Kataoka
+* Tyler Moncur +
+* Tyler Reddy
+* Valentin Haenel
+* Vrinda Narayan +
+* Warren Weckesser
+* Weitang Li
+* Wojtek Ruszczewski
+* Yu Feng
+* Yu Kobayashi +
+* Yury Kirienko +
+* aashuli +
+* luzpaz
+* parul +
+* spacescientist +
+
+Pull requests merged
+====================
+
+A total of 531 pull requests were merged for this release.
+
+* `#4808 <https://github.com/numpy/numpy/pull/4808>`__: ENH: Make the `mode` parameter of np.pad default to 'constant'
+* `#8131 <https://github.com/numpy/numpy/pull/8131>`__: BUG: Fix help() formatting for deprecated functions.
+* `#8159 <https://github.com/numpy/numpy/pull/8159>`__: ENH: Add import time benchmarks.
+* `#8641 <https://github.com/numpy/numpy/pull/8641>`__: BUG: Preserve types of empty arrays in ix_ when known
+* `#8662 <https://github.com/numpy/numpy/pull/8662>`__: ENH: preserve subclasses in ufunc.outer
+* `#9330 <https://github.com/numpy/numpy/pull/9330>`__: ENH: Make errstate a ContextDecorator in Python3
+* `#10308 <https://github.com/numpy/numpy/pull/10308>`__: API: Make MaskedArray.mask return a view, rather than the underlying...
+* `#10417 <https://github.com/numpy/numpy/pull/10417>`__: ENH: Allow dtype objects to be indexed with multiple fields at...
+* `#10723 <https://github.com/numpy/numpy/pull/10723>`__: BUG: longdouble(int) does not work
+* `#10741 <https://github.com/numpy/numpy/pull/10741>`__: ENH: Implement `np.floating.as_integer_ratio`
+* `#10855 <https://github.com/numpy/numpy/pull/10855>`__: ENH: Adding a count parameter to np.unpackbits
+* `#11230 <https://github.com/numpy/numpy/pull/11230>`__: MAINT: More cleanup of einsum
+* `#11233 <https://github.com/numpy/numpy/pull/11233>`__: BUG: ensure i0 does not change the shape.
+* `#11684 <https://github.com/numpy/numpy/pull/11684>`__: BUG: Raise when unravel_index, ravel_multi_index are given empty...
+* `#11689 <https://github.com/numpy/numpy/pull/11689>`__: DOC: Add ref docs for C generic types.
+* `#11721 <https://github.com/numpy/numpy/pull/11721>`__: BUG: Make `arr.ctypes.data` hold onto a reference to the underlying...
+* `#11829 <https://github.com/numpy/numpy/pull/11829>`__: MAINT: Use textwrap.dedent in f2py tests
+* `#11859 <https://github.com/numpy/numpy/pull/11859>`__: BUG: test and fix np.dtype('i,L') #5645
+* `#11888 <https://github.com/numpy/numpy/pull/11888>`__: ENH: Add pocketfft sources to numpy for testing, benchmarks,...
+* `#11977 <https://github.com/numpy/numpy/pull/11977>`__: BUG: reference cycle in np.vectorize
+* `#12025 <https://github.com/numpy/numpy/pull/12025>`__: DOC: add detail for 'where' argument in ufunc
+* `#12152 <https://github.com/numpy/numpy/pull/12152>`__: TST: Added tests for np.tensordot()
+* `#12201 <https://github.com/numpy/numpy/pull/12201>`__: TST: coverage for _commonType()
+* `#12234 <https://github.com/numpy/numpy/pull/12234>`__: MAINT: refactor PyArray_AdaptFlexibleDType to return a meaningful...
+* `#12239 <https://github.com/numpy/numpy/pull/12239>`__: BUG: polyval returned non-masked arrays for masked input.
+* `#12253 <https://github.com/numpy/numpy/pull/12253>`__: DOC, TST: enable doctests
+* `#12308 <https://github.com/numpy/numpy/pull/12308>`__: ENH: add mm->q floordiv
+* `#12317 <https://github.com/numpy/numpy/pull/12317>`__: ENH: port np.core.overrides to C for speed
+* `#12333 <https://github.com/numpy/numpy/pull/12333>`__: DOC: update description of the Dirichlet distribution
+* `#12418 <https://github.com/numpy/numpy/pull/12418>`__: ENH: Add timsort to npysort
+* `#12428 <https://github.com/numpy/numpy/pull/12428>`__: ENH: always use zip64, upgrade pickle protocol to 3
+* `#12456 <https://github.com/numpy/numpy/pull/12456>`__: ENH: Add np.ctypeslib.as_ctypes_type(dtype), improve `np.ctypeslib.as_ctypes`
+* `#12457 <https://github.com/numpy/numpy/pull/12457>`__: TST: openblas for Azure MacOS
+* `#12463 <https://github.com/numpy/numpy/pull/12463>`__: DOC: fix docstrings for broadcastable inputs in ufunc
+* `#12502 <https://github.com/numpy/numpy/pull/12502>`__: TST: Azure Python version fix
+* `#12506 <https://github.com/numpy/numpy/pull/12506>`__: MAINT: Prepare master for 1.17.0 development.
+* `#12508 <https://github.com/numpy/numpy/pull/12508>`__: DOC, MAINT: Make `PYVER = 3` in doc/Makefile.
+* `#12511 <https://github.com/numpy/numpy/pull/12511>`__: BUG: don't check alignment of size=0 arrays (RELAXED_STRIDES)
+* `#12512 <https://github.com/numpy/numpy/pull/12512>`__: added template-generated files to .gitignore
+* `#12519 <https://github.com/numpy/numpy/pull/12519>`__: ENH/DEP: Use a ufunc under the hood for ndarray.clip
+* `#12522 <https://github.com/numpy/numpy/pull/12522>`__: BUG: Make new-lines in compiler error messages print to the console
+* `#12524 <https://github.com/numpy/numpy/pull/12524>`__: BUG: fix improper use of C-API
+* `#12526 <https://github.com/numpy/numpy/pull/12526>`__: BUG: reorder operations for VS2015
+* `#12527 <https://github.com/numpy/numpy/pull/12527>`__: DEV: Fix lgtm.com C/C++ build
+* `#12528 <https://github.com/numpy/numpy/pull/12528>`__: BUG: fix an unsafe PyTuple_GET_ITEM call
+* `#12532 <https://github.com/numpy/numpy/pull/12532>`__: DEV: add ctags option file
+* `#12534 <https://github.com/numpy/numpy/pull/12534>`__: DOC: Fix desc. of Ellipsis behavior in reference
+* `#12537 <https://github.com/numpy/numpy/pull/12537>`__: DOC: Change 'num' to 'np'
+* `#12538 <https://github.com/numpy/numpy/pull/12538>`__: MAINT: remove VC 9.0 from CI
+* `#12539 <https://github.com/numpy/numpy/pull/12539>`__: DEV: remove travis 32 bit job since it is running on azure
+* `#12543 <https://github.com/numpy/numpy/pull/12543>`__: TST: wheel-match Linux openblas in CI
+* `#12544 <https://github.com/numpy/numpy/pull/12544>`__: BUG: fix refcount issue caused by #12524
+* `#12545 <https://github.com/numpy/numpy/pull/12545>`__: BUG: Ensure probabilities are not NaN in choice
+* `#12546 <https://github.com/numpy/numpy/pull/12546>`__: BUG: check for errors after PyArray_DESCR_REPLACE
+* `#12547 <https://github.com/numpy/numpy/pull/12547>`__: ENH: Cast covariance to double in random mvnormal
+* `#12549 <https://github.com/numpy/numpy/pull/12549>`__: TST: relax codecov project threshold
+* `#12551 <https://github.com/numpy/numpy/pull/12551>`__: MAINT: add warning to numpy.distutils for LDFLAGS append behavior.
+* `#12552 <https://github.com/numpy/numpy/pull/12552>`__: BENCH: Improve benchmarks for numpy.pad
+* `#12554 <https://github.com/numpy/numpy/pull/12554>`__: DOC: more doc updates for structured arrays
+* `#12555 <https://github.com/numpy/numpy/pull/12555>`__: BUG: only override vector size for avx code
+* `#12560 <https://github.com/numpy/numpy/pull/12560>`__: DOC: fix some doctest failures
+* `#12566 <https://github.com/numpy/numpy/pull/12566>`__: BUG: fix segfault in ctypeslib with obj being collected
+* `#12571 <https://github.com/numpy/numpy/pull/12571>`__: Revert "Merge pull request #11721 from eric-wieser/fix-9647"
+* `#12572 <https://github.com/numpy/numpy/pull/12572>`__: BUG: Make `arr.ctypes.data` hold a reference to the underlying...
+* `#12575 <https://github.com/numpy/numpy/pull/12575>`__: ENH: improve performance for numpy.core.records.find_duplicate
+* `#12577 <https://github.com/numpy/numpy/pull/12577>`__: BUG: fix f2py pep338 execution method
+* `#12578 <https://github.com/numpy/numpy/pull/12578>`__: TST: activate shippable maintenance branches
+* `#12583 <https://github.com/numpy/numpy/pull/12583>`__: TST: add test for 'python -mnumpy.f2py'
+* `#12584 <https://github.com/numpy/numpy/pull/12584>`__: Clarify skiprows in loadtxt
+* `#12586 <https://github.com/numpy/numpy/pull/12586>`__: ENH: Implement radix sort
+* `#12589 <https://github.com/numpy/numpy/pull/12589>`__: MAINT: Update changelog.py for Python 3.
+* `#12591 <https://github.com/numpy/numpy/pull/12591>`__: ENH: add "max difference" messages to np.testing.assert_array_equal
+* `#12592 <https://github.com/numpy/numpy/pull/12592>`__: BUG,TST: Remove the misguided `run_command` that wraps subprocess
+* `#12593 <https://github.com/numpy/numpy/pull/12593>`__: ENH,WIP: Use richer exception types for ufunc type resolution...
+* `#12594 <https://github.com/numpy/numpy/pull/12594>`__: DEV, BUILD: add pypy3 to azure CI
+* `#12596 <https://github.com/numpy/numpy/pull/12596>`__: ENH: improve performance of numpy.core.records.fromarrays
+* `#12601 <https://github.com/numpy/numpy/pull/12601>`__: DOC: Correct documentation of `numpy.delete` obj parameter.
+* `#12602 <https://github.com/numpy/numpy/pull/12602>`__: DOC: Update RELEASE_WALKTHROUGH.rst.txt.
+* `#12604 <https://github.com/numpy/numpy/pull/12604>`__: BUG: Check that dtype and formats arguments are not None.
+* `#12606 <https://github.com/numpy/numpy/pull/12606>`__: DOC: Document NPY_SORTKIND parameter in PyArray_Sort
+* `#12608 <https://github.com/numpy/numpy/pull/12608>`__: MAINT: Use `*.format` for some strings.
+* `#12609 <https://github.com/numpy/numpy/pull/12609>`__: ENH: Deprecate writeable broadcast_array
+* `#12610 <https://github.com/numpy/numpy/pull/12610>`__: TST: Update runtests.py to specify C99 for gcc.
+* `#12611 <https://github.com/numpy/numpy/pull/12611>`__: BUG: longdouble with elsize 12 is never uint alignable
+* `#12612 <https://github.com/numpy/numpy/pull/12612>`__: TST: Update `travis-test.sh` for C99
+* `#12616 <https://github.com/numpy/numpy/pull/12616>`__: BLD: Fix minimum Python version in setup.py
+* `#12617 <https://github.com/numpy/numpy/pull/12617>`__: BUG: Add missing free in ufunc dealloc
+* `#12618 <https://github.com/numpy/numpy/pull/12618>`__: MAINT: add test for 12-byte alignment
+* `#12620 <https://github.com/numpy/numpy/pull/12620>`__: BLD: move -std=c99 addition to CFLAGS to Azure config
+* `#12624 <https://github.com/numpy/numpy/pull/12624>`__: BUG: Fix incorrect/missing reference cleanups found using valgrind
+* `#12626 <https://github.com/numpy/numpy/pull/12626>`__: BUG: fix uint alignment asserts in lowlevel loops
+* `#12631 <https://github.com/numpy/numpy/pull/12631>`__: BUG: fix f2py problem to build wrappers using PGI's Fortran
+* `#12634 <https://github.com/numpy/numpy/pull/12634>`__: DOC, TST: remove "agg" setting from docs
+* `#12639 <https://github.com/numpy/numpy/pull/12639>`__: BENCH: don't fail at import time with old Numpy
+* `#12641 <https://github.com/numpy/numpy/pull/12641>`__: DOC: update 2018 -> 2019
+* `#12644 <https://github.com/numpy/numpy/pull/12644>`__: ENH: where for ufunc reductions
+* `#12645 <https://github.com/numpy/numpy/pull/12645>`__: DOC: Minor fix to pocketfft release note
+* `#12650 <https://github.com/numpy/numpy/pull/12650>`__: BUG: Fix reference counting for subarrays containing objects
+* `#12651 <https://github.com/numpy/numpy/pull/12651>`__: DOC: SimpleNewFromDescr cannot be given NULL for descr
+* `#12666 <https://github.com/numpy/numpy/pull/12666>`__: BENCH: add asv nanfunction benchmarks
+* `#12668 <https://github.com/numpy/numpy/pull/12668>`__: ENH: Improve error messages for non-matching shapes in concatenate.
+* `#12671 <https://github.com/numpy/numpy/pull/12671>`__: TST: Fix endianness in unstructured_to_structured test
+* `#12672 <https://github.com/numpy/numpy/pull/12672>`__: BUG: Add 'sparc' to platforms implementing 16 byte reals.
+* `#12677 <https://github.com/numpy/numpy/pull/12677>`__: MAINT: Further fixups to uint alignment checks
+* `#12679 <https://github.com/numpy/numpy/pull/12679>`__: ENH: remove "Invalid value" warnings from median, percentile
+* `#12680 <https://github.com/numpy/numpy/pull/12680>`__: BUG: Ensure failing memory allocations are reported
+* `#12683 <https://github.com/numpy/numpy/pull/12683>`__: ENH: add mm->qm divmod
+* `#12684 <https://github.com/numpy/numpy/pull/12684>`__: DEV: remove _arg from public API, add matmul to benchmark ufuncs
+* `#12685 <https://github.com/numpy/numpy/pull/12685>`__: BUG: Make pocketfft handle long doubles.
+* `#12687 <https://github.com/numpy/numpy/pull/12687>`__: ENH: Better links in documentation
+* `#12690 <https://github.com/numpy/numpy/pull/12690>`__: WIP, ENH: add _nan_mask function
+* `#12693 <https://github.com/numpy/numpy/pull/12693>`__: ENH: Add a hermitian argument to `pinv` and `svd`, matching `matrix_rank`
+* `#12696 <https://github.com/numpy/numpy/pull/12696>`__: BUG: Fix leak of void scalar buffer info
+* `#12698 <https://github.com/numpy/numpy/pull/12698>`__: DOC: improve comments in copycast_isaligned
+* `#12700 <https://github.com/numpy/numpy/pull/12700>`__: ENH: chain additional exception on ufunc method lookup error
+* `#12702 <https://github.com/numpy/numpy/pull/12702>`__: TST: Check FFT results for C/Fortran ordered and non-contiguous...
+* `#12704 <https://github.com/numpy/numpy/pull/12704>`__: TST: pin Azure brew version for stability
+* `#12709 <https://github.com/numpy/numpy/pull/12709>`__: TST: add ppc64le to Travis CI matrix
+* `#12713 <https://github.com/numpy/numpy/pull/12713>`__: BUG: loosen kwargs requirements in ediff1d
+* `#12722 <https://github.com/numpy/numpy/pull/12722>`__: BUG: Fix rounding of denormals in double and float to half casts...
+* `#12723 <https://github.com/numpy/numpy/pull/12723>`__: BENCH: Include other sort benchmarks
+* `#12724 <https://github.com/numpy/numpy/pull/12724>`__: BENCH: quiet DeprecationWarning
+* `#12727 <https://github.com/numpy/numpy/pull/12727>`__: DOC: fix and doctest tutorial
+* `#12728 <https://github.com/numpy/numpy/pull/12728>`__: DOC: clarify the suffix of single/extended precision math constants
+* `#12729 <https://github.com/numpy/numpy/pull/12729>`__: DOC: Extend documentation of `ndarray.tolist`
+* `#12731 <https://github.com/numpy/numpy/pull/12731>`__: DOC: Update release notes and changelog after 1.16.0 release.
+* `#12733 <https://github.com/numpy/numpy/pull/12733>`__: DOC: clarify the extent of __array_function__ support in NumPy...
+* `#12741 <https://github.com/numpy/numpy/pull/12741>`__: DOC: fix generalized eigenproblem reference in "NumPy for MATLAB...
+* `#12743 <https://github.com/numpy/numpy/pull/12743>`__: BUG: Fix crash in error message formatting introduced by gh-11230
+* `#12748 <https://github.com/numpy/numpy/pull/12748>`__: BUG: Fix SystemError when pickling datetime64 array with pickle5
+* `#12757 <https://github.com/numpy/numpy/pull/12757>`__: BUG: Added parens to macro argument expansions
+* `#12758 <https://github.com/numpy/numpy/pull/12758>`__: DOC: Update docstring of diff() to use 'i' not 'n'
+* `#12762 <https://github.com/numpy/numpy/pull/12762>`__: MAINT: Change the order of checking for locale file and import...
+* `#12783 <https://github.com/numpy/numpy/pull/12783>`__: DOC: document C99 requirement in dev guide
+* `#12787 <https://github.com/numpy/numpy/pull/12787>`__: DOC: remove recommendation to add main for testing
+* `#12805 <https://github.com/numpy/numpy/pull/12805>`__: BUG: double decref of dtype in failure codepath. Test and fix
+* `#12807 <https://github.com/numpy/numpy/pull/12807>`__: BUG, DOC: test, fix that f2py.compile accepts str and bytes,...
+* `#12814 <https://github.com/numpy/numpy/pull/12814>`__: BUG: resolve writeback in arr_insert failure paths
+* `#12815 <https://github.com/numpy/numpy/pull/12815>`__: BUG: Fix testing of f2py.compile from strings.
+* `#12818 <https://github.com/numpy/numpy/pull/12818>`__: DOC: remove python2-only methods, small cleanups
+* `#12824 <https://github.com/numpy/numpy/pull/12824>`__: BUG: fix to check before apply `shlex.split`
+* `#12830 <https://github.com/numpy/numpy/pull/12830>`__: ENH: __array_function__ updates for NumPy 1.17.0
+* `#12831 <https://github.com/numpy/numpy/pull/12831>`__: BUG: Catch stderr when checking compiler version
+* `#12842 <https://github.com/numpy/numpy/pull/12842>`__: BUG: ndarrays pickled by 1.16 cannot be loaded by 1.15.4 and...
+* `#12846 <https://github.com/numpy/numpy/pull/12846>`__: BUG: fix signed zero behavior in npy_divmod
+* `#12850 <https://github.com/numpy/numpy/pull/12850>`__: BUG: fail if old multiarray module detected
+* `#12851 <https://github.com/numpy/numpy/pull/12851>`__: TEST: use xenial by default for travis
+* `#12854 <https://github.com/numpy/numpy/pull/12854>`__: BUG: do not Py_DECREF NULL pointer
+* `#12857 <https://github.com/numpy/numpy/pull/12857>`__: STY: simplify code
+* `#12863 <https://github.com/numpy/numpy/pull/12863>`__: TEST: pin mingw version
+* `#12866 <https://github.com/numpy/numpy/pull/12866>`__: DOC: link to benchmarking info
+* `#12867 <https://github.com/numpy/numpy/pull/12867>`__: TST: Use same OpenBLAS build for testing as for current wheels.
+* `#12871 <https://github.com/numpy/numpy/pull/12871>`__: ENH: add c-imported modules to namespace for freeze analysis
+* `#12877 <https://github.com/numpy/numpy/pull/12877>`__: Remove deprecated ``sudo: false`` from .travis.yml
+* `#12879 <https://github.com/numpy/numpy/pull/12879>`__: DEP: deprecate exec_command
+* `#12885 <https://github.com/numpy/numpy/pull/12885>`__: DOC: fix math formatting of np.linalg.lstsq docs
+* `#12886 <https://github.com/numpy/numpy/pull/12886>`__: DOC: add missing character routines, fix #8578
+* `#12887 <https://github.com/numpy/numpy/pull/12887>`__: BUG: Fix np.rec.fromarrays on arrays which are already structured
+* `#12889 <https://github.com/numpy/numpy/pull/12889>`__: BUG: Make allow_pickle=False the default for loading
+* `#12892 <https://github.com/numpy/numpy/pull/12892>`__: BUG: Do not double-quote arguments passed on to the linker
+* `#12894 <https://github.com/numpy/numpy/pull/12894>`__: MAINT: Removed unused and confusingly indirect imports from mingw32ccompiler
+* `#12895 <https://github.com/numpy/numpy/pull/12895>`__: BUG: Do not insert extra double quote into preprocessor macros
+* `#12903 <https://github.com/numpy/numpy/pull/12903>`__: TST: fix vmImage dispatch in Azure
+* `#12905 <https://github.com/numpy/numpy/pull/12905>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#12908 <https://github.com/numpy/numpy/pull/12908>`__: DOC: Update master following 1.16.1 release.
+* `#12911 <https://github.com/numpy/numpy/pull/12911>`__: BLD: fix doc build for distribution.
+* `#12915 <https://github.com/numpy/numpy/pull/12915>`__: ENH: pathlib support for fromfile(), .tofile() and .dump()
+* `#12920 <https://github.com/numpy/numpy/pull/12920>`__: MAINT: remove complicated test of multiarray import failure mode
+* `#12922 <https://github.com/numpy/numpy/pull/12922>`__: DOC: Add note about arbitrary code execution to numpy.load
+* `#12925 <https://github.com/numpy/numpy/pull/12925>`__: BUG: parse shell escaping in extra_compile_args and extra_link_args
+* `#12928 <https://github.com/numpy/numpy/pull/12928>`__: MAINT: Merge together the unary and binary type resolvers
+* `#12929 <https://github.com/numpy/numpy/pull/12929>`__: DOC: fix documentation bug in np.argsort and extend examples
+* `#12931 <https://github.com/numpy/numpy/pull/12931>`__: MAINT: Remove recurring check
+* `#12932 <https://github.com/numpy/numpy/pull/12932>`__: BUG: do not dereference NULL pointer
+* `#12937 <https://github.com/numpy/numpy/pull/12937>`__: DOC: Correct negative_binomial docstring
+* `#12944 <https://github.com/numpy/numpy/pull/12944>`__: BUG: Make timsort deal with zero length elements.
+* `#12945 <https://github.com/numpy/numpy/pull/12945>`__: BUG: Add timsort without breaking the API.
+* `#12949 <https://github.com/numpy/numpy/pull/12949>`__: DOC: ndarray.max is missing
+* `#12962 <https://github.com/numpy/numpy/pull/12962>`__: ENH: Add 'bitorder' keyword to packbits, unpackbits
+* `#12963 <https://github.com/numpy/numpy/pull/12963>`__: DOC: Grammatical fix in numpy doc
+* `#12964 <https://github.com/numpy/numpy/pull/12964>`__: DOC: Document that ``scale==0`` is now allowed in many distributions.
+* `#12965 <https://github.com/numpy/numpy/pull/12965>`__: DOC: Properly format Return section of ogrid Docstring,
+* `#12968 <https://github.com/numpy/numpy/pull/12968>`__: BENCH: Re-write sorting benchmarks
+* `#12971 <https://github.com/numpy/numpy/pull/12971>`__: ENH: Add 'offset' keyword to 'numpy.fromfile()'
+* `#12973 <https://github.com/numpy/numpy/pull/12973>`__: DOC: Recommend adding dimension to switch between row and column...
+* `#12983 <https://github.com/numpy/numpy/pull/12983>`__: DOC: Randomstate docstring fixes
+* `#12984 <https://github.com/numpy/numpy/pull/12984>`__: DOC: Add examples of negative shifts in np.roll
+* `#12986 <https://github.com/numpy/numpy/pull/12986>`__: BENCH: set ones in any/all benchmarks to 1 instead of 0
+* `#12988 <https://github.com/numpy/numpy/pull/12988>`__: ENH: Create boolean and integer ufuncs for isnan, isinf, and...
+* `#12989 <https://github.com/numpy/numpy/pull/12989>`__: ENH: Correct handling of infinities in np.interp (option B)
+* `#12995 <https://github.com/numpy/numpy/pull/12995>`__: BUG: Add missing PyErr_NoMemory() for reporting a failed malloc
+* `#12996 <https://github.com/numpy/numpy/pull/12996>`__: MAINT: Use the same multiplication order in interp for cached...
+* `#13002 <https://github.com/numpy/numpy/pull/13002>`__: DOC: reduce warnings when building, and rephrase slightly
+* `#13004 <https://github.com/numpy/numpy/pull/13004>`__: MAINT: minor changes for consistency to site.cfg.example
+* `#13008 <https://github.com/numpy/numpy/pull/13008>`__: MAINT: Move pickle import to numpy.compat
+* `#13019 <https://github.com/numpy/numpy/pull/13019>`__: BLD: Windows absolute path DLL loading
+* `#13023 <https://github.com/numpy/numpy/pull/13023>`__: BUG: Changes to string-to-shell parsing behavior broke paths...
+* `#13027 <https://github.com/numpy/numpy/pull/13027>`__: BUG: Fix regression in parsing of F90 and F77 environment variables
+* `#13031 <https://github.com/numpy/numpy/pull/13031>`__: MAINT: Replace if statement with a dictionary lookup for ease...
+* `#13032 <https://github.com/numpy/numpy/pull/13032>`__: MAINT: Extract the loop macros into their own header
+* `#13033 <https://github.com/numpy/numpy/pull/13033>`__: MAINT: Convert property to @property
+* `#13035 <https://github.com/numpy/numpy/pull/13035>`__: DOC: Draw more attention to which functions in random are convenience...
+* `#13036 <https://github.com/numpy/numpy/pull/13036>`__: BUG: __array_interface__ offset was always ignored
+* `#13039 <https://github.com/numpy/numpy/pull/13039>`__: BUG: Remove error-prone borrowed reference handling
+* `#13044 <https://github.com/numpy/numpy/pull/13044>`__: DOC: link to devdocs in README
+* `#13046 <https://github.com/numpy/numpy/pull/13046>`__: ENH: Add shape to *_like() array creation
+* `#13049 <https://github.com/numpy/numpy/pull/13049>`__: MAINT: remove undocumented __buffer__ attribute lookup
+* `#13050 <https://github.com/numpy/numpy/pull/13050>`__: BLD: make doc build work more robustly.
+* `#13054 <https://github.com/numpy/numpy/pull/13054>`__: DOC: Added maximum_sctype to documentation
+* `#13055 <https://github.com/numpy/numpy/pull/13055>`__: DOC: Post NumPy 1.16.2 release update.
+* `#13056 <https://github.com/numpy/numpy/pull/13056>`__: BUG: Fixes to numpy.distutils.Configuration.get_version
+* `#13058 <https://github.com/numpy/numpy/pull/13058>`__: DOC: update docstring in numpy.interp docstring
+* `#13060 <https://github.com/numpy/numpy/pull/13060>`__: BUG: Use C call to sysctlbyname for AVX detection on MacOS
+* `#13063 <https://github.com/numpy/numpy/pull/13063>`__: DOC: revert PR #13058 and fixup Makefile
+* `#13067 <https://github.com/numpy/numpy/pull/13067>`__: MAINT: Use with statements for opening files in distutils
+* `#13068 <https://github.com/numpy/numpy/pull/13068>`__: BUG: Add error checks when converting integers to datetime types
+* `#13071 <https://github.com/numpy/numpy/pull/13071>`__: DOC: Removed incorrect claim regarding shape constraints for...
+* `#13073 <https://github.com/numpy/numpy/pull/13073>`__: MAINT: Fix ABCPolyBase in various ways
+* `#13075 <https://github.com/numpy/numpy/pull/13075>`__: BUG: Convert fortran flags in environment variable
+* `#13076 <https://github.com/numpy/numpy/pull/13076>`__: BUG: Remove our patched version of `distutils.split_quoted`
+* `#13077 <https://github.com/numpy/numpy/pull/13077>`__: BUG: Fix errors in string formatting while producing an error
+* `#13078 <https://github.com/numpy/numpy/pull/13078>`__: MAINT: deduplicate fromroots in np.polynomial
+* `#13079 <https://github.com/numpy/numpy/pull/13079>`__: MAINT: Merge duplicate implementations of `*vander2d` and `*vander3d`...
+* `#13086 <https://github.com/numpy/numpy/pull/13086>`__: BLD: fix include list for sdist building
+* `#13090 <https://github.com/numpy/numpy/pull/13090>`__: BUILD: sphinx 1.8.3 can be used with our outdated templates
+* `#13092 <https://github.com/numpy/numpy/pull/13092>`__: BUG: ensure linspace works on object input.
+* `#13093 <https://github.com/numpy/numpy/pull/13093>`__: BUG: Fix parameter validity checks in ``random.choice``.
+* `#13095 <https://github.com/numpy/numpy/pull/13095>`__: BUG: Fix testsuite failures on ppc and riscv
+* `#13096 <https://github.com/numpy/numpy/pull/13096>`__: TEST: allow refcheck result to vary, increase discoverability...
+* `#13097 <https://github.com/numpy/numpy/pull/13097>`__: DOC: update doc of `ndarray.T`
+* `#13099 <https://github.com/numpy/numpy/pull/13099>`__: DOC: Add note about "copy and slicing"
+* `#13104 <https://github.com/numpy/numpy/pull/13104>`__: DOC: fix references in docs
+* `#13107 <https://github.com/numpy/numpy/pull/13107>`__: MAINT: Unify polynomial valnd functions
+* `#13108 <https://github.com/numpy/numpy/pull/13108>`__: MAINT: Merge duplicate implementations of `hermvander2d` and...
+* `#13109 <https://github.com/numpy/numpy/pull/13109>`__: Prevent traceback chaining in _wrapfunc.
+* `#13111 <https://github.com/numpy/numpy/pull/13111>`__: MAINT: Unify polydiv
+* `#13115 <https://github.com/numpy/numpy/pull/13115>`__: DOC: Fix #12050 by updating numpy.random.hypergeometric docs
+* `#13116 <https://github.com/numpy/numpy/pull/13116>`__: DOC: Add backticks in linalg docstrings.
+* `#13117 <https://github.com/numpy/numpy/pull/13117>`__: DOC: Fix arg type for np.pad, fix #9489
+* `#13118 <https://github.com/numpy/numpy/pull/13118>`__: DOC: update scipy-sphinx-theme, fixes search
+* `#13119 <https://github.com/numpy/numpy/pull/13119>`__: DOC: Fix c-api function documentation duplication.
+* `#13125 <https://github.com/numpy/numpy/pull/13125>`__: BUG: Fix unhandled exception in CBLAS detection
+* `#13126 <https://github.com/numpy/numpy/pull/13126>`__: DEP: polynomial: Be stricter about integral arguments
+* `#13127 <https://github.com/numpy/numpy/pull/13127>`__: DOC: Tidy 1.17.0 release note newlines
+* `#13128 <https://github.com/numpy/numpy/pull/13128>`__: MAINT: Unify polynomial addition and subtraction functions
+* `#13130 <https://github.com/numpy/numpy/pull/13130>`__: MAINT: Unify polynomial fitting functions
+* `#13131 <https://github.com/numpy/numpy/pull/13131>`__: BUILD: use 'quiet' when building docs
+* `#13132 <https://github.com/numpy/numpy/pull/13132>`__: BLD: Allow users to specify BLAS and LAPACK library link order
+* `#13134 <https://github.com/numpy/numpy/pull/13134>`__: ENH: Use AVX for float32 implementation of np.exp & np.log
+* `#13137 <https://github.com/numpy/numpy/pull/13137>`__: BUG: Fix build for glibc on ARC and uclibc.
+* `#13140 <https://github.com/numpy/numpy/pull/13140>`__: DEV: cleanup imports and some assignments (from LGTM)
+* `#13146 <https://github.com/numpy/numpy/pull/13146>`__: MAINT: Unify polynomial power functions
+* `#13147 <https://github.com/numpy/numpy/pull/13147>`__: DOC: Add description of overflow errors
+* `#13149 <https://github.com/numpy/numpy/pull/13149>`__: DOC: correction to numpy.pad docstring
+* `#13157 <https://github.com/numpy/numpy/pull/13157>`__: BLD: streamlined library names in site.cfg sections
+* `#13158 <https://github.com/numpy/numpy/pull/13158>`__: BLD: Add libflame as a LAPACK back-end
+* `#13161 <https://github.com/numpy/numpy/pull/13161>`__: BLD: streamlined CBLAS linkage tries, default to try libraries...
+* `#13162 <https://github.com/numpy/numpy/pull/13162>`__: BUILD: update numpydoc to latest version
+* `#13163 <https://github.com/numpy/numpy/pull/13163>`__: ENH: randomgen
+* `#13169 <https://github.com/numpy/numpy/pull/13169>`__: STY: Fix weird indents to be multiples of 4 spaces
+* `#13170 <https://github.com/numpy/numpy/pull/13170>`__: DOC, BUILD: fail the devdoc build if there are warnings
+* `#13174 <https://github.com/numpy/numpy/pull/13174>`__: DOC: Removed some c-api duplication
+* `#13176 <https://github.com/numpy/numpy/pull/13176>`__: BUG: fix reference count error on invalid input to ndarray.flat
+* `#13181 <https://github.com/numpy/numpy/pull/13181>`__: BENCH, BUG: fix Savez suite, previously was actually calling...
+* `#13182 <https://github.com/numpy/numpy/pull/13182>`__: MAINT: add overlap checks to choose, take, put, putmask
+* `#13188 <https://github.com/numpy/numpy/pull/13188>`__: MAINT: Simplify logic in convert_datetime_to_datetimestruct
+* `#13202 <https://github.com/numpy/numpy/pull/13202>`__: ENH: use rotated companion matrix to reduce error
+* `#13203 <https://github.com/numpy/numpy/pull/13203>`__: DOC: Use std docstring for multivariate normal
+* `#13205 <https://github.com/numpy/numpy/pull/13205>`__: DOC : Fix C-API documentation references to items that don't...
+* `#13206 <https://github.com/numpy/numpy/pull/13206>`__: BUILD: pin sphinx to 1.8.5
+* `#13208 <https://github.com/numpy/numpy/pull/13208>`__: MAINT: cleanup of fast_loop_macros.h
+* `#13216 <https://github.com/numpy/numpy/pull/13216>`__: Adding an example of successful execution of numpy.test() to...
+* `#13217 <https://github.com/numpy/numpy/pull/13217>`__: TST: always publish Azure tests
+* `#13218 <https://github.com/numpy/numpy/pull/13218>`__: ENH: `isfinite` support for `datetime64` and `timedelta64`
+* `#13219 <https://github.com/numpy/numpy/pull/13219>`__: ENH: nan_to_num keyword addition (was #9355)
+* `#13222 <https://github.com/numpy/numpy/pull/13222>`__: DOC: Document/ Deprecate functions exposed in "numpy" namespace
+* `#13224 <https://github.com/numpy/numpy/pull/13224>`__: Improve error message for negative valued argument
+* `#13226 <https://github.com/numpy/numpy/pull/13226>`__: DOC: Fix small issues in mtrand doc strings
+* `#13231 <https://github.com/numpy/numpy/pull/13231>`__: DOC: Change the required Sphinx version to build documentation
+* `#13234 <https://github.com/numpy/numpy/pull/13234>`__: DOC : PyArray_Descr.names undocumented
+* `#13239 <https://github.com/numpy/numpy/pull/13239>`__: DOC: Minor grammatical fixes in NumPy docs
+* `#13242 <https://github.com/numpy/numpy/pull/13242>`__: DOC: fix docstring for floor_divide
+* `#13243 <https://github.com/numpy/numpy/pull/13243>`__: MAINT: replace SETREF with assignment to ret array in ndarray.flat
+* `#13244 <https://github.com/numpy/numpy/pull/13244>`__: DOC: Improve mtrand docstrings
+* `#13250 <https://github.com/numpy/numpy/pull/13250>`__: MAINT: Improve efficiency of pad by avoiding use of apply_along_axis
+* `#13253 <https://github.com/numpy/numpy/pull/13253>`__: TST: fail Azure CI if test failures
+* `#13259 <https://github.com/numpy/numpy/pull/13259>`__: DOC: Small readability improvement
+* `#13262 <https://github.com/numpy/numpy/pull/13262>`__: DOC : Correcting bug on Documentation Page (Byteswapping)
+* `#13264 <https://github.com/numpy/numpy/pull/13264>`__: TST: use OpenBLAS v0.3.5 for POWER8 CI runs
+* `#13269 <https://github.com/numpy/numpy/pull/13269>`__: BUG, MAINT: f2py: Add a cast to avoid a compiler warning.
+* `#13270 <https://github.com/numpy/numpy/pull/13270>`__: TST: use OpenBLAS v0.3.5 for ARMv8 CI
+* `#13271 <https://github.com/numpy/numpy/pull/13271>`__: ENH: vectorize np.abs for unsigned ints and half, improving performance...
+* `#13273 <https://github.com/numpy/numpy/pull/13273>`__: BUG: Fix null pointer dereference in PyArray_DTypeFromObject
+* `#13277 <https://github.com/numpy/numpy/pull/13277>`__: DOC: Document caveat in random.uniform
+* `#13287 <https://github.com/numpy/numpy/pull/13287>`__: Add benchmark for sorting random array.
+* `#13289 <https://github.com/numpy/numpy/pull/13289>`__: DOC: add Quansight Labs as an Institutional Partner
+* `#13291 <https://github.com/numpy/numpy/pull/13291>`__: MAINT: fix unused variable warning in npy_math_complex.c.src
+* `#13292 <https://github.com/numpy/numpy/pull/13292>`__: DOC: update numpydoc to latest master
+* `#13293 <https://github.com/numpy/numpy/pull/13293>`__: DOC: add more info to failure message
+* `#13298 <https://github.com/numpy/numpy/pull/13298>`__: ENH: Added clearer exception for np.diff on 0-dimensional ndarray
+* `#13301 <https://github.com/numpy/numpy/pull/13301>`__: BUG: Fix crash when calling savetxt on a padded array
+* `#13305 <https://github.com/numpy/numpy/pull/13305>`__: NEP: Update NEP-18 to include the ``__skip_array_function__``...
+* `#13306 <https://github.com/numpy/numpy/pull/13306>`__: MAINT: better MemoryError message (#13225)
+* `#13309 <https://github.com/numpy/numpy/pull/13309>`__: DOC: list Quansight rather than Quansight Labs as Institutional...
+* `#13310 <https://github.com/numpy/numpy/pull/13310>`__: ENH: Add project_urls to setup
+* `#13311 <https://github.com/numpy/numpy/pull/13311>`__: BUG: Fix bad error message in np.memmap
+* `#13312 <https://github.com/numpy/numpy/pull/13312>`__: BUG: Close files if an error occurs in genfromtxt
+* `#13313 <https://github.com/numpy/numpy/pull/13313>`__: MAINT: fix typo in 'self'
+* `#13314 <https://github.com/numpy/numpy/pull/13314>`__: DOC: remove misplaced section at bottom of governance people...
+* `#13316 <https://github.com/numpy/numpy/pull/13316>`__: DOC: Added anti-diagonal examples to np.diagonal and np.fill_diagonal
+* `#13320 <https://github.com/numpy/numpy/pull/13320>`__: MAINT: remove unused file
+* `#13321 <https://github.com/numpy/numpy/pull/13321>`__: MAINT: Move exceptions from core._internal to core._exceptions
+* `#13322 <https://github.com/numpy/numpy/pull/13322>`__: MAINT: Move umath error helpers into their own module
+* `#13323 <https://github.com/numpy/numpy/pull/13323>`__: BUG: ufunc.at iteration variable size fix
+* `#13324 <https://github.com/numpy/numpy/pull/13324>`__: MAINT: Move asarray helpers into their own module
+* `#13326 <https://github.com/numpy/numpy/pull/13326>`__: DEP: Deprecate collapsing shape-1 dtype fields to scalars.
+* `#13328 <https://github.com/numpy/numpy/pull/13328>`__: MAINT: Tidy up error message for accumulate and reduceat
+* `#13331 <https://github.com/numpy/numpy/pull/13331>`__: DOC, BLD: fix doc build issues in preparation for the next numpydoc...
+* `#13332 <https://github.com/numpy/numpy/pull/13332>`__: BUG: Always return views from structured_to_unstructured when...
+* `#13334 <https://github.com/numpy/numpy/pull/13334>`__: BUG: Fix structured_to_unstructured on single-field types
+* `#13335 <https://github.com/numpy/numpy/pull/13335>`__: DOC: Add as_ctypes_type to the documentation
+* `#13336 <https://github.com/numpy/numpy/pull/13336>`__: BUILD: fail documentation build if numpy version does not match
+* `#13337 <https://github.com/numpy/numpy/pull/13337>`__: DOC: Add docstrings for consistency in aliases
+* `#13346 <https://github.com/numpy/numpy/pull/13346>`__: BUG/MAINT: Tidy typeinfo.h and .c
+* `#13348 <https://github.com/numpy/numpy/pull/13348>`__: BUG: Return the coefficients array directly
+* `#13354 <https://github.com/numpy/numpy/pull/13354>`__: TST: Added test_fftpocket.py::test_axes
+* `#13367 <https://github.com/numpy/numpy/pull/13367>`__: DOC: reorganize developer docs, use scikit-image as a base for...
+* `#13371 <https://github.com/numpy/numpy/pull/13371>`__: BUG/ENH: Make floor, ceil, and trunc call the matching special...
+* `#13374 <https://github.com/numpy/numpy/pull/13374>`__: DOC: Specify range for numpy.angle
+* `#13377 <https://github.com/numpy/numpy/pull/13377>`__: DOC: Add missing macros to C API documentation
+* `#13379 <https://github.com/numpy/numpy/pull/13379>`__: BLD: address mingw-w64 issue. Follow-up to gh-9977
+* `#13383 <https://github.com/numpy/numpy/pull/13383>`__: MAINT, DOC: Post 1.16.3 release updates
+* `#13388 <https://github.com/numpy/numpy/pull/13388>`__: BUG: Some PyPy versions lack PyStructSequence_InitType2.
+* `#13389 <https://github.com/numpy/numpy/pull/13389>`__: ENH: implement ``__skip_array_function__`` attribute for NEP-18
+* `#13390 <https://github.com/numpy/numpy/pull/13390>`__: ENH: Add support for Fraction to percentile and quantile
+* `#13391 <https://github.com/numpy/numpy/pull/13391>`__: MAINT, DEP: Fix deprecated ``assertEquals()``
+* `#13395 <https://github.com/numpy/numpy/pull/13395>`__: DOC: note re defaults allclose to assert_allclose
+* `#13397 <https://github.com/numpy/numpy/pull/13397>`__: DOC: Resolve confusion regarding hashtag in header line of csv
+* `#13399 <https://github.com/numpy/numpy/pull/13399>`__: ENH: Improved performance of PyArray_FromAny for sequences of...
+* `#13402 <https://github.com/numpy/numpy/pull/13402>`__: DOC: Show the default value of deletechars in the signature of...
+* `#13403 <https://github.com/numpy/numpy/pull/13403>`__: DOC: fix typos in dev/index
+* `#13404 <https://github.com/numpy/numpy/pull/13404>`__: DOC: Add Sebastian Berg as sponsored by BIDS
+* `#13406 <https://github.com/numpy/numpy/pull/13406>`__: DOC: clarify array_{2string,str,repr} defaults
+* `#13409 <https://github.com/numpy/numpy/pull/13409>`__: BUG: (py2 only) fix unicode support for savetxt fmt string
+* `#13413 <https://github.com/numpy/numpy/pull/13413>`__: DOC: document existence of linalg backends
+* `#13415 <https://github.com/numpy/numpy/pull/13415>`__: BUG: fixing bugs in AVX exp/log while handling special value...
+* `#13416 <https://github.com/numpy/numpy/pull/13416>`__: BUG: Protect generators from log(0.0)
+* `#13417 <https://github.com/numpy/numpy/pull/13417>`__: DOC: dimension sizes are non-negative, not positive
+* `#13425 <https://github.com/numpy/numpy/pull/13425>`__: MAINT: fixed typo 'Mismacth' from numpy/core/setup_common.py
+* `#13433 <https://github.com/numpy/numpy/pull/13433>`__: BUG: Handle subarrays in descr_to_dtype
+* `#13435 <https://github.com/numpy/numpy/pull/13435>`__: BUG: Add TypeError to accepted exceptions in crackfortran.
+* `#13436 <https://github.com/numpy/numpy/pull/13436>`__: TST: Add file-not-closed check to LGTM analysis.
+* `#13440 <https://github.com/numpy/numpy/pull/13440>`__: MAINT: fixed typo 'wtihout' from numpy/core/shape_base.py
+* `#13443 <https://github.com/numpy/numpy/pull/13443>`__: BLD, TST: implicit func errors
+* `#13445 <https://github.com/numpy/numpy/pull/13445>`__: MAINT: refactor PyArrayMultiIterObject constructors
+* `#13446 <https://github.com/numpy/numpy/pull/13446>`__: MAINT: refactor unravel_index for code repetition
+* `#13449 <https://github.com/numpy/numpy/pull/13449>`__: BUG: missing git raises an OSError
+* `#13456 <https://github.com/numpy/numpy/pull/13456>`__: TST: refine Azure fail reports
+* `#13463 <https://github.com/numpy/numpy/pull/13463>`__: BUG,DEP: Fix writeable flag setting for arrays without base
+* `#13467 <https://github.com/numpy/numpy/pull/13467>`__: ENH: err msg for too large sequences. See #13450
+* `#13469 <https://github.com/numpy/numpy/pull/13469>`__: DOC: correct "version added" in npymath docs
+* `#13471 <https://github.com/numpy/numpy/pull/13471>`__: LICENSE: split license file in standard BSD 3-clause and bundled.
+* `#13477 <https://github.com/numpy/numpy/pull/13477>`__: DOC: have notes in histogram_bin_edges match parameter style
+* `#13479 <https://github.com/numpy/numpy/pull/13479>`__: DOC: Mention the handling of nan in the assert_equal docstring.
+* `#13482 <https://github.com/numpy/numpy/pull/13482>`__: TEST: add duration report to tests, speed up two outliers
+* `#13483 <https://github.com/numpy/numpy/pull/13483>`__: DOC: update mailmap for Bill Spotz
+* `#13485 <https://github.com/numpy/numpy/pull/13485>`__: DOC: add security vulnerability reporting and doc links to README
+* `#13491 <https://github.com/numpy/numpy/pull/13491>`__: BUG/ENH: Create npy format 3.0 to support extended unicode characters...
+* `#13495 <https://github.com/numpy/numpy/pull/13495>`__: BUG: test all ufunc.types for return type, fix for exp, log
+* `#13496 <https://github.com/numpy/numpy/pull/13496>`__: BUG: ma.tostring should respect the order parameter
+* `#13498 <https://github.com/numpy/numpy/pull/13498>`__: DOC: Clarify rcond normalization in linalg.pinv
+* `#13499 <https://github.com/numpy/numpy/pull/13499>`__: MAINT: Use with statement to open/close files to fix LGTM alerts
+* `#13503 <https://github.com/numpy/numpy/pull/13503>`__: ENH: Support object arrays in matmul
+* `#13504 <https://github.com/numpy/numpy/pull/13504>`__: DOC: Update links in PULL_REQUEST_TEMPLATE.md
+* `#13506 <https://github.com/numpy/numpy/pull/13506>`__: ENH: Add sparse option to np.core.numeric.indices
+* `#13507 <https://github.com/numpy/numpy/pull/13507>`__: BUG: np.array cleared errors occurred in PyMemoryView_FromObject
+* `#13508 <https://github.com/numpy/numpy/pull/13508>`__: BUG: Removes ValueError for empty kwargs in arraymultiter_new
+* `#13518 <https://github.com/numpy/numpy/pull/13518>`__: MAINT: implement assert_array_compare without converting array...
+* `#13520 <https://github.com/numpy/numpy/pull/13520>`__: BUG: exp, log AVX loops do not use steps
+* `#13523 <https://github.com/numpy/numpy/pull/13523>`__: BUG: distutils/system_info.py fix missing subprocess import
+* `#13529 <https://github.com/numpy/numpy/pull/13529>`__: MAINT: Use exec() instead array_function_dispatch to improve...
+* `#13530 <https://github.com/numpy/numpy/pull/13530>`__: BENCH: Modify benchmarks for radix sort.
+* `#13534 <https://github.com/numpy/numpy/pull/13534>`__: BLD: Make CI pass again with pytest 4.5
+* `#13541 <https://github.com/numpy/numpy/pull/13541>`__: ENH: restore unpack bit lookup table
+* `#13544 <https://github.com/numpy/numpy/pull/13544>`__: ENH: Allow broadcast to be called with zero arguments
+* `#13550 <https://github.com/numpy/numpy/pull/13550>`__: TST: Register markers in conftest.py.
+* `#13551 <https://github.com/numpy/numpy/pull/13551>`__: DOC: Add note to ``nonzero`` docstring.
+* `#13558 <https://github.com/numpy/numpy/pull/13558>`__: MAINT: Fix errors seen on new python 3.8
+* `#13570 <https://github.com/numpy/numpy/pull/13570>`__: DOC: Remove duplicate documentation of the PyArray_SimpleNew...
+* `#13571 <https://github.com/numpy/numpy/pull/13571>`__: DOC: Mention that expand_dims returns a view
+* `#13574 <https://github.com/numpy/numpy/pull/13574>`__: DOC: remove performance claim from searchsorted()
+* `#13575 <https://github.com/numpy/numpy/pull/13575>`__: TST: Apply ufunc signature and type test fixmes.
+* `#13581 <https://github.com/numpy/numpy/pull/13581>`__: ENH: AVX support for exp/log for strided float32 arrays
+* `#13584 <https://github.com/numpy/numpy/pull/13584>`__: DOC: roadmap update
+* `#13589 <https://github.com/numpy/numpy/pull/13589>`__: MAINT: Increment stacklevel for warnings to account for NEP-18...
+* `#13590 <https://github.com/numpy/numpy/pull/13590>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#13595 <https://github.com/numpy/numpy/pull/13595>`__: NEP: update NEP 19 with API terminology
+* `#13599 <https://github.com/numpy/numpy/pull/13599>`__: DOC: Fixed minor doc error in take_along_axis
+* `#13603 <https://github.com/numpy/numpy/pull/13603>`__: TST: bump / verify OpenBLAS in CI
+* `#13619 <https://github.com/numpy/numpy/pull/13619>`__: DOC: Add missing return value documentation in ndarray.require
+* `#13621 <https://github.com/numpy/numpy/pull/13621>`__: DOC: Update boolean indices in index arrays with slices example
+* `#13623 <https://github.com/numpy/numpy/pull/13623>`__: BUG: Workaround for bug in clang7.0
+* `#13624 <https://github.com/numpy/numpy/pull/13624>`__: DOC: revert __skip_array_function__ from NEP-18
+* `#13626 <https://github.com/numpy/numpy/pull/13626>`__: DOC: update isfortran docs with return value
+* `#13627 <https://github.com/numpy/numpy/pull/13627>`__: MAINT: revert __skip_array_function__ from NEP-18
+* `#13629 <https://github.com/numpy/numpy/pull/13629>`__: BUG: setup.py install --skip-build fails
+* `#13632 <https://github.com/numpy/numpy/pull/13632>`__: MAINT: Collect together the special-casing of 0d nonzero into...
+* `#13633 <https://github.com/numpy/numpy/pull/13633>`__: DOC: caution against relying upon NumPy's implementation in subclasses
+* `#13634 <https://github.com/numpy/numpy/pull/13634>`__: MAINT: avoid nested dispatch in numpy.core.shape_base
+* `#13636 <https://github.com/numpy/numpy/pull/13636>`__: DOC: Add return section to linalg.matrix_rank & tensordot
+* `#13639 <https://github.com/numpy/numpy/pull/13639>`__: MAINT: Update mailmap for 1.17.0
+* `#13642 <https://github.com/numpy/numpy/pull/13642>`__: BUG: special case object arrays when printing rel-, abs-error...
+* `#13648 <https://github.com/numpy/numpy/pull/13648>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#13649 <https://github.com/numpy/numpy/pull/13649>`__: DOC: Mention PyArray_GetField steals a reference
+* `#13652 <https://github.com/numpy/numpy/pull/13652>`__: MAINT: remove superfluous setting in can_cast_safely_table.
+* `#13655 <https://github.com/numpy/numpy/pull/13655>`__: BUG/MAINT: Non-native byteorder in random ints
+* `#13656 <https://github.com/numpy/numpy/pull/13656>`__: PERF: Use intrinsic rotr on Windows
+* `#13657 <https://github.com/numpy/numpy/pull/13657>`__: BUG: Avoid leading underscores in C function names.
+* `#13660 <https://github.com/numpy/numpy/pull/13660>`__: DOC: Updates following NumPy 1.16.4 release.
+* `#13663 <https://github.com/numpy/numpy/pull/13663>`__: BUG: regression for array([pandas.DataFrame()])
+* `#13664 <https://github.com/numpy/numpy/pull/13664>`__: MAINT: Misc. typo fixes
+* `#13665 <https://github.com/numpy/numpy/pull/13665>`__: MAINT: Use intrinsics in Win64-PCG64
+* `#13670 <https://github.com/numpy/numpy/pull/13670>`__: BUG: Fix RandomState argument name
+* `#13672 <https://github.com/numpy/numpy/pull/13672>`__: DOC: Fix rst markup in RELEASE_WALKTHROUGH.
+* `#13678 <https://github.com/numpy/numpy/pull/13678>`__: BUG: fix benchmark suite importability on Numpy<1.17
+* `#13682 <https://github.com/numpy/numpy/pull/13682>`__: ENH: Support __length_hint__ in PyArray_FromIter
+* `#13684 <https://github.com/numpy/numpy/pull/13684>`__: BUG: Move ndarray.dump to python and make it close the file it...
+* `#13687 <https://github.com/numpy/numpy/pull/13687>`__: DOC: Remove misleading statement
+* `#13688 <https://github.com/numpy/numpy/pull/13688>`__: MAINT: Correct masked aliases
+* `#13690 <https://github.com/numpy/numpy/pull/13690>`__: MAINT: Remove version added from Generator
+* `#13691 <https://github.com/numpy/numpy/pull/13691>`__: BUG: Prevent passing of size 0 to array alloc C functions
+* `#13692 <https://github.com/numpy/numpy/pull/13692>`__: DOC: Update C-API documentation of scanfunc, fromstr
+* `#13693 <https://github.com/numpy/numpy/pull/13693>`__: ENH: Pass input strides and dimensions by pointer to const
+* `#13695 <https://github.com/numpy/numpy/pull/13695>`__: BUG: Ensure Windows choice returns int32
+* `#13696 <https://github.com/numpy/numpy/pull/13696>`__: DOC: Put the useful constants first
+* `#13697 <https://github.com/numpy/numpy/pull/13697>`__: MAINT: speed up hstack and vstack by eliminating list comprehension.
+* `#13700 <https://github.com/numpy/numpy/pull/13700>`__: Add links for GitHub Sponsors button.
+* `#13703 <https://github.com/numpy/numpy/pull/13703>`__: DOC: Adds documentation for numpy.dtype.base
+* `#13704 <https://github.com/numpy/numpy/pull/13704>`__: DOC: Mention PyArray_DIMS can be NULL
+* `#13708 <https://github.com/numpy/numpy/pull/13708>`__: DEP: Deprecate nonzero(0d) in favor of calling atleast_1d explicitly
+* `#13715 <https://github.com/numpy/numpy/pull/13715>`__: BUG: Fix use-after-free in boolean indexing
+* `#13716 <https://github.com/numpy/numpy/pull/13716>`__: BUG: Fix random.choice when probability is not C contiguous
+* `#13720 <https://github.com/numpy/numpy/pull/13720>`__: MAINT/BUG: Manage more files with with statements
+* `#13721 <https://github.com/numpy/numpy/pull/13721>`__: MAINT,BUG: More ufunc exception cleanup
+* `#13724 <https://github.com/numpy/numpy/pull/13724>`__: MAINT: fix use of cache_dim
+* `#13725 <https://github.com/numpy/numpy/pull/13725>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#13726 <https://github.com/numpy/numpy/pull/13726>`__: MAINT: Update PCG jump sizes
+* `#13729 <https://github.com/numpy/numpy/pull/13729>`__: DOC: Merge together DISTUTILS.rst.txt#template-files" and distutils.r…
+* `#13730 <https://github.com/numpy/numpy/pull/13730>`__: MAINT: Change keyword from reserved word
+* `#13737 <https://github.com/numpy/numpy/pull/13737>`__: DOC: Mention and try to explain pairwise summation in sum
+* `#13741 <https://github.com/numpy/numpy/pull/13741>`__: MAINT: random: Remove unused empty file binomial.h.
+* `#13743 <https://github.com/numpy/numpy/pull/13743>`__: MAINT: random: Rename legacy distributions file.
+* `#13744 <https://github.com/numpy/numpy/pull/13744>`__: DOC: Update the C style guide for C99.
+* `#13745 <https://github.com/numpy/numpy/pull/13745>`__: BUG: fix segfault on side-effect in __bool__ function in array.nonzero()
+* `#13746 <https://github.com/numpy/numpy/pull/13746>`__: [WIP] DOC : Refactor C-API -- Python Types and C structures
+* `#13757 <https://github.com/numpy/numpy/pull/13757>`__: MAINT: fix histogram*d dispatchers
+* `#13760 <https://github.com/numpy/numpy/pull/13760>`__: DOC: update test guidelines document to use pytest for skipif
+* `#13761 <https://github.com/numpy/numpy/pull/13761>`__: MAINT: random: Rewrite the hypergeometric distribution.
+* `#13762 <https://github.com/numpy/numpy/pull/13762>`__: MAINT: Use textwrap.dedent for multiline strings
+* `#13763 <https://github.com/numpy/numpy/pull/13763>`__: MAINT: Use with statements and dedent in core/setup.py
+* `#13767 <https://github.com/numpy/numpy/pull/13767>`__: DOC: Adds examples for dtype attributes
+* `#13770 <https://github.com/numpy/numpy/pull/13770>`__: MAINT: random: Combine ziggurat.h and ziggurat_constants.h
+* `#13771 <https://github.com/numpy/numpy/pull/13771>`__: DOC: Change random to uninitialized and unpredictable in empty...
+* `#13772 <https://github.com/numpy/numpy/pull/13772>`__: BUILD: use numpy-wheels/openblas_support.py to create _distributor_init.py
+* `#13773 <https://github.com/numpy/numpy/pull/13773>`__: DOC: Update of reference to paper for Lemire's method
+* `#13774 <https://github.com/numpy/numpy/pull/13774>`__: BUG: Make ``Generator._masked`` flag default to ``False``.
+* `#13777 <https://github.com/numpy/numpy/pull/13777>`__: MAINT: Remove duplication of should_use_min_scalar_type function
+* `#13780 <https://github.com/numpy/numpy/pull/13780>`__: ENH: use SeedSequence instead of seed()
+* `#13781 <https://github.com/numpy/numpy/pull/13781>`__: DOC: Update TESTS.rst.txt for pytest
+* `#13786 <https://github.com/numpy/numpy/pull/13786>`__: MAINT: random: Fix a few compiler warnings.
+* `#13787 <https://github.com/numpy/numpy/pull/13787>`__: DOC: Fixed the problem of "versionadded"
+* `#13788 <https://github.com/numpy/numpy/pull/13788>`__: MAINT: fix 'in' -> 'is' typo
+* `#13789 <https://github.com/numpy/numpy/pull/13789>`__: MAINT: Fix warnings in radixsort.c.src: comparing integers of...
+* `#13791 <https://github.com/numpy/numpy/pull/13791>`__: MAINT: remove dSFMT
+* `#13792 <https://github.com/numpy/numpy/pull/13792>`__: LICENSE: update dragon4 license to MIT
+* `#13793 <https://github.com/numpy/numpy/pull/13793>`__: MAINT: remove xoshiro* BitGenerators
+* `#13795 <https://github.com/numpy/numpy/pull/13795>`__: DOC: Update description of sep in fromstring
+* `#13803 <https://github.com/numpy/numpy/pull/13803>`__: DOC: Improve documentation for ``defchararray``
+* `#13813 <https://github.com/numpy/numpy/pull/13813>`__: BUG: further fixup to histogram2d dispatcher.
+* `#13815 <https://github.com/numpy/numpy/pull/13815>`__: MAINT: Correct intrinsic use on Windows
+* `#13818 <https://github.com/numpy/numpy/pull/13818>`__: TST: Add tests for ComplexWarning in astype
+* `#13819 <https://github.com/numpy/numpy/pull/13819>`__: DOC: Fix documented default value of ``__array_priority__`` for...
+* `#13820 <https://github.com/numpy/numpy/pull/13820>`__: MAINT, DOC: Fix misspelled words in documetation.
+* `#13821 <https://github.com/numpy/numpy/pull/13821>`__: MAINT: core: Fix a compiler warning.
+* `#13830 <https://github.com/numpy/numpy/pull/13830>`__: MAINT: Update tox for supported Python versions
+* `#13832 <https://github.com/numpy/numpy/pull/13832>`__: MAINT: remove pcg32 BitGenerator
+* `#13833 <https://github.com/numpy/numpy/pull/13833>`__: MAINT: remove ThreeFry BitGenerator
+* `#13837 <https://github.com/numpy/numpy/pull/13837>`__: MAINT, BUG: fixes from seedsequence
+* `#13838 <https://github.com/numpy/numpy/pull/13838>`__: ENH: SFC64 BitGenerator
+* `#13839 <https://github.com/numpy/numpy/pull/13839>`__: MAINT: Ignore some generated files.
+* `#13840 <https://github.com/numpy/numpy/pull/13840>`__: ENH: np.random.default_gen()
+* `#13843 <https://github.com/numpy/numpy/pull/13843>`__: DOC: remove note about `__array_ufunc__` being provisional for...
+* `#13849 <https://github.com/numpy/numpy/pull/13849>`__: DOC: np.random documentation cleanup and expansion.
+* `#13850 <https://github.com/numpy/numpy/pull/13850>`__: DOC: Update performance numbers
+* `#13851 <https://github.com/numpy/numpy/pull/13851>`__: MAINT: Update shippable.yml to remove Python 2 dependency
+* `#13855 <https://github.com/numpy/numpy/pull/13855>`__: BUG: Fix memory leak in dtype from dict contructor
+* `#13856 <https://github.com/numpy/numpy/pull/13856>`__: MAINT: move location of bitgen.h
+* `#13858 <https://github.com/numpy/numpy/pull/13858>`__: BUG: do not force emulation of 128-bit arithmetic.
+* `#13859 <https://github.com/numpy/numpy/pull/13859>`__: DOC: Update performance numbers for PCG64
+* `#13861 <https://github.com/numpy/numpy/pull/13861>`__: BUG: Ensure consistent interpretation of uint64 states.
+* `#13863 <https://github.com/numpy/numpy/pull/13863>`__: DOC: Document the precise PCG variant.
+* `#13864 <https://github.com/numpy/numpy/pull/13864>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13869 <https://github.com/numpy/numpy/pull/13869>`__: DOC: Prepare for 1.17.0rc1 release
+* `#13870 <https://github.com/numpy/numpy/pull/13870>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#13873 <https://github.com/numpy/numpy/pull/13873>`__: ENH: Rename default_gen -> default_rng
+* `#13893 <https://github.com/numpy/numpy/pull/13893>`__: DOC: fix links in 1.17 release note
+* `#13897 <https://github.com/numpy/numpy/pull/13897>`__: DOC: Use Cython >= 0.29.11 for Python 3.8 support.
+* `#13932 <https://github.com/numpy/numpy/pull/13932>`__: MAINT,BUG,DOC: Fix errors in _add_newdocs
+* `#13963 <https://github.com/numpy/numpy/pull/13963>`__: ENH, BUILD: refactor all OpenBLAS downloads into a single, testable...
+* `#13971 <https://github.com/numpy/numpy/pull/13971>`__: DOC: emphasize random API changes
+* `#13972 <https://github.com/numpy/numpy/pull/13972>`__: MAINT: Rewrite Floyd algorithm
+* `#13992 <https://github.com/numpy/numpy/pull/13992>`__: BUG: Do not crash on recursive `.dtype` attribute lookup.
+* `#13993 <https://github.com/numpy/numpy/pull/13993>`__: DEP: Speed up WarnOnWrite deprecation in buffer interface
+* `#13995 <https://github.com/numpy/numpy/pull/13995>`__: BLD: Remove Trusty dist in Travis CI build
+* `#13996 <https://github.com/numpy/numpy/pull/13996>`__: BUG: Handle weird bytestrings in dtype()
+* `#13997 <https://github.com/numpy/numpy/pull/13997>`__: BUG: i0 Bessel function regression on array-likes supporting...
+* `#13998 <https://github.com/numpy/numpy/pull/13998>`__: BUG: Missing warnings import in polyutils.
+* `#13999 <https://github.com/numpy/numpy/pull/13999>`__: DOC: Document array_function at a higher level.
+* `#14001 <https://github.com/numpy/numpy/pull/14001>`__: DOC: Show workaround for Generator.integers backward compatibility
+* `#14021 <https://github.com/numpy/numpy/pull/14021>`__: DOC: Prepare 1.17.0rc2 release.
+* `#14040 <https://github.com/numpy/numpy/pull/14040>`__: DOC: Improve quickstart documentation of new random Generator.
+* `#14041 <https://github.com/numpy/numpy/pull/14041>`__: TST, MAINT: expand OpenBLAS version checking
+* `#14080 <https://github.com/numpy/numpy/pull/14080>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14081 <https://github.com/numpy/numpy/pull/14081>`__: BUG: fix build issue on icc 2016
+* `#14082 <https://github.com/numpy/numpy/pull/14082>`__: BUG: Fix file-like object check when saving arrays
+* `#14109 <https://github.com/numpy/numpy/pull/14109>`__: REV: "ENH: Improved performance of PyArray_FromAny for sequences...
+* `#14126 <https://github.com/numpy/numpy/pull/14126>`__: BUG, TEST: Adding validation test suite to validate float32 exp
+* `#14127 <https://github.com/numpy/numpy/pull/14127>`__: DOC: Add blank line above doctest for intersect1d
+* `#14128 <https://github.com/numpy/numpy/pull/14128>`__: MAINT: adjustments to test_ufunc_noncontigous
+* `#14129 <https://github.com/numpy/numpy/pull/14129>`__: MAINT: Use equality instead of identity check with literal
+* `#14133 <https://github.com/numpy/numpy/pull/14133>`__: MAINT: Update mailmap and changelog for 1.17.0
diff --git a/doc/changelog/1.17.1-changelog.rst b/doc/changelog/1.17.1-changelog.rst
new file mode 100644
index 000000000..c7c8b6c8e
--- /dev/null
+++ b/doc/changelog/1.17.1-changelog.rst
@@ -0,0 +1,55 @@
+
+Contributors
+============
+
+A total of 17 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Jung +
+* Allan Haldane
+* Charles Harris
+* Eric Wieser
+* Giuseppe Cuccu +
+* Hiroyuki V. Yamazaki
+* Jérémie du Boisberranger
+* Kmol Yuan +
+* Matti Picus
+* Max Bolingbroke +
+* Maxwell Aladago +
+* Oleksandr Pavlyk
+* Peter Andreas Entschev
+* Sergei Lebedev
+* Seth Troisi +
+* Vladimir Pershin +
+* Warren Weckesser
+
+Pull requests merged
+====================
+
+A total of 25 pull requests were merged for this release.
+
+* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops
+* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static
+* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7.
+* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8.
+* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h
+* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist'
+* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing.
+* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport)
+* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd.
+* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test
+* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py
+* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable...
+* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name
+* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random
+* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1
+* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`...
+* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message
+* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE.
+* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection
+* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds
+* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string.
+* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228)
+* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport...
+* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release
diff --git a/doc/changelog/1.17.2-changelog.rst b/doc/changelog/1.17.2-changelog.rst
new file mode 100644
index 000000000..144f40038
--- /dev/null
+++ b/doc/changelog/1.17.2-changelog.rst
@@ -0,0 +1,28 @@
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* CakeWithSteak +
+* Charles Harris
+* Dan Allan
+* Hameer Abbasi
+* Lars Grueter
+* Matti Picus
+* Sebastian Berg
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing.
+* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation.
+* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes
+* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes.
+* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose.
+* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py.
+* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release.
diff --git a/doc/neps/_static/nep-0000.png b/doc/neps/_static/nep-0000.png
index 51eb2b258..0fc8176d2 100644
--- a/doc/neps/_static/nep-0000.png
+++ b/doc/neps/_static/nep-0000.png
Binary files differ
diff --git a/doc/neps/index.rst.tmpl b/doc/neps/index.rst.tmpl
index e7b8fedba..4c5b7766f 100644
--- a/doc/neps/index.rst.tmpl
+++ b/doc/neps/index.rst.tmpl
@@ -23,14 +23,25 @@ Meta-NEPs (NEPs about NEPs or Processes)
.. toctree::
:maxdepth: 1
-{% for nep, tags in neps.items() if tags['Type'] == 'Process' %}
+{% for nep, tags in neps.items() if tags['Status'] == 'Active' %}
{{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
nep-template
-Accepted NEPs, implementation in progress
------------------------------------------
+Provisional NEPs (provisionally accepted; interface may change)
+---------------------------------------------------------------
+
+.. toctree::
+ :maxdepth: 1
+
+{% for nep, tags in neps.items() if tags['Status'] == 'Provisional' %}
+ {{ tags['Title'] }} <{{ tags['Filename'] }}>
+{% endfor %}
+
+
+Accepted NEPs (implementation in progress)
+------------------------------------------
.. toctree::
:maxdepth: 1
@@ -52,7 +63,7 @@ Open NEPs (under consideration)
-Implemented NEPs
+Finished NEPs
----------------
.. toctree::
@@ -62,22 +73,23 @@ Implemented NEPs
{{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
-Deferred NEPs
--------------
+Deferred and Superseded NEPs
+----------------------------
.. toctree::
:maxdepth: 1
-{% for nep, tags in neps.items() if tags['Status'] == 'Deferred' %}
+{% for nep, tags in neps.items() if tags['Status'] in ('Deferred', 'Superseded') %}
{{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
-Rejected NEPs
--------------
+Rejected and Withdrawn NEPs
+---------------------------
.. toctree::
:maxdepth: 1
-{% for nep, tags in neps.items() if tags['Status'] == 'Rejected' %}
+{% for nep, tags in neps.items() if tags['Status'] in ('Rejected', 'Withdrawn') %}
{{ tags['Title'] }} <{{ tags['Filename'] }}>
{% endfor %}
+
diff --git a/doc/neps/nep-0000.rst b/doc/neps/nep-0000.rst
index a3ec3a42b..97b69279b 100644
--- a/doc/neps/nep-0000.rst
+++ b/doc/neps/nep-0000.rst
@@ -31,12 +31,18 @@ feature proposal [1]_.
Types
^^^^^
-There are two kinds of NEP:
+There are three kinds of NEPs:
1. A **Standards Track** NEP describes a new feature or implementation
for NumPy.
-2. A **Process** NEP describes a process surrounding NumPy, or
+2. An **Informational** NEP describes a NumPy design issue, or provides
+   general guidelines or information to the NumPy community, but does not
+ propose a new feature. Informational NEPs do not necessarily represent a
+ NumPy community consensus or recommendation, so users and implementers are
+ free to ignore Informational NEPs or follow their advice.
+
+3. A **Process** NEP describes a process surrounding NumPy, or
proposes a change to (or an event in) a process. Process NEPs are
like Standards Track NEPs but apply to areas other than the NumPy
language itself. They may propose an implementation, but not to
@@ -105,6 +111,20 @@ Once a NEP has been ``Accepted``, the reference implementation must be
completed. When the reference implementation is complete and incorporated
into the main source code repository, the status will be changed to ``Final``.
+To allow gathering of additional design and interface feedback before
+committing to long term stability for a language feature or standard library
+API, a NEP may also be marked as "Provisional". This is short for
+"Provisionally Accepted", and indicates that the proposal has been accepted for
+inclusion in the reference implementation, but additional user feedback is
+needed before the full design can be considered "Final". Unlike regular
+accepted NEPs, provisionally accepted NEPs may still be Rejected or Withdrawn
+even after the related changes have been included in a NumPy release.
+
+Wherever possible, it is considered preferable to reduce the scope of a
+proposal to avoid the need to rely on the "Provisional" status (e.g. by
+deferring some features to later NEPs), as this status can lead to version
+compatibility challenges in the wider NumPy ecosystem.
+
A NEP can also be assigned status ``Deferred``. The NEP author or a
core developer can assign the NEP this status when no progress is being made
on the NEP.
@@ -118,7 +138,7 @@ accepted that a competing proposal is a better alternative.
When a NEP is ``Accepted``, ``Rejected``, or ``Withdrawn``, the NEP should be
updated accordingly. In addition to updating the status field, at the very
least the ``Resolution`` header should be added with a link to the relevant
-post in the mailing list archives.
+thread in the mailing list archives.
NEPs can also be ``Superseded`` by a different NEP, rendering the
original obsolete. The ``Replaced-By`` and ``Replaces`` headers
diff --git a/doc/neps/nep-0010-new-iterator-ufunc.rst b/doc/neps/nep-0010-new-iterator-ufunc.rst
index 8601b4a4c..fd7b3e52c 100644
--- a/doc/neps/nep-0010-new-iterator-ufunc.rst
+++ b/doc/neps/nep-0010-new-iterator-ufunc.rst
@@ -1877,8 +1877,8 @@ the new iterator.
Here is one of the original functions, for reference, and some
random image data.::
- In [5]: rand1 = np.random.random_sample(1080*1920*4).astype(np.float32)
- In [6]: rand2 = np.random.random_sample(1080*1920*4).astype(np.float32)
+ In [5]: rand1 = np.random.random(1080*1920*4).astype(np.float32)
+ In [6]: rand2 = np.random.random(1080*1920*4).astype(np.float32)
In [7]: image1 = rand1.reshape(1080,1920,4).swapaxes(0,1)
In [8]: image2 = rand2.reshape(1080,1920,4).swapaxes(0,1)
diff --git a/doc/neps/nep-0015-merge-multiarray-umath.rst b/doc/neps/nep-0015-merge-multiarray-umath.rst
index 7c1f5faf8..576a21e23 100644
--- a/doc/neps/nep-0015-merge-multiarray-umath.rst
+++ b/doc/neps/nep-0015-merge-multiarray-umath.rst
@@ -3,7 +3,7 @@ NEP 15 — Merging multiarray and umath
=====================================
:Author: Nathaniel J. Smith <njs@pobox.com>
-:Status: Accepted
+:Status: Final
:Type: Standards Track
:Created: 2018-02-22
:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078345.html
diff --git a/doc/neps/nep-0016-abstract-array.rst b/doc/neps/nep-0016-abstract-array.rst
new file mode 100644
index 000000000..7551b11b9
--- /dev/null
+++ b/doc/neps/nep-0016-abstract-array.rst
@@ -0,0 +1,328 @@
+=============================================================
+NEP 16 — An abstract base class for identifying "duck arrays"
+=============================================================
+
+:Author: Nathaniel J. Smith <njs@pobox.com>
+:Status: Withdrawn
+:Type: Standards Track
+:Created: 2018-03-06
+:Resolution: https://github.com/numpy/numpy/pull/12174
+
+.. note::
+
+ This NEP has been withdrawn in favor of the protocol based approach
+ described in
+ `NEP 22 <nep-0022-ndarray-duck-typing-overview.html>`__
+
+Abstract
+--------
+
+We propose to add an abstract base class ``AbstractArray`` so that
+third-party classes can declare their ability to "quack like" an
+``ndarray``, and an ``asabstractarray`` function that performs
+similarly to ``asarray`` except that it passes through
+``AbstractArray`` instances unchanged.
+
+
+Detailed description
+--------------------
+
+Many functions, in NumPy and in third-party packages, start with some
+code like::
+
+ def myfunc(a, b):
+ a = np.asarray(a)
+ b = np.asarray(b)
+ ...
+
+This ensures that ``a`` and ``b`` are ``np.ndarray`` objects, so
+``myfunc`` can carry on assuming that they'll act like ndarrays both
+semantically (at the Python level), and also in terms of how they're
+stored in memory (at the C level). But many of these functions only
+work with arrays at the Python level, which means that they don't
+actually need ``ndarray`` objects *per se*: they could work just as
+well with any Python object that "quacks like" an ndarray, such as
+sparse arrays, dask's lazy arrays, or xarray's labeled arrays.
+
+However, currently, there's no way for these libraries to express that
+their objects can quack like an ndarray, and there's no way for
+functions like ``myfunc`` to express that they'd be happy with
+anything that quacks like an ndarray. The purpose of this NEP is to
+provide those two features.
+
+Sometimes people suggest using ``np.asanyarray`` for this purpose, but
+unfortunately its semantics are exactly backwards: it guarantees that
+the object it returns uses the same memory layout as an ``ndarray``,
+but tells you nothing at all about its semantics, which makes it
+essentially impossible to use safely in practice. Indeed, the two
+``ndarray`` subclasses distributed with NumPy – ``np.matrix`` and
+``np.ma.masked_array`` – do have incompatible semantics, and if they
+were passed to a function like ``myfunc`` that doesn't check for them
+as a special-case, then it may silently return incorrect results.
+
+
+Declaring that an object can quack like an array
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+There are two basic approaches we could use for checking whether an
+object quacks like an array. We could check for a special attribute on
+the class::
+
+ def quacks_like_array(obj):
+ return bool(getattr(type(obj), "__quacks_like_array__", False))
+
+Or, we could define an `abstract base class (ABC)
+<https://docs.python.org/3/library/collections.abc.html>`__::
+
+ def quacks_like_array(obj):
+ return isinstance(obj, AbstractArray)
+
+If you look at how ABCs work, this is essentially equivalent to
+keeping a global set of types that have been declared to implement the
+``AbstractArray`` interface, and then checking it for membership.
+
+Between these, the ABC approach seems to have a number of advantages:
+
+* It's Python's standard, "one obvious way" of doing this.
+
+* ABCs can be introspected (e.g. ``help(np.AbstractArray)`` does
+ something useful).
+
+* ABCs can provide useful mixin methods.
+
+* ABCs integrate with other features like mypy type-checking,
+ ``functools.singledispatch``, etc.
+
+One obvious thing to check is whether this choice affects speed. Using
+the attached benchmark script on a CPython 3.7 prerelease (revision
+c4d77a661138d, self-compiled, no PGO), on a Thinkpad T450s running
+Linux, we find::
+
+ np.asarray(ndarray_obj) 330 ns
+ np.asarray([]) 1400 ns
+
+ Attribute check, success 80 ns
+ Attribute check, failure 80 ns
+
+ ABC, success via subclass 340 ns
+ ABC, success via register() 700 ns
+ ABC, failure 370 ns
+
+Notes:
+
+* The first two lines are included to put the other lines in context.
+
+* This used 3.7 because both ``getattr`` and ABCs are receiving
+ substantial optimizations in this release, and it's more
+ representative of the long-term future of Python. (Failed
+ ``getattr`` doesn't necessarily construct an exception object
+ anymore, and ABCs were reimplemented in C.)
+
+* The "success" lines refer to cases where ``quacks_like_array`` would
+ return True. The "failure" lines are cases where it would return
+ False.
+
+* The first measurement for ABCs is subclasses defined like::
+
+ class MyArray(AbstractArray):
+ ...
+
+ The second is for subclasses defined like::
+
+ class MyArray:
+ ...
+
+ AbstractArray.register(MyArray)
+
+ I don't know why there's such a large difference between these.
+
+In practice, either way we'd only do the full test after first
+checking for well-known types like ``ndarray``, ``list``, etc. `This
+is how NumPy currently checks for other double-underscore attributes
+<https://github.com/numpy/numpy/blob/master/numpy/core/src/private/get_attr_string.h>`__
+and the same idea applies here to either approach. So these numbers
+won't affect the common case, just the case where we actually have an
+``AbstractArray``, or else another third-party object that will end up
+going through ``__array__`` or ``__array_interface__`` or end up as an
+object array.
+
+So in summary, using an ABC will be slightly slower than using an
+attribute, but this doesn't affect the most common paths, and the
+magnitude of slowdown is fairly small (~250 ns on an operation that
+already takes longer than that). Furthermore, we can potentially
+optimize this further (e.g. by keeping a tiny LRU cache of types that
+are known to be AbstractArray subclasses, on the assumption that most
+code will only use one or two of these types at a time), and it's very
+unclear that this even matters – if the speed of ``asarray`` no-op
+pass-throughs were a bottleneck that showed up in profiles, then
+probably we would have made them faster already! (It would be trivial
+to fast-path this, but we don't.)
+
+Given the semantic and usability advantages of ABCs, this seems like
+an acceptable trade-off.
+
+..
+ CPython 3.6 (from Debian)::
+
+ Attribute check, success 110 ns
+ Attribute check, failure 370 ns
+
+ ABC, success via subclass 690 ns
+ ABC, success via register() 690 ns
+ ABC, failure 1220 ns
+
+
+Specification of ``asabstractarray``
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Given ``AbstractArray``, the definition of ``asabstractarray`` is simple::
+
+ def asabstractarray(a, dtype=None):
+ if isinstance(a, AbstractArray):
+ if dtype is not None and dtype != a.dtype:
+ return a.astype(dtype)
+ return a
+ return asarray(a, dtype=dtype)
+
+Things to note:
+
+* ``asarray`` also accepts an ``order=`` argument, but we don't
+ include that here because it's about details of memory
+ representation, and the whole point of this function is that you use
+ it to declare that you don't care about details of memory
+ representation.
+
+* Using the ``astype`` method allows the ``a`` object to decide how to
+ implement casting for its particular type.
+
+* For strict compatibility with ``asarray``, we skip calling
+ ``astype`` when the dtype is already correct. Compare::
+
+ >>> a = np.arange(10)
+
+ # astype() always returns a view:
+ >>> a.astype(a.dtype) is a
+ False
+
+ # asarray() returns the original object if possible:
+ >>> np.asarray(a, dtype=a.dtype) is a
+ True
+
+
+What exactly are you promising if you inherit from ``AbstractArray``?
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This will presumably be refined over time. The ideal of course is that
+your class should be indistinguishable from a real ``ndarray``, but
+nothing enforces that except the expectations of users. In practice,
+declaring that your class implements the ``AbstractArray`` interface
+simply means that it will start passing through ``asabstractarray``,
+and so by subclassing it you're saying that if some code works for
+``ndarray``\s but breaks for your class, then you're willing to accept
+bug reports on that.
+
+To start with, we should declare ``__array_ufunc__`` to be an abstract
+method, and add the ``NDArrayOperatorsMixin`` methods as mixin
+methods.
+
+Declaring ``astype`` as an ``@abstractmethod`` probably makes sense as
+well, since it's used by ``asabstractarray``. We might also want to go
+ahead and add some basic attributes like ``ndim``, ``shape``,
+``dtype``.
+
+Adding new abstract methods will be a bit tricky, because ABCs enforce
+these at subclass time; therefore, simply adding a new
+`@abstractmethod` will be a backwards compatibility break. If this
+becomes a problem then we can use some hacks to implement an
+`@upcoming_abstractmethod` decorator that only issues a warning if the
+method is missing, and treat it like a regular deprecation cycle. (In
+this case, the thing we'd be deprecating is "support for abstract
+arrays that are missing feature X".)
+
+
+Naming
+~~~~~~
+
+The name of the ABC doesn't matter too much, because it will only be
+referenced rarely and in relatively specialized situations. The name
+of the function matters a lot, because most existing instances of
+``asarray`` should be replaced by this, and in the future it's what
+everyone should be reaching for by default unless they have a specific
+reason to use ``asarray`` instead. This suggests that its name really
+should be *shorter* and *more memorable* than ``asarray``... which
+is difficult. I've used ``asabstractarray`` in this draft, but I'm not
+really happy with it, because it's too long and people are unlikely to
+start using it by habit without endless exhortations.
+
+One option would be to actually change ``asarray``\'s semantics so
+that *it* passes through ``AbstractArray`` objects unchanged. But I'm
+worried that there may be a lot of code out there that calls
+``asarray`` and then passes the result into some C function that
+doesn't do any further type checking (because it knows that its caller
+has already used ``asarray``). If we allow ``asarray`` to return
+``AbstractArray`` objects, and then someone calls one of these C
+wrappers and passes it an ``AbstractArray`` object like a sparse
+array, then they'll get a segfault. Right now, in the same situation,
+``asarray`` will instead invoke the object's ``__array__`` method, or
+use the buffer interface to make a view, or pass through an array with
+object dtype, or raise an error, or similar. Probably none of these
+outcomes are actually desirable in most cases, so maybe making it a
+segfault instead would be OK? But it's dangerous given that we don't
+know how common such code is. OTOH, if we were starting from scratch
+then this would probably be the ideal solution.
+
+We can't use ``asanyarray`` or ``array``, since those are already
+taken.
+
+Any other ideas? ``np.cast``, ``np.coerce``?
+
+
+Implementation
+--------------
+
+1. Rename ``NDArrayOperatorsMixin`` to ``AbstractArray`` (leaving
+ behind an alias for backwards compatibility) and make it an ABC.
+
+2. Add ``asabstractarray`` (or whatever we end up calling it), and
+ probably a C API equivalent.
+
+3. Begin migrating NumPy internal functions to using
+ ``asabstractarray`` where appropriate.
+
+
+Backward compatibility
+----------------------
+
+This is purely a new feature, so there are no compatibility issues.
+(Unless we decide to change the semantics of ``asarray`` itself.)
+
+
+Rejected alternatives
+---------------------
+
+One suggestion that has come up is to define multiple abstract classes
+for different subsets of the array interface. Nothing in this proposal
+stops either NumPy or third-parties from doing this in the future, but
+it's very difficult to guess ahead of time which subsets would be
+useful. Also, "the full ndarray interface" is something that existing
+libraries are written to expect (because they work with actual
+ndarrays) and test (because they test with actual ndarrays), so it's
+by far the easiest place to start.
+
+
+Links to discussion
+-------------------
+
+* https://mail.python.org/pipermail/numpy-discussion/2018-March/077767.html
+
+
+Appendix: Benchmark script
+--------------------------
+
+.. literalinclude:: nep-0016-benchmark.py
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0016-benchmark.py b/doc/neps/nep-0016-benchmark.py
new file mode 100644
index 000000000..ec8e44726
--- /dev/null
+++ b/doc/neps/nep-0016-benchmark.py
@@ -0,0 +1,48 @@
+import perf
+import abc
+import numpy as np
+
+class NotArray:
+ pass
+
+class AttrArray:
+ __array_implementer__ = True
+
+class ArrayBase(abc.ABC):
+ pass
+
+class ABCArray1(ArrayBase):
+ pass
+
+class ABCArray2:
+ pass
+
+ArrayBase.register(ABCArray2)
+
+not_array = NotArray()
+attr_array = AttrArray()
+abc_array_1 = ABCArray1()
+abc_array_2 = ABCArray2()
+
+# Make sure ABC cache is primed
+isinstance(not_array, ArrayBase)
+isinstance(abc_array_1, ArrayBase)
+isinstance(abc_array_2, ArrayBase)
+
+runner = perf.Runner()
+def t(name, statement):
+ runner.timeit(name, statement, globals=globals())
+
+t("np.asarray([])", "np.asarray([])")
+arrobj = np.array([])
+t("np.asarray(arrobj)", "np.asarray(arrobj)")
+
+t("attr, False",
+ "getattr(not_array, '__array_implementer__', False)")
+t("attr, True",
+ "getattr(attr_array, '__array_implementer__', False)")
+
+t("ABC, False", "isinstance(not_array, ArrayBase)")
+t("ABC, True, via inheritance", "isinstance(abc_array_1, ArrayBase)")
+t("ABC, True, via register", "isinstance(abc_array_2, ArrayBase)")
+
diff --git a/doc/neps/nep-0018-array-function-protocol.rst b/doc/neps/nep-0018-array-function-protocol.rst
index a63068306..fb9b838b5 100644
--- a/doc/neps/nep-0018-array-function-protocol.rst
+++ b/doc/neps/nep-0018-array-function-protocol.rst
@@ -7,9 +7,10 @@ NEP 18 — A dispatch mechanism for NumPy's high level array functions
:Author: Marten van Kerkwijk <mhvk@astro.utoronto.ca>
:Author: Hameer Abbasi <hameerabbasi@yahoo.com>
:Author: Eric Wieser <wieser.eric@gmail.com>
-:Status: Accepted
+:Status: Provisional
:Type: Standards Track
:Created: 2018-05-29
+:Updated: 2019-05-25
:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-August/078493.html
Abstact
@@ -95,6 +96,18 @@ A prototype implementation can be found in
of NumPy functions for non-NumPy arrays. See "Non-goals" below for more
details.
+.. note::
+
+ Dispatch with the ``__array_function__`` protocol has been implemented but is
+ not yet enabled by default:
+
+ - In NumPy 1.16, you need to set the environment variable
+ ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` before importing NumPy to test
+ NumPy function overrides.
+ - In NumPy 1.17, the protocol will be enabled by default, but can be disabled
+ with ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0``.
+  - Eventually, expect ``__array_function__`` to always be enabled.
+
The interface
~~~~~~~~~~~~~
@@ -119,8 +132,9 @@ implementing the array API.
As a convenience for ``__array_function__`` implementors, ``types`` provides all
argument types with an ``'__array_function__'`` attribute. This
-allows downstream implementations to quickly determine if they are likely able
-to support the operation. The type of ``types`` is intentionally vague:
+allows implementors to quickly identify cases where they should defer to
+``__array_function__`` implementations on other arguments.
+The type of ``types`` is intentionally vague:
``frozenset`` would most closely match intended use, but we may use ``tuple``
instead for performance reasons. In any case, ``__array_function__``
implementations should not rely on the iteration order of ``types``, which
@@ -189,6 +203,14 @@ include *all* of the corresponding NumPy function's optional arguments
Optional arguments are only passed in to ``__array_function__`` if they
were explicitly used in the NumPy function call.
+.. note::
+
+ Just like the case for builtin special methods like ``__add__``, properly
+ written ``__array_function__`` methods should always return
+ ``NotImplemented`` when an unknown type is encountered. Otherwise, it will
+ be impossible to correctly override NumPy functions from another object
+ if the operation also includes one of your objects.
+
Necessary changes within the NumPy codebase itself
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -290,6 +312,13 @@ In particular:
- If all ``__array_function__`` methods return ``NotImplemented``,
NumPy will raise ``TypeError``.
+If no ``__array_function__`` methods exist, NumPy will default to calling its
+own implementation, intended for use on NumPy arrays. This case arises, for
+example, when all array-like arguments are Python numbers or lists.
+(NumPy arrays do have a ``__array_function__`` method, given below, but it
+always returns ``NotImplemented`` if any argument other than a NumPy array
+subclass implements ``__array_function__``.)
+
One deviation from the current behavior of ``__array_ufunc__`` is that NumPy
will only call ``__array_function__`` on the *first* argument of each unique
type. This matches Python's
@@ -300,67 +329,83 @@ between these two dispatch protocols, we should
`also update <https://github.com/numpy/numpy/issues/11306>`_
``__array_ufunc__`` to match this behavior.
-Special handling of ``numpy.ndarray``
-'''''''''''''''''''''''''''''''''''''
+The ``__array_function__`` method on ``numpy.ndarray``
+''''''''''''''''''''''''''''''''''''''''''''''''''''''
The use cases for subclasses with ``__array_function__`` are the same as those
-with ``__array_ufunc__``, so ``numpy.ndarray`` should also define a
-``__array_function__`` method mirroring ``ndarray.__array_ufunc__``:
+with ``__array_ufunc__``, so ``numpy.ndarray`` also defines a
+``__array_function__`` method:
.. code:: python
def __array_function__(self, func, types, args, kwargs):
- # Cannot handle items that have __array_function__ other than our own.
- for t in types:
- if (hasattr(t, '__array_function__') and
- t.__array_function__ is not ndarray.__array_function__):
- return NotImplemented
-
- # Arguments contain no overrides, so we can safely call the
- # overloaded function again.
- return func(*args, **kwargs)
-
-To avoid infinite recursion, the dispatch rules for ``__array_function__`` need
-also the same special case they have for ``__array_ufunc__``: any arguments with
-an ``__array_function__`` method that is identical to
-``numpy.ndarray.__array_function__`` are not be called as
-``__array_function__`` implementations.
+ if not all(issubclass(t, ndarray) for t in types):
+ # Defer to any non-subclasses that implement __array_function__
+ return NotImplemented
+
+ # Use NumPy's private implementation without __array_function__
+ # dispatching
+ return func._implementation(*args, **kwargs)
+
+This method matches NumPy's dispatching rules, so for the most part it is
+possible to pretend that ``ndarray.__array_function__`` does not exist.
+The private ``_implementation`` attribute, defined below in the
+``array_function_dispatch`` decorator, allows us to avoid the special cases for
+NumPy arrays that were needed in the ``__array_ufunc__`` protocol.
+
+The ``__array_function__`` protocol always calls subclasses before
+superclasses, so if any ``ndarray`` subclasses are involved in an operation,
+they will get the chance to override it, just as if any other argument
+overrides ``__array_function__``. But the default behavior in an operation
+that combines a base NumPy array and a subclass is different: if the subclass
+returns ``NotImplemented``, NumPy's implementation of the function will be
+called instead of raising an exception. This is appropriate since subclasses
+are `expected to be substitutable <https://en.wikipedia.org/wiki/Liskov_substitution_principle>`_.
+
+We still caution authors of subclasses to exercise caution when relying
+upon details of NumPy's internal implementations. It is not always possible to
+write a perfectly substitutable ndarray subclass, e.g., in cases involving the
+creation of new arrays, not least because NumPy makes use of internal
+optimizations specialized to base NumPy arrays, e.g., code written in C. Even
+if NumPy's implementation happens to work today, it may not work in the future.
+In these cases, your recourse is to re-implement top-level NumPy functions via
+``__array_function__`` on your subclass.
Changes within NumPy functions
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Given a function defining the above behavior, for now call it
-``try_array_function_override``, we now need to call that function from
-within every relevant NumPy function. This is a pervasive change, but of
-fairly simple and innocuous code that should complete quickly and
+``implement_array_function``, we now need to call that
+function from within every relevant NumPy function. This is a pervasive change,
+but of fairly simple and innocuous code that should complete quickly and
without effect if no arguments implement the ``__array_function__``
protocol.
-In most cases, these functions should written using the
-``array_function_dispatch`` decorator, which also associates dispatcher
-functions:
+To achieve this, we define an ``array_function_dispatch`` decorator to rewrite
+NumPy functions. The basic implementation is as follows:
.. code:: python
- def array_function_dispatch(dispatcher):
+ def array_function_dispatch(dispatcher, module=None):
"""Wrap a function for dispatch with the __array_function__ protocol."""
- def decorator(func):
- @functools.wraps(func)
- def new_func(*args, **kwargs):
- relevant_arguments = dispatcher(*args, **kwargs)
- success, value = try_array_function_override(
- new_func, relevant_arguments, args, kwargs)
- if success:
- return value
- return func(*args, **kwargs)
- return new_func
+ def decorator(implementation):
+ @functools.wraps(implementation)
+ def public_api(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return implement_array_function(
+ implementation, public_api, relevant_args, args, kwargs)
+ if module is not None:
+ public_api.__module__ = module
+ # for ndarray.__array_function__
+ public_api._implementation = implementation
+ return public_api
return decorator
# example usage
- def _broadcast_to_dispatcher(array, shape, subok=None, **ignored_kwargs):
+ def _broadcast_to_dispatcher(array, shape, subok=None):
return (array,)
- @array_function_dispatch(_broadcast_to_dispatcher)
+ @array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
... # existing definition of np.broadcast_to
@@ -378,33 +423,47 @@ It's particularly worth calling out the decorator's use of
the wrapped NumPy function.
- On Python 3, it also ensures that the decorator function copies the original
function signature, which is important for introspection based tools such as
- auto-complete. If we care about preserving function signatures on Python 2,
- for the `short while longer <http://www.numpy.org/neps/nep-0014-dropping-python2.7-proposal.html>`_
- that NumPy supports Python 2.7, we do could do so by adding a vendored
- dependency on the (single-file, BSD licensed)
- `decorator library <https://github.com/micheles/decorator>`_.
+ auto-complete.
- Finally, it ensures that the wrapped function
`can be pickled <http://gael-varoquaux.info/programming/decoration-in-python-done-right-decorating-and-pickling.html>`_.
-In a few cases, it would not make sense to use the ``array_function_dispatch``
-decorator directly, but override implementation in terms of
-``try_array_function_override`` should still be straightforward.
-
-- Functions written entirely in C (e.g., ``np.concatenate``) can't use
- decorators, but they could still use a C equivalent of
- ``try_array_function_override``. If performance is not a concern, they could
- also be easily wrapped with a small Python wrapper.
-- ``np.einsum`` does complicated argument parsing to handle two different
- function signatures. It would probably be best to avoid the overhead of
- parsing it twice in the typical case of no overrides.
-
-Fortunately, in each of these cases so far, the functions already has a generic
-signature of the form ``*args, **kwargs``, which means we don't need to worry
-about potential inconsistency between how functions are called and what we pass
-to ``__array_function__``. (In C, arguments for all Python functions are parsed
-from a tuple ``*args`` and dict ``**kwargs``.) This shouldn't stop us from
-writing overrides for functions with non-generic signatures that can't use the
-decorator, but we should consider these cases carefully.
+The example usage illustrates several best practices for writing dispatchers
+relevant to NumPy contributors:
+
+- We passed the ``module`` argument, which in turn sets the ``__module__``
+ attribute on the generated function. This is for the benefit of better error
+ messages, here for errors raised internally by NumPy when no implementation
+ is found, e.g.,
+ ``TypeError: no implementation found for 'numpy.broadcast_to'``. Setting
+ ``__module__`` to the canonical location in NumPy's public API encourages
+ users to use NumPy's public API for identifying functions in
+ ``__array_function__``.
+
+- The dispatcher is a function that returns a tuple, rather than an equivalent
+ (and equally valid) generator using ``yield``:
+
+ .. code:: python
+
+ # example usage
+      def _broadcast_to_dispatcher(array, shape, subok=None):
+ yield array
+
+ This is no accident: NumPy's implementation of dispatch for
+ ``__array_function__`` is fastest when dispatcher functions return a builtin
+ sequence type (``tuple`` or ``list``).
+
+ On a related note, it's perfectly fine for dispatchers to return arguments
+ even if in some cases you *know* that they cannot have an
+ ``__array_function__`` method. This can arise for functions with default
+ arguments (e.g., ``None``) or complex signatures. NumPy's dispatching logic
+ sorts out these cases very quickly, so it generally is not worth the trouble
+ of parsing them on your own.
+
+.. note::
+
+ The code for ``array_function_dispatch`` above has been updated from the
+ original version of this NEP to match the actual
+ `implementation in NumPy <https://github.com/numpy/numpy/blob/e104f03ac8f65ae5b92a9b413b0fa639f39e6de2/numpy/core/overrides.py>`_.
Extensibility
~~~~~~~~~~~~~
@@ -413,10 +472,10 @@ An important virtue of this approach is that it allows for adding new
optional arguments to NumPy functions without breaking code that already
relies on ``__array_function__``.
-This is not a theoretical concern. The implementation of overrides *within*
-functions like ``np.sum()`` rather than defining a new function capturing
-``*args`` and ``**kwargs`` necessitated some awkward gymnastics to ensure that
-the new ``keepdims`` argument is only passed in cases where it is used, e.g.,
+This is not a theoretical concern. NumPy's older, haphazard implementation of
+overrides *within* functions like ``np.sum()`` necessitated some awkward
+gymnastics when we decided to add new optional arguments, e.g., the new
+``keepdims`` argument is only passed in cases where it is used:
.. code:: python
@@ -426,11 +485,12 @@ the new ``keepdims`` argument is only passed in cases where it is used, e.g.,
kwargs['keepdims'] = keepdims
return array.sum(..., **kwargs)
-This also makes it possible to add optional arguments to ``__array_function__``
-implementations incrementally and only in cases where it makes sense. For
-example, a library implementing immutable arrays would not be required to
-explicitly include an unsupported ``out`` argument. Doing this properly for all
-optional arguments is somewhat onerous, e.g.,
+For ``__array_function__`` implementors, this also means that it is possible
+to implement even existing optional arguments incrementally, and only in cases
+where it makes sense. For example, a library implementing immutable arrays
+would not be required to explicitly include an unsupported ``out`` argument in
+the function signature. This can be somewhat onerous to implement properly,
+e.g.,
.. code:: python
@@ -462,10 +522,10 @@ the difference in speed between the ``ndarray.sum()`` method (1.6 us) and
``numpy.sum()`` function (2.6 us).
Fortunately, we expect significantly less overhead with a C implementation of
-``try_array_function_override``, which is where the bulk of the runtime is.
-This would leave the ``array_function_dispatch`` decorator and dispatcher
-function on their own adding about 0.5 microseconds of overhead, for perhaps ~1
-microsecond of overhead in the typical case.
+``implement_array_function``, which is where the bulk of the
+runtime is. This would leave the ``array_function_dispatch`` decorator and
+dispatcher function on their own adding about 0.5 microseconds of overhead,
+for perhaps ~1 microsecond of overhead in the typical case.
In our view, this level of overhead is reasonable to accept for code written
in Python. We're pretty sure that the vast majority of NumPy users aren't
@@ -490,7 +550,7 @@ already wrap a limited subset of SciPy functionality (e.g.,
If we want to do this, we should expose at least the decorator
``array_function_dispatch()`` and possibly also the lower level
-``try_array_function_override()`` as part of NumPy's public API.
+``implement_array_function()`` as part of NumPy's public API.
Non-goals
---------
@@ -540,7 +600,7 @@ Backward compatibility
----------------------
This proposal does not change existing semantics, except for those arguments
-that currently have ``__array_function__`` methods, which should be rare.
+that currently have ``__array_function__`` attributes, which should be rare.
Alternatives
@@ -582,7 +642,7 @@ layer, separating NumPy's high level API from default implementations on
The downsides are that this would require an explicit opt-in from all
existing code, e.g., ``import numpy.api as np``, and in the long term
-would result in the maintainence of two separate NumPy APIs. Also, many
+would result in the maintenance of two separate NumPy APIs. Also, many
functions from ``numpy`` itself are already overloaded (but
inadequately), so confusion about high vs. low level APIs in NumPy would
still persist.
@@ -618,7 +678,7 @@ would be straightforward to write a shim for a default
Implementations in terms of a limited core API
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-The internal implementations of some NumPy functions is extremely simple.
+The internal implementation of some NumPy functions is extremely simple.
For example:
- ``np.stack()`` is implemented in only a few lines of code by combining
@@ -656,8 +716,8 @@ However, to work well this would require the possibility of implementing
*some* but not all functions with ``__array_function__``, e.g., as described
in the next section.
-Coercion to a NumPy array as a catch-all fallback
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Partial implementation of NumPy's API
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
With the current design, classes that implement ``__array_function__``
to overload at least one function implicitly declare an intent to
@@ -674,44 +734,64 @@ that assuredly many pandas users rely on. If pandas implemented
functions like ``np.nanmean`` would suddenly break on pandas objects by
raising TypeError.
+Even libraries that reimplement most of NumPy's public API sometimes rely upon
+using utility functions from NumPy without a wrapper. For example, both CuPy
+and JAX simply `use an alias <https://github.com/numpy/numpy/issues/12974>`_ to
+``np.result_type``, which already supports duck-types with a ``dtype``
+attribute.
+
With ``__array_ufunc__``, it's possible to alleviate this concern by
casting all arguments to numpy arrays and re-calling the ufunc, but the
heterogeneous function signatures supported by ``__array_function__``
make it impossible to implement this generic fallback behavior for
``__array_function__``.
-We could resolve this issue by change the handling of return values in
-``__array_function__`` in either of two possible ways:
+We considered three possible ways to resolve this issue, but none were
+entirely satisfactory:
-1. Change the meaning of all arguments returning ``NotImplemented`` to indicate
- that all arguments should be coerced to NumPy arrays and the operation
- should be retried. However, many array libraries (e.g., scipy.sparse) really
- don't want implicit conversions to NumPy arrays, and often avoid implementing
- ``__array__`` for exactly this reason. Implicit conversions can result in
- silent bugs and performance degradation.
+1. Change the meaning of all arguments returning ``NotImplemented`` from
+ ``__array_function__`` to indicate that all arguments should be coerced to
+ NumPy arrays and the operation should be retried. However, many array
+ libraries (e.g., scipy.sparse) really don't want implicit conversions to
+ NumPy arrays, and often avoid implementing ``__array__`` for exactly this
+ reason. Implicit conversions can result in silent bugs and performance
+ degradation.
Potentially, we could enable this behavior only for types that implement
``__array__``, which would resolve the most problematic cases like
scipy.sparse. But in practice, a large fraction of classes that present a
high level API like NumPy arrays already implement ``__array__``. This would
preclude reliable use of NumPy's high level API on these objects.
+
2. Use another sentinel value of some sort, e.g.,
- ``np.NotImplementedButCoercible``, to indicate that a class implementing part
- of NumPy's higher level array API is coercible as a fallback. This is a more
- appealing option.
-
-With either approach, we would need to define additional rules for *how*
-coercible array arguments are coerced. The only sane rule would be to treat
-these return values as equivalent to not defining an
-``__array_function__`` method at all, which means that NumPy functions would
-fall-back to their current behavior of coercing all array-like arguments.
-
-It is not yet clear to us yet if we need an optional like
-``NotImplementedButCoercible``, so for now we propose to defer this issue.
-We can always implement ``np.NotImplementedButCoercible`` at some later time if
-it proves critical to the NumPy community in the future. Importantly, we don't
-think this will stop critical libraries that desire to implement most of the
-high level NumPy API from adopting this proposal.
+ ``np.NotImplementedButCoercible``, to indicate that a class implementing
+ part of NumPy's higher level array API is coercible as a fallback. If all
+ arguments return ``NotImplementedButCoercible``, arguments would be coerced
+ and the operation would be retried.
+
+ Unfortunately, correct behavior after encountering
+ ``NotImplementedButCoercible`` is not always obvious. Particularly
+ challenging is the "mixed" case where some arguments return
+ ``NotImplementedButCoercible`` and others return ``NotImplemented``.
+ Would dispatching be retried after only coercing the "coercible" arguments?
+ If so, then conceivably we could end up looping through the dispatching
+ logic an arbitrary number of times. Either way, the dispatching rules would
+ definitely get more complex and harder to reason about.
+
+3. Allow access to NumPy's implementation of functions, e.g., in the form of
+ a publicly exposed ``__skip_array_function__`` attribute on the NumPy
+ functions. This would allow for falling back to NumPy's implementation by
+ using ``func.__skip_array_function__`` inside ``__array_function__``
+   methods, and could also potentially be used to avoid the
+ overhead of dispatching. However, it runs the risk of potentially exposing
+ details of NumPy's implementations for NumPy functions that do not call
+ ``np.asarray()`` internally. See
+ `this note <https://mail.python.org/pipermail/numpy-discussion/2019-May/079541.html>`_
+ for a summary of the full discussion.
+
+These solutions would solve real use cases, but at the cost of additional
+complexity. We would like to gain experience with how ``__array_function__`` is
+actually used before making decisions that would be difficult to roll back.
A magic decorator that inspects type annotations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -794,9 +874,9 @@ public API.
``types`` is included because we can compute it almost for free as part of
collecting ``__array_function__`` implementations to call in
-``try_array_function_override``. We also think it will be used by most
-``__array_function__`` methods, which otherwise would need to extract this
-information themselves. It would be equivalently easy to provide single
+``implement_array_function``. We also think it will be used
+by many ``__array_function__`` methods, which otherwise would need to extract
+this information themselves. It would be equivalently easy to provide single
instances of each type, but providing only types seemed cleaner.
Taking this even further, it was suggested that ``__array_function__`` should be
@@ -807,10 +887,10 @@ worth breaking from the precedence of ``__array_ufunc__``.
There are two other arguments that we think *might* be important to pass to
``__array_ufunc__`` implementations:
-- Access to the non-dispatched function (i.e., before wrapping with
+- Access to the non-dispatched implementation (i.e., before wrapping with
``array_function_dispatch``) in ``ndarray.__array_function__`` would allow
- use to drop special case logic for that method from
- ``try_array_function_override``.
+ us to drop special case logic for that method from
+ ``implement_array_function``.
- Access to the ``dispatcher`` function passed into
``array_function_dispatch()`` would allow ``__array_function__``
implementations to determine the list of "array-like" arguments in a generic
@@ -847,7 +927,7 @@ a descriptor.
Given the complexity and the limited use cases, we are also deferring on this
issue for now, but we are confident that ``__array_function__`` could be
-expanded to accomodate these use cases in the future if need be.
+expanded to accommodate these use cases in the future if need be.
Discussion
----------
@@ -864,7 +944,7 @@ it was discussed at a `NumPy developer sprint
Berkeley Institute for Data Science (BIDS) <https://bids.berkeley.edu/>`_.
Detailed discussion of this proposal itself can be found on the
-`the mailing list <https://mail.python.org/pipermail/numpy-discussion/2018-June/078127.html>`_ and relvant pull requests
+`the mailing list <https://mail.python.org/pipermail/numpy-discussion/2018-June/078127.html>`_ and relevant pull requests
(`1 <https://github.com/numpy/numpy/pull/11189>`_,
`2 <https://github.com/numpy/numpy/pull/11303#issuecomment-396638175>`_,
`3 <https://github.com/numpy/numpy/pull/11374>`_)
diff --git a/doc/neps/nep-0019-rng-policy.rst b/doc/neps/nep-0019-rng-policy.rst
index f50897b0f..9704b24ca 100644
--- a/doc/neps/nep-0019-rng-policy.rst
+++ b/doc/neps/nep-0019-rng-policy.rst
@@ -6,7 +6,8 @@ NEP 19 — Random Number Generator Policy
:Status: Accepted
:Type: Standards Track
:Created: 2018-05-24
-:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-June/078126.html
+:Updated: 2019-05-21
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-July/078380.html
Abstract
--------
@@ -91,7 +92,8 @@ those contributors simply walked away.
Implementation
--------------
-Work on a proposed new PRNG subsystem is already underway in the randomgen_
+Work on a proposed new Pseudo Random Number Generator (PRNG) subsystem is
+already underway in the randomgen_
project. The specifics of the new design are out of scope for this NEP and up
for much discussion, but we will discuss general policies that will guide the
evolution of whatever code is adopted. We will also outline just a few of the
@@ -119,37 +121,38 @@ Gaussian variate generation to the faster `Ziggurat algorithm
discouraged improvement would be tweaking the Ziggurat tables just a little bit
for a small performance improvement.
-Any new design for the RNG subsystem will provide a choice of different core
+Any new design for the random subsystem will provide a choice of different core
uniform PRNG algorithms. A promising design choice is to make these core
uniform PRNGs their own lightweight objects with a minimal set of methods
-(randomgen_ calls them “basic RNGs”). The broader set of non-uniform
+(randomgen_ calls them “BitGenerators”). The broader set of non-uniform
distributions will be its own class that holds a reference to one of these core
uniform PRNG objects and simply delegates to the core uniform PRNG object when
-it needs uniform random numbers. To borrow an example from randomgen_, the
-class ``MT19937`` is a basic RNG that implements the classic Mersenne Twister
-algorithm. The class ``RandomGenerator`` wraps around the basic RNG to provide
+it needs uniform random numbers (randomgen_ calls this the Generator). To
+borrow an example from randomgen_, the
+class ``MT19937`` is a BitGenerator that implements the classic Mersenne Twister
+algorithm. The class ``Generator`` wraps around the BitGenerator to provide
all of the non-uniform distribution methods::
# This is not the only way to instantiate this object.
# This is just handy for demonstrating the delegation.
- >>> brng = MT19937(seed)
- >>> rg = RandomGenerator(brng)
+ >>> bg = MT19937(seed)
+ >>> rg = Generator(bg)
>>> x = rg.standard_normal(10)
-We will be more strict about a select subset of methods on these basic RNG
+We will be more strict about a select subset of methods on these BitGenerator
objects. They MUST guarantee stream-compatibility for a specified set
of methods which are chosen to make it easier to compose them to build other
distributions and which are needed to abstract over the implementation details
-of the variety of core PRNG algorithms. Namely,
+of the variety of BitGenerator algorithms. Namely,
* ``.bytes()``
- * ``.random_uintegers()``
- * ``.random_sample()``
+  * ``.integers()`` (formerly ``.random_integers()``)
+  * ``.random()`` (formerly ``.random_sample()``)
-The distributions class (``RandomGenerator``) SHOULD have all of the same
+The distributions class (``Generator``) SHOULD have all of the same
distribution methods as ``RandomState`` with close-enough function signatures
such that almost all code that currently works with ``RandomState`` instances
-will work with ``RandomGenerator`` instances (ignoring the precise stream
+will work with ``Generator`` instances (ignoring the precise stream
values). Some variance will be allowed for integer distributions: in order to
avoid some of the cross-platform problems described above, these SHOULD be
rewritten to work with ``uint64`` numbers on all platforms.
@@ -183,9 +186,10 @@ reproducible across numpy versions.
This legacy distributions class MUST be accessible under the name
``numpy.random.RandomState`` for backwards compatibility. All current ways of
instantiating ``numpy.random.RandomState`` with a given state should
-instantiate the Mersenne Twister basic RNG with the same state. The legacy
-distributions class MUST be capable of accepting other basic RNGs. The purpose
-here is to ensure that one can write a program with a consistent basic RNG
+instantiate the Mersenne Twister BitGenerator with the same state. The legacy
+distributions class MUST be capable of accepting other BitGenerators. The
+purpose
+here is to ensure that one can write a program with a consistent BitGenerator
state with a mixture of libraries that may or may not have upgraded from
``RandomState``. Instances of the legacy distributions class MUST respond
``True`` to ``isinstance(rg, numpy.random.RandomState)`` because there is
@@ -209,27 +213,27 @@ consistently and usefully, but a very common usage is in unit tests where many
of the problems of global state are less likely.
This NEP does not propose removing these functions or changing them to use the
-less-stable ``RandomGenerator`` distribution implementations. Future NEPs
+less-stable ``Generator`` distribution implementations. Future NEPs
might.
Specifically, the initial release of the new PRNG subsystem SHALL leave these
convenience functions as aliases to the methods on a global ``RandomState``
-that is initialized with a Mersenne Twister basic RNG object. A call to
-``numpy.random.seed()`` will be forwarded to that basic RNG object. In
+that is initialized with a Mersenne Twister BitGenerator object. A call to
+``numpy.random.seed()`` will be forwarded to that BitGenerator object. In
addition, the global ``RandomState`` instance MUST be accessible in this
initial release by the name ``numpy.random.mtrand._rand``: Robert Kern long ago
promised ``scikit-learn`` that this name would be stable. Whoops.
-In order to allow certain workarounds, it MUST be possible to replace the basic
-RNG underneath the global ``RandomState`` with any other basic RNG object (we
-leave the precise API details up to the new subsystem). Calling
+In order to allow certain workarounds, it MUST be possible to replace the
+BitGenerator underneath the global ``RandomState`` with any other BitGenerator
+object (we leave the precise API details up to the new subsystem). Calling
``numpy.random.seed()`` thereafter SHOULD just pass the given seed to the
-current basic RNG object and not attempt to reset the basic RNG to the Mersenne
-Twister. The set of ``numpy.random.*`` convenience functions SHALL remain the
-same as they currently are. They SHALL be aliases to the ``RandomState``
-methods and not the new less-stable distributions class (``RandomGenerator``,
-in the examples above). Users who want to get the fastest, best distributions
-can follow best practices and instantiate generator objects explicitly.
+current BitGenerator object and not attempt to reset the BitGenerator to the
+Mersenne Twister. The set of ``numpy.random.*`` convenience functions SHALL
+remain the same as they currently are. They SHALL be aliases to the
+``RandomState`` methods and not the new less-stable distributions class
+(``Generator``, in the examples above). Users who want to get the fastest, best
+distributions can follow best practices and instantiate generator objects explicitly.
This NEP does not propose that these requirements remain in perpetuity. After
we have experience with the new PRNG subsystem, we can and should revisit these
@@ -292,14 +296,14 @@ satisfactory subset. At least some projects used a fairly broad selection of
the ``RandomState`` methods in unit tests.
Downstream project owners would have been forced to modify their code to
-accomodate the new PRNG subsystem. Some modifications might be simply
+accommodate the new PRNG subsystem. Some modifications might be simply
mechanical, but the bulk of the work would have been tedious churn for no
positive improvement to the downstream project, just avoiding being broken.
Furthermore, under this old proposal, we would have had a quite lengthy
deprecation period where ``RandomState`` existed alongside the new system of
-basic RNGs and distribution classes. Leaving the implementation of
-``RandomState`` fixed meant that it could not use the new basic RNG state
+BitGenerator and Generator classes. Leaving the implementation of
+``RandomState`` fixed meant that it could not use the new BitGenerator state
objects. Developing programs that use a mixture of libraries that have and
have not upgraded would require managing two sets of PRNG states. This would
notionally have been time-limited, but we intended the deprecation to be very
@@ -308,9 +312,9 @@ long.
The current proposal solves all of these problems. All current usages of
``RandomState`` will continue to work in perpetuity, though some may be
discouraged through documentation. Unit tests can continue to use the full
-complement of ``RandomState`` methods. Mixed ``RandomState/RandomGenerator``
-code can safely share the common basic RNG state. Unmodified ``RandomState``
-code can make use of the new features of alternative basic RNGs like settable
+complement of ``RandomState`` methods. Mixed ``RandomState/Generator``
+code can safely share the common BitGenerator state. Unmodified ``RandomState``
+code can make use of the new features of alternative BitGenerator-like settable
streams.
diff --git a/doc/neps/nep-0020-gufunc-signature-enhancement.rst b/doc/neps/nep-0020-gufunc-signature-enhancement.rst
index 38a9fd53b..a7a992cf1 100644
--- a/doc/neps/nep-0020-gufunc-signature-enhancement.rst
+++ b/doc/neps/nep-0020-gufunc-signature-enhancement.rst
@@ -3,7 +3,7 @@ NEP 20 — Expansion of Generalized Universal Function Signatures
===============================================================
:Author: Marten van Kerkwijk <mhvk@astro.utoronto.ca>
-:Status: Accepted
+:Status: Final
:Type: Standards Track
:Created: 2018-06-10
:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-April/077959.html,
diff --git a/doc/neps/nep-0021-advanced-indexing.rst b/doc/neps/nep-0021-advanced-indexing.rst
index 5acabbf16..dab9ab022 100644
--- a/doc/neps/nep-0021-advanced-indexing.rst
+++ b/doc/neps/nep-0021-advanced-indexing.rst
@@ -630,7 +630,7 @@ At this point we have left the straight forward world of ``oindex`` but can
do random picking of any element from the array. Note that in the last example
a method such as mentioned in the ``Related Questions`` section could be more
straight forward. But this approach is even more flexible, since ``rows``
-does not have to be a simple ``arange``, but could be ``intersting_times``::
+does not have to be a simple ``arange``, but could be ``interesting_times``::
>>> interesting_times = np.array([0, 4, 8, 9, 10])
>>> correct_sensors_at_it = correct_sensors[interesting_times, :]
diff --git a/doc/neps/nep-0022-ndarray-duck-typing-overview.rst b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst
index 04e4a14b7..077166453 100644
--- a/doc/neps/nep-0022-ndarray-duck-typing-overview.rst
+++ b/doc/neps/nep-0022-ndarray-duck-typing-overview.rst
@@ -3,9 +3,10 @@ NEP 22 — Duck typing for NumPy arrays – high level overview
===========================================================
:Author: Stephan Hoyer <shoyer@google.com>, Nathaniel J. Smith <njs@pobox.com>
-:Status: Draft
+:Status: Final
:Type: Informational
:Created: 2018-03-22
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-September/078752.html
Abstract
--------
diff --git a/doc/neps/nep-0024-missing-data-2.rst b/doc/neps/nep-0024-missing-data-2.rst
index c8b19561f..f4414e0a0 100644
--- a/doc/neps/nep-0024-missing-data-2.rst
+++ b/doc/neps/nep-0024-missing-data-2.rst
@@ -28,7 +28,7 @@ Detailed description
Rationale
^^^^^^^^^
-The purpose of this aNEP is to define two interfaces -- one for handling
+The purpose of this NEP is to define two interfaces -- one for handling
'missing values', and one for handling 'masked arrays'.
An ordinary value is something like an integer or a floating point number. A
diff --git a/doc/neps/nep-0026-missing-data-summary.rst b/doc/neps/nep-0026-missing-data-summary.rst
index e99138cdd..78fe999df 100644
--- a/doc/neps/nep-0026-missing-data-summary.rst
+++ b/doc/neps/nep-0026-missing-data-summary.rst
@@ -669,7 +669,7 @@ NumPy could more easily be overtaken by another project.
In the case of the existing NA contribution at issue, how we resolve
this disagreement represents a decision about how NumPy's
-developers, contributers, and users should interact. If we create
+developers, contributors, and users should interact. If we create
a document describing a dispute resolution process, how do we
design it so that it doesn't introduce a large burden and excessive
uncertainty on developers that could prevent them from productively
@@ -677,7 +677,7 @@ contributing code?
If we go this route of writing up a decision process which includes
such a dispute resolution mechanism, I think the meat of it should
-be a roadmap that potential contributers and developers can follow
+be a roadmap that potential contributors and developers can follow
to gain influence over NumPy. NumPy development needs broad support
beyond code contributions, and tying influence in the project to
contributions seems to me like it would be a good way to encourage
diff --git a/doc/neps/nep-0027-zero-rank-arrarys.rst b/doc/neps/nep-0027-zero-rank-arrarys.rst
new file mode 100644
index 000000000..430397235
--- /dev/null
+++ b/doc/neps/nep-0027-zero-rank-arrarys.rst
@@ -0,0 +1,254 @@
+=========================
+NEP 27 — Zero Rank Arrays
+=========================
+
+:Author: Alexander Belopolsky (sasha), transcribed by Matt Picus <matti.picus@gmail.com>
+:Status: Final
+:Type: Informational
+:Created: 2006-06-10
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2018-October/078824.html
+
+.. note ::
+
+ NumPy has both zero rank arrays and scalars. This design document, adapted
+ from a `2006 wiki entry`_, describes what zero rank arrays are and why they
+ exist. It was transcribed 2018-10-13 into a NEP and links were updated.
+ The pull request sparked `a lively discussion`_ about the continued need
+ for zero rank arrays and scalars in NumPy.
+
+ Some of the information here is dated, for instance indexing of 0-D arrays
+  is now implemented and does not error.
+
+Zero-Rank Arrays
+----------------
+
+Zero-rank arrays are arrays with shape=(). For example:
+
+ >>> x = array(1)
+ >>> x.shape
+ ()
+
+
+Zero-Rank Arrays and Array Scalars
+----------------------------------
+
+Array scalars are similar to zero-rank arrays in many aspects::
+
+
+ >>> int_(1).shape
+ ()
+
+They even print the same::
+
+
+ >>> print int_(1)
+ 1
+ >>> print array(1)
+ 1
+
+
+However there are some important differences:
+
+* Array scalars are immutable
+* Array scalars have different python type for different data types
+
+Motivation for Array Scalars
+----------------------------
+
+Numpy's design decision to provide 0-d arrays and array scalars in addition to
+native python types goes against one of the fundamental python design
+principles that there should be only one obvious way to do it. In this section
+we will try to explain why it is necessary to have three different ways to
+represent a number.
+
+There were several numpy-discussion threads:
+
+
+* `rank-0 arrays`_ in a 2002 mailing list thread.
+* Thoughts about zero dimensional arrays vs Python scalars in a `2005 mailing list thread`_
+
+It has been suggested several times that NumPy just use rank-0 arrays to
+represent scalar quantities in all cases. Pros and cons of converting rank-0
+arrays to scalars were summarized as follows:
+
+- Pros:
+
+ - Some cases when Python expects an integer (the most
+ dramatic is when slicing and indexing a sequence:
+ _PyEval_SliceIndex in ceval.c) it will not try to
+ convert it to an integer first before raising an error.
+ Therefore it is convenient to have 0-dim arrays that
+ are integers converted for you by the array object.
+
+ - No risk of user confusion by having two types that
+ are nearly but not exactly the same and whose separate
+ existence can only be explained by the history of
+ Python and NumPy development.
+
+ - No problems with code that does explicit typechecks
+ ``(isinstance(x, float)`` or ``type(x) == types.FloatType)``. Although
+ explicit typechecks are considered bad practice in general, there are a
+ couple of valid reasons to use them.
+
+ - No creation of a dependency on Numeric in pickle
+ files (though this could also be done by a special case
+ in the pickling code for arrays)
+
+- Cons:
+
+ - It is difficult to write generic code because scalars
+ do not have the same methods and attributes as arrays.
+ (such as ``.type`` or ``.shape``). Also Python scalars have
+ different numeric behavior as well.
+
+ - This results in a special-case checking that is not
+ pleasant. Fundamentally it lets the user believe that
+    somehow multidimensional homogeneous arrays
+ are something like Python lists (which except for
+ Object arrays they are not).
+
+Numpy implements a solution that is designed to have all the pros and none of the cons above.
+
+ Create Python scalar types for all of the 21 types and also
+ inherit from the three that already exist. Define equivalent
+ methods and attributes for these Python scalar types.
+
+The Need for Zero-Rank Arrays
+-----------------------------
+
+Once the idea to use zero-rank arrays to represent scalars was rejected, it was
+natural to consider whether zero-rank arrays can be eliminated altogether.
+However there are some important use cases where zero-rank arrays cannot be
+replaced by array scalars. See also `A case for rank-0 arrays`_ from February
+2006.
+
+* Output arguments::
+
+ >>> y = int_(5)
+ >>> add(5,5,x)
+ array(10)
+ >>> x
+ array(10)
+ >>> add(5,5,y)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TypeError: return arrays must be of ArrayType
+
+* Shared data::
+
+ >>> x = array([1,2])
+ >>> y = x[1:2]
+ >>> y.shape = ()
+ >>> y
+ array(2)
+ >>> x[1] = 20
+ >>> y
+ array(20)
+
+Indexing of Zero-Rank Arrays
+----------------------------
+
+As of NumPy release 0.9.3, zero-rank arrays do not support any indexing::
+
+ >>> x[...]
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ IndexError: 0-d arrays can't be indexed.
+
+On the other hand there are several cases that make sense for rank-zero arrays.
+
+Ellipsis and empty tuple
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Alexander started a `Jan 2006 discussion`_ on scipy-dev
+with the following proposal:
+
+ ... it may be reasonable to allow ``a[...]``. This way
+    ellipsis can be interpreted as any number of ``:`` s including zero.
+ Another subscript operation that makes sense for scalars would be
+ ``a[...,newaxis]`` or even ``a[{newaxis, }* ..., {newaxis,}*]``, where
+ ``{newaxis,}*`` stands for any number of comma-separated newaxis tokens.
+ This will allow one to use ellipsis in generic code that would work on
+ any numpy type.
+
+Francesc Altet supported the idea of ``[...]`` on zero-rank arrays and
+`suggested`_ that ``[()]`` be supported as well.
+
+Francesc's proposal was::
+
+ In [65]: type(numpy.array(0)[...])
+ Out[65]: <type 'numpy.ndarray'>
+
+ In [66]: type(numpy.array(0)[()]) # Indexing a la numarray
+ Out[66]: <type 'int32_arrtype'>
+
+ In [67]: type(numpy.array(0).item()) # already works
+ Out[67]: <type 'int'>
+
+There is a consensus that for a zero-rank array ``x``, both ``x[...]`` and ``x[()]`` should be valid, but the question
+remains on what should be the type of the result - zero rank ndarray or ``x.dtype``?
+
+(Alexander)
+ First, whatever choice is made for ``x[...]`` and ``x[()]`` they should be
+ the same because ``...`` is just syntactic sugar for "as many `:` as
+ necessary", which in the case of zero rank leads to ``... = (:,)*0 = ()``.
+ Second, rank zero arrays and numpy scalar types are interchangeable within
+  numpy, but numpy scalars can be used in some python constructs where ndarrays
+ can't. For example::
+
+ >>> (1,)[array(0)]
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in ?
+ TypeError: tuple indices must be integers
+ >>> (1,)[int32(0)]
+ 1
+
+Since most if not all numpy functions automatically convert zero-rank arrays to scalars on return, there is no reason for
+``[...]`` and ``[()]`` operations to be different.
+
+See SVN changeset 1864 (which became git commit `9024ff0`_) for
+implementation of ``x[...]`` and ``x[()]`` returning numpy scalars.
+
+See SVN changeset 1866 (which became git commit `743d922`_) for
+implementation of ``x[...] = v`` and ``x[()] = v``
+
+Increasing rank with newaxis
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Everyone who commented liked this feature, so as of SVN changeset 1871 (which became git commit `b32744e`_) any number of ellipses and
+newaxis tokens can be placed as a subscript argument for a zero-rank array. For
+example::
+
+ >>> x = array(1)
+ >>> x[newaxis,...,newaxis,...]
+ array([[1]])
+
+It is not clear why more than one ellipsis should be allowed, but this is the
+behavior of higher rank arrays that we are trying to preserve.
+
+Refactoring
+~~~~~~~~~~~
+
+Currently all indexing on zero-rank arrays is implemented in a special ``if (nd
+== 0)`` branch of code that used to always raise an index error. This ensures
+that the changes do not affect any existing usage (except, the usage that
+relies on exceptions). On the other hand part of motivation for these changes
+was to make behavior of ndarrays more uniform and this should allow us to
+eliminate ``if (nd == 0)`` checks altogether.
+
+Copyright
+---------
+
+The original document appeared on the scipy.org wiki, with no Copyright notice, and its `history`_ attributes it to sasha.
+
+.. _`2006 wiki entry`: https://web.archive.org/web/20100503065506/http://projects.scipy.org:80/numpy/wiki/ZeroRankArray
+.. _`history`: https://web.archive.org/web/20100503065506/http://projects.scipy.org:80/numpy/wiki/ZeroRankArray?action=history
+.. _`2005 mailing list thread`: https://sourceforge.net/p/numpy/mailman/message/11299166
+.. _`suggested`: https://mail.python.org/pipermail/numpy-discussion/2006-January/005572.html
+.. _`Jan 2006 discussion`: https://mail.python.org/pipermail/numpy-discussion/2006-January/005579.html
+.. _`A case for rank-0 arrays`: https://mail.python.org/pipermail/numpy-discussion/2006-February/006384.html
+.. _`rank-0 arrays`: https://mail.python.org/pipermail/numpy-discussion/2002-September/001600.html
+.. _`9024ff0`: https://github.com/numpy/numpy/commit/9024ff0dc052888b5922dde0f3e615607a9e99d7
+.. _`743d922`: https://github.com/numpy/numpy/commit/743d922bf5893acf00ac92e823fe12f460726f90
+.. _`b32744e`: https://github.com/numpy/numpy/commit/b32744e3fc5b40bdfbd626dcc1f72907d77c01c4
+.. _`a lively discussion`: https://github.com/numpy/numpy/pull/12166
diff --git a/doc/neps/nep-0028-website-redesign.rst b/doc/neps/nep-0028-website-redesign.rst
new file mode 100644
index 000000000..b418ca831
--- /dev/null
+++ b/doc/neps/nep-0028-website-redesign.rst
@@ -0,0 +1,334 @@
+===================================
+NEP 28 — numpy.org website redesign
+===================================
+
+:Author: Ralf Gommers <ralf.gommers@gmail.com>
+:Author: Joe LaChance <joe@boldmetrics.com>
+:Author: Shekhar Rajak <shekharrajak.1994@gmail.com>
+:Status: Accepted
+:Type: Informational
+:Created: 2019-07-16
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-August/079889.html
+
+
+Abstract
+--------
+
+NumPy is the fundamental library for numerical and scientific computing with
+Python. It is used by millions and has a large team of maintainers and
+contributors. Despite that, its `numpy.org <http://numpy.org>`_ website has
+never received the attention it needed and deserved. We hope and intend to
+change that soon. This document describes ideas and requirements for how to
+design a replacement for the current website, to better serve the needs of
+our diverse community.
+
+At a high level, what we're aiming for is:
+
+- a modern, clean look
+- an easy to deploy static site
+- a structure that's easy to navigate
+- content that addresses all types of stakeholders
+- Possible multilingual translations / i18n
+
+This website serves a couple of roles:
+
+- it's the entry point to the project for new users
+- it should link to the documentation (which is hosted separately, now on
+ http://docs.scipy.org/ and in the near future on http://numpy.org/doc).
+- it should address various aspects of the project (e.g. what NumPy is and
+ why you'd want to use it, community, project organization, funding,
+ relationship with NumFOCUS and possibly other organizations)
+- it should link out to other places, so every type of stakeholder
+ (beginning and advanced user, educators, packagers, funders, etc.)
+ can find their way
+
+
+Motivation and Scope
+--------------------
+
+The current numpy.org website has almost no content and its design is poor.
+This affects many users, who come there looking for information. It also
+affects many other aspects of the NumPy project, from finding new contributors
+to fundraising.
+
+The scope of the proposed redesign is the top-level numpy.org site, which
+now contains only a couple of pages and may contain on the order of ten
+pages after the redesign. Changing the documentation (user guide, reference
+guide, and some other pages in the NumPy Manual) is out of scope for
+this proposal.
+
+
+Detailed description
+--------------------
+
+User Experience
+~~~~~~~~~~~~~~~
+
+Besides the NumPy logo, there is little that can or needs to be kept from the
+current website. We will rely to a large extent on ideas and proposals by the
+designer(s) of the new website.
+
+As reference points we can use the `Jupyter website <https://jupyter.org/>`_,
+which is probably the best designed site in our ecosystem, and the
+`QuantEcon <https://quantecon.org>`_ and `Julia <https://julialang.org>`_
+sites which are well-designed too.
+
+The Website
+~~~~~~~~~~~
+
+A static site is a must. There are many high-quality static site generators.
+The current website uses Sphinx, however that is not the best choice - it's
+hard to theme and results in sites that are too text-heavy due to Sphinx'
+primary aim being documentation.
+
+The following should be considered when choosing a static site generator:
+
+1. *How widely used is it?* This is important when looking for help maintaining
+ or improving the site. More popular frameworks are usually also better
+ maintained, so less chance of bugs or obsolescence.
+2. *Ease of deployment.* Most generators meet this criterion, however things
+ like built-in support for GitHub Pages helps.
+3. *Preferences of who implements the new site.* Everyone has their own
+ preferences. And it's a significant amount of work to build a new site.
+ So we should take the opinion of those doing the work into account.
+
+Traffic
+```````
+
+The current site receives on the order of 500,000 unique visitors per month.
+With a redesigned site and relevant content, there is potential for visitor
+counts to reach 5-6 million -- a similar level as
+`scipy.org <http://scipy.org>`_ or `matplotlib.org <http://matplotlib.org>`_ --
+or more.
+
+Possible options for static site generators
+```````````````````````````````````````````
+
+1. *Jekyll.* This is a well maintained option with 855 Github contributors,
+ with contributions within the last month. Jekyll is written in Ruby, and
+ has a simple CLI interface. Jekyll also has a large directory of
+ `themes <https://jekyllthemes.io>`__, although a majority cost money.
+ There are several themes (`serif <https://jekyllthemes.io/theme/serif>`_,
+ `uBuild <https://jekyllthemes.io/theme/ubuild-jekyll-theme>`_,
+ `Just The Docs <https://jekyllthemes.io/theme/just-the-docs>`_) that are
+ appropriate and free. Most themes are likely responsive for mobile, and
+ that should be a requirement. Jekyll uses a combination of liquid templating
+ and YAML to render HTML, and content is written in Markdown. i18n
+ functionality is not native to Jekyll, but can be added easily.
+ One nice benefit of Jekyll is that it can be run automatically by GitHub
+ Pages, so deployment via a CI system doesn't need to be implemented.
+2. *Hugo.* This is another well maintained option with 554 contributors, with
+ contributions within the last month. Hugo is written in Go, and similar to
+ Jekyll, has a simple to use CLI interface to generate static sites. Again,
+ similar to Jekyll, Hugo has a large directory of
+ `themes <https://themes.gohugo.io>`_. These themes appear to be free,
+ unlike some of Jekyll's themes.
+ (`Sample landing page theme <https://themes.gohugo.io/hugo-hero-theme>`_,
+ `docs theme <https://themes.gohugo.io/hugo-whisper-theme>`_). Hugo uses Jade
+ as its templating language, and content is also written in Markdown. i18n
+ functionality is native to Hugo.
+3. *Docusaurus.* Docusaurus is a responsive static site generator made by Facebook.
+ Unlike the previous options, Docusaurus doesn't come with themes, and thus we
+ would not want to use this for our landing page. This is an excellent docs
+ option written in React. Docusaurus natively has support for i18n (via
+   Crowdin_), document versioning, and document search.
+
+Both Jekyll and Hugo are excellent options that should be supported into the
+future and are good choices for NumPy. Docusaurus has several bonus features
+such as versioning and search that Jekyll and Hugo don't have, but is likely
+a poor candidate for a landing page - it could be a good option for a
+high-level docs site later on though.
+
+Deployment
+~~~~~~~~~~
+
+There is no need for running a server, and doing so is in our experience a
+significant drain on the time of maintainers.
+
+1. *Netlify.* Using netlify is free until 100GB of bandwidth is used. Additional
+ bandwidth costs $20/100GB. They support a global CDN system, which will keep
+ load times quick for users in other regions. Netlify also has Github integration,
+ which will allow for easy deployment. When a pull request is merged, Netlify
+ will automatically deploy the changes. DNS is simple, and HTTPS is also supported.
+2. *Github Pages.* Github Pages also has a 100GB bandwidth limit, and is unclear if
+ additional bandwidth can be purchased. It is also unclear where sites are deployed,
+ and should be assumed sites aren't deployed globally. Github Pages has an easy to
+   use CI & DNS, similar to Netlify. HTTPS is supported.
+3. *Cloudflare.* An excellent option, additional CI is likely needed for the same
+ ease of deployment.
+
+All of the above options are appropriate for the NumPy site based on current
+traffic. Updating to a new deployment strategy, if needed, is a minor amount of
+work compared to developing the website itself. If a provider such as
+Cloudflare is chosen, additional CI may be required, such as CircleCI, to
+have a similar deployment to GitHub Pages or Netlify.
+
+Analytics
+~~~~~~~~~
+
+It's beneficial to maintainers to know how many visitors are coming to
+numpy.org. Google Analytics offers visitor counts and locations. This will
+help to support and deploy more strategically, and help maintainers
+understand where traffic is coming from.
+
+Google Analytics is free. A script, provided by Google, must be added to the home page.
+
+Website Structure
+~~~~~~~~~~~~~~~~~
+
+We aim to keep the first version of the new website small in terms of amount
+of content. New pages can be added later on, it's more important right now to
+get the site design right and get some essential information up. Note that in
+the second half of 2019 we expect to get 1 or 2 tech writers involved in the
+project via Google Season of Docs. They will likely help improve the content
+and organization of that content.
+
+We propose the following structure:
+
+0. Front page: essentials of what NumPy is (compare e.g. jupyter.org), one or
+ a couple key user stories (compare e.g. julialang.org)
+1. Install
+2. Documentation
+3. Array computing
+4. Community
+5. Learning
+6. About Us
+7. Contribute
+8. Donate
+
+There may be a few other pages, e.g. a page on performance, that are linked
+from one of the main pages.
+
+Stakeholder Content
+~~~~~~~~~~~~~~~~~~~
+
+This should have as little content as possible *within the site*. Somewhere
+on the site we should link out to content that's specific to:
+
+- beginning users (quickstart, tutorial)
+- advanced users
+- educators
+- packagers
+- package authors that depend on NumPy
+- funders (governance, roadmap)
+
+Translation (multilingual / i18n)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+NumPy has users all over the world. Most of those users are not native
+English speakers, and many don't speak English well or at all. Therefore
+having content in multiple languages is potentially addressing a large unmet
+need. It would likely also help make the NumPy project more diverse and
+welcoming.
+
+On the other hand, there are good reasons why few projects have a
+multi-lingual site. It's potentially a lot of extra work. Extra work for
+maintainers is costly - they're already struggling to keep up with the work
+load. Therefore we have to very carefully consider whether a multi-lingual
+site is feasible and weigh costs and benefits.
+
+We start with an assertion: maintaining translations of all documentation, or
+even the whole user guide, as part of the NumPy project is not feasible. One
+simply has to look at the volume of our documentation and the frequency with
+which we change it to realize that that's the case. Perhaps it will be
+feasible though to translate just the top-level pages of the website. Those
+do not change very often, and it will be a limited amount of content (order
+of magnitude 5-10 pages of text).
+
+We propose the following requirements for adding a language:
+
+- The language must have a dedicated maintainer
+- There must be a way to validate content changes (e.g. a second
+ maintainer/reviewer, or high quality language support in a freely
+ available machine translation tool)
+- The language must have a reasonable size target audience (to be
+ assessed by the NumPy maintainers)
+
+Furthermore we propose a policy for when to remove support for a language again
+(preferably by hiding it rather than deleting content). This may be done when
+the language no longer has a maintainer, and coverage of translations falls
+below an acceptable threshold (say 80%).
+
+Benefits of having translations include:
+
+- Better serve many existing and potential users
+- Potentially attract a culturally and geographically more diverse set of contributors
+
+The tradeoffs are:
+
+- Cost of maintaining a more complex code base
+- Cost of making decisions about whether or not to add a new language
+- Higher cost to making content changes, creates work for language maintainers
+- Any content change should be rolled out with enough delay to have translations in place
+
+Can we define a small enough set of pages and content that it makes sense to do this?
+Probably yes.
+
+Is there an easy to use tool to maintain translations and add them to the website?
+To be discussed - it needs investigating, and may depend on the choice of static site
+generator. One potential option is Crowdin_, which is free for open source projects.
+
+
+Style and graphic design
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+Beyond the "a modern, clean look" goal we choose to not specify too much. A
+designer may have much better ideas than the authors of this proposal, hence we
+will work with the designer(s) during the implementation phase.
+
+The NumPy logo could use a touch-up. The logo is widely recognized and its colors and
+design are good, however the look-and-feel is perhaps a little dated.
+
+
+Other aspects
+~~~~~~~~~~~~~
+
+A search box would be nice to have. The Sphinx documentation already has a
+search box, however a search box on the main site which provides search results
+for the docs, the website, and perhaps other domains that are relevant for
+NumPy would make sense.
+
+
+Backward compatibility
+----------------------
+
+Given a static site generator is chosen, we will migrate away from Sphinx for
+numpy.org (the website, *not including the docs*). The current deployment can
+be preserved until a future deprecation date is decided (potentially based on
+the comfort level of our new site).
+
+All site generators listed above have visibility into the HTML and Javascript
+that is generated, and can continue to be maintained in the event a given
+project ceases to be maintained.
+
+
+Alternatives
+------------
+
+Alternatives we considered for the overall design of the website:
+
+1. *Update current site.* A new Sphinx theme could be chosen. This would likely
+ take the least amount of resources initially, however, Sphinx does not have
+ the features we are looking for moving forward such as i18n, responsive design,
+ and a clean, modern look.
+ Note that updating the docs Sphinx theme is likely still a good idea - it's
+ orthogonal to this NEP though.
+2. *Create custom site.* This would take the most amount of resources, and is
+ likely to have additional benefit in comparison to a static site generator.
+ All features would be able to be added at the cost of developer time.
+
+
+Discussion
+----------
+
+Mailing list thread discussing this NEP: TODO
+
+
+References and Footnotes
+------------------------
+.. _Crowdin: https://crowdin.com/pricing#annual
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0029-deprecation_policy.rst b/doc/neps/nep-0029-deprecation_policy.rst
new file mode 100644
index 000000000..0dea0a96f
--- /dev/null
+++ b/doc/neps/nep-0029-deprecation_policy.rst
@@ -0,0 +1,301 @@
+==================================================================================
+NEP 29 — Recommend Python and Numpy version support as a community policy standard
+==================================================================================
+
+
+:Author: Thomas A Caswell <tcaswell@gmail.com>, Andreas Mueller, Brian Granger, Madicken Munk, Ralf Gommers, Matt Haberland <mhaberla@calpoly.edu>, Matthias Bussonnier <bussonniermatthias@gmail.com>, Stefan van der Walt <stefanv@berkeley.edu>
+:Status: Draft
+:Type: Informational
+:Created: 2019-07-13
+
+
+Abstract
+--------
+
+This NEP recommends that all projects across the Scientific
+Python ecosystem adopt a common "time window-based" policy for
+support of Python and NumPy versions. Standardizing a recommendation
+for project support of minimum Python and NumPy versions will improve
+downstream project planning.
+
+This is an unusual NEP in that it offers recommendations for
+community-wide policy and not for changes to NumPy itself. Since a
+common place for SPEEPs (Scientific Python Ecosystem Enhancement
+Proposals) does not exist and given NumPy's central role in the
+ecosystem, a NEP provides a visible place to document the proposed
+policy.
+
+This NEP is being put forward by maintainers of Matplotlib, scikit-learn,
+IPython, Jupyter, yt, SciPy, NumPy, and scikit-image.
+
+
+
+Detailed description
+--------------------
+
+For the purposes of this NEP we assume semantic versioning and define:
+
+*major version*
+ A release that changes the first number (e.g. X.0.0)
+
+*minor version*
+ A release that changes the second number (e.g 1.Y.0)
+
+*patch version*
+ A release that changes the third number (e.g. 1.1.Z)
+
+
+When a project releases a new major or minor version, we recommend that
+they support at least all minor versions of Python
+introduced and released in the prior 42 months *from the
+anticipated release date* with a minimum of 2 minor versions of
+Python, and all minor versions of NumPy released in the prior 24
+months *from the anticipated release date* with a minimum of 3
+minor versions of NumPy.
+
+
+Consider the following timeline::
+
+ Jan 16 Jan 17 Jan 18 Jan 19 Jan 20
+ | | | | |
+ +++++|+++++++++++|+++++++++++|+++++++++++|+++++++++++|++++++++++++
+ | | | |
+ py 3.5.0 py 3.6.0 py 3.7.0 py 3.8.0
+ |-----------------------------------------> Feb19
+ |-----------------------------------------> Dec19
+ |-----------------------------------------> Nov20
+
+It shows the 42 month support windows for Python. A project with a
+major or minor version release in February 2019 should support Python 3.5 and newer,
+a project with a major or minor version released in December 2019 should
+support Python 3.6 and newer, and a project with a major or minor version
+release in November 2020 should support Python 3.7 and newer.
+
+The current Python release cadence is 18 months so a 42 month window
+ensures that there will always be at least two minor versions of Python
+in the window. The window is extended 6 months beyond the anticipated two-release
+interval for Python to provide resilience against small fluctuations /
+delays in its release schedule.
+
+Because Python minor version support is based only on historical
+release dates, a 42 month time window, and a planned project release
+date, one can predict with high confidence when a project will be able
+to drop any given minor version of Python. This, in turn, could save
+months of unnecessary maintenance burden.
+
+If a project releases immediately after a minor version of Python
+drops out of the support window, there will inevitably be some
+mismatch in supported versions—but this situation should only last
+until other projects in the ecosystem make releases.
+
+Otherwise, once a project does a minor or major release, it is
+guaranteed that there will be a stable release of all other projects
+that, at the source level, support the same set of Python versions
+supported by the new release.
+
+If there is a Python 4 or a NumPy 2 this policy will have to be
+reviewed in light of the community's and projects' best interests.
+
+
+Support Table
+~~~~~~~~~~~~~
+
+============ ====== =====
+Date Python NumPy
+------------ ------ -----
+Jan 07, 2020 3.6+ 1.15+
+Jun 23, 2020 3.7+ 1.15+
+Jul 23, 2020 3.7+ 1.16+
+Jan 13, 2021 3.7+ 1.17+
+Jul 26, 2021 3.7+ 1.18+
+Dec 26, 2021 3.8+ 1.18+
+============ ====== =====
+
+
+Drop Schedule
+~~~~~~~~~~~~~
+
+::
+
+ On next release, drop support for Python 3.5 (initially released on Sep 13, 2015)
+ On Jan 07, 2020 drop support for Numpy 1.14 (initially released on Jan 06, 2018)
+ On Jun 23, 2020 drop support for Python 3.6 (initially released on Dec 23, 2016)
+ On Jul 23, 2020 drop support for Numpy 1.15 (initially released on Jul 23, 2018)
+ On Jan 13, 2021 drop support for Numpy 1.16 (initially released on Jan 13, 2019)
+ On Jul 26, 2021 drop support for Numpy 1.17 (initially released on Jul 26, 2019)
+ On Dec 26, 2021 drop support for Python 3.7 (initially released on Jun 27, 2018)
+
+
+Implementation
+--------------
+
+We suggest that all projects adopt the following language into their
+development guidelines:
+
+ This project supports:
+
+ - All minor versions of Python released 42 months prior to the
+ project, and at minimum the two latest minor versions.
+ - All minor versions of ``numpy`` released in the 24 months prior
+ to the project, and at minimum the last three minor versions.
+
+ In ``setup.py``, the ``python_requires`` variable should be set to
+ the minimum supported version of Python. All supported minor
+ versions of Python should be in the test matrix and have binary
+ artifacts built for the release.
+
+ Minimum Python and NumPy version support should be adjusted upward
+ on every major and minor release, but never on a patch release.
+
+
+Backward compatibility
+----------------------
+
+No backward compatibility issues.
+
+Alternatives
+------------
+
+Ad-Hoc version support
+~~~~~~~~~~~~~~~~~~~~~~
+
+A project could, on every release, evaluate whether to increase
+the minimum version of Python supported.
+As a major downside, an ad-hoc approach makes it hard for downstream users to predict what
+the future minimum versions will be. As there is no objective threshold
+to when the minimum version should be dropped, it is easy for these
+version support discussions to devolve into `bike shedding <https://en.wikipedia.org/wiki/Wikipedia:Avoid_Parkinson%27s_bicycle-shed_effect>`_ and acrimony.
+
+
+All CPython supported versions
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The CPython supported versions of Python are listed in the Python
+Developers Guide and the Python PEPs. Supporting these is a very clear
+and conservative approach. However, it means that there exists a four
+year lag between when a new feature is introduced into the language
+and when a project is able to use it. Additionally, for projects with
+compiled extensions this requires building many binary artifacts for
+each release.
+
+For the case of NumPy, many projects carry workarounds to bugs that
+are fixed in subsequent versions of NumPy. Being proactive about
+increasing the minimum version of NumPy allows downstream
+packages to carry fewer version-specific patches.
+
+
+
+Default version on Linux distribution
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The policy could be to support the version of Python that ships by
+default in the latest Ubuntu LTS or CentOS/RHEL release. However, we
+would still have to standardize across the community which
+distribution to follow.
+
+By following the versions supported by major Linux distributions, we
+are giving up technical control of our projects to external
+organizations that may have different motivations and concerns than we
+do.
+
+
+N minor versions of Python
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Given the current release cadence of the Python, the proposed time (42
+months) is roughly equivalent to "the last two" Python minor versions.
+However, if Python changes their release cadence substantially, any
+rule based solely on the number of minor releases may need to be
+changed to remain sensible.
+
+A more fundamental problem with a policy based on number of Python
+releases is that it is hard to predict when support for a given minor
+version of Python will be dropped as that requires correctly
+predicting the release schedule of Python for the next 3-4 years. A
+time-based rule, in contrast, only depends on past events
+and the length of the support window.
+
+
+Time window from the X.Y.1 Python release
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+This is equivalent to a few month longer support window from the X.Y.0
+release. This is because X.Y.1 bug-fix release is typically a few
+months after the X.Y.0 release, thus a N month window from X.Y.1 is
+roughly equivalent to a N+3 month from X.Y.0.
+
+The X.Y.0 release is naturally a special release. If we were to
+anchor the window on X.Y.1 we would then have the discussion of why
+not X.Y.M?
+
+
+Discussion
+----------
+
+
+References and Footnotes
+------------------------
+
+Code to generate support and drop schedule tables ::
+
+ from datetime import datetime, timedelta
+
+ data = """Jan 15, 2017: Numpy 1.12
+ Sep 13, 2015: Python 3.5
+ Jun 27, 2018: Python 3.7
+ Dec 23, 2016: Python 3.6
+ Jun 07, 2017: Numpy 1.13
+ Jan 06, 2018: Numpy 1.14
+ Jul 23, 2018: Numpy 1.15
+ Jan 13, 2019: Numpy 1.16
+ Jul 26, 2019: Numpy 1.17
+ """
+
+ releases = []
+
+ plus42 = timedelta(days=int(365*3.5 + 1))
+ plus24 = timedelta(days=int(365*2 + 1))
+
+ for line in data.splitlines():
+ date, project_version = line.split(':')
+ project, version = project_version.strip().split(' ')
+ release = datetime.strptime(date, '%b %d, %Y')
+ if project.lower() == 'numpy':
+ drop = release + plus24
+ else:
+ drop = release + plus42
+ releases.append((drop, project, version, release))
+
+ releases = sorted(releases, key=lambda x: x[0])
+
+ minpy = '3.8+'
+ minnum = '1.18+'
+
+ toprint_drop_dates = ['']
+ toprint_support_table = []
+ for d, p, v, r in releases[::-1]:
+ df = d.strftime('%b %d, %Y')
+ toprint_drop_dates.append(
+ f'On {df} drop support for {p} {v} '
+ f'(initially released on {r.strftime("%b %d, %Y")})')
+ toprint_support_table.append(f'{df} {minpy:<6} {minnum:<5}')
+ if p.lower() == 'numpy':
+ minnum = v+'+'
+ else:
+ minpy = v+'+'
+
+ for e in toprint_drop_dates[::-1]:
+ print(e)
+
+ print('============ ====== =====')
+ print('Date Python NumPy')
+ print('------------ ------ -----')
+ for e in toprint_support_table[::-1]:
+ print(e)
+ print('============ ====== =====')
+
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0030-duck-array-protocol.rst b/doc/neps/nep-0030-duck-array-protocol.rst
new file mode 100644
index 000000000..353c5df1e
--- /dev/null
+++ b/doc/neps/nep-0030-duck-array-protocol.rst
@@ -0,0 +1,183 @@
+======================================================
+NEP 30 — Duck Typing for NumPy Arrays - Implementation
+======================================================
+
+:Author: Peter Andreas Entschev <pentschev@nvidia.com>
+:Author: Stephan Hoyer <shoyer@google.com>
+:Status: Draft
+:Type: Standards Track
+:Created: 2019-07-31
+:Updated: 2019-07-31
+:Resolution:
+
+Abstract
+--------
+
+We propose the ``__duckarray__`` protocol, following the high-level overview
+described in NEP 22, allowing downstream libraries to return arrays of their
+defined types, in contrast to ``np.asarray``, that coerces those ``array_like``
+objects to NumPy arrays.
+
+Detailed description
+--------------------
+
+NumPy's API, including array definitions, is implemented and mimicked in
+countless other projects. By definition, many of those arrays are fairly
+similar in how they operate to the NumPy standard. The introduction of
+``__array_function__`` allowed dispatching of functions implemented by several
+of these projects directly via NumPy's API. This introduces a new requirement,
+returning the NumPy-like array itself, rather than forcing a coercion into a
+pure NumPy array.
+
+For the purpose above, NEP 22 introduced the concept of duck typing to NumPy
+arrays. The suggested solution described in the NEP allows libraries to avoid
+coercion of a NumPy-like array to a pure NumPy array where necessary, while
+still allowing that NumPy-like array libraries that do not wish to implement
+the protocol to coerce arrays to a pure NumPy array via ``np.asarray``.
+
+Usage Guidance
+~~~~~~~~~~~~~~
+
+Code that uses np.duckarray is meant for supporting other ndarray-like objects
+that "follow the NumPy API". That is an ill-defined concept at the moment --
+every known library implements the NumPy API only partly, and many deviate
+intentionally in at least some minor ways. This cannot be easily remedied, so
+for users of ``__duckarray__`` we recommend the following strategy: check if the
+NumPy functionality used by the code that follows your use of ``__duckarray__``
+is present in Dask, CuPy and Sparse. If so, it's reasonable to expect any duck
+array to work here. If not, we suggest you indicate in your docstring what kinds
+of duck arrays are accepted, or what properties they need to have.
+
+To exemplify the usage of duck arrays, suppose one wants to take the ``mean()``
+of an array-like object ``arr``. Using NumPy to achieve that, one could write
+``np.asarray(arr).mean()`` to achieve the intended result. However, libraries
+may expect ``arr`` to be a NumPy-like array, and at the same time, the array may
+or may not be an object compliant to the NumPy API (either in full or partially)
+such as a CuPy, Sparse or a Dask array. In the case where ``arr`` is already an
+object compliant to the NumPy API, we would simply return it (and prevent it
+from being coerced into a pure NumPy array), otherwise, it would then be coerced
+into a NumPy array.
+
+Implementation
+--------------
+
+The implementation idea is fairly straightforward, requiring a new function
+``duckarray`` to be introduced in NumPy, and a new method ``__duckarray__`` in
+NumPy-like array classes. The new ``__duckarray__`` method shall return the
+downstream array-like object itself, such as the ``self`` object. If appropriate,
+an ``__array__`` method may be implemented that returns a NumPy array or possibly
+raise a ``TypeError`` with a helpful message.
+
+The new NumPy ``duckarray`` function can be implemented as follows:
+
+.. code:: python
+
+ def duckarray(array_like):
+ if hasattr(array_like, '__duckarray__'):
+ return array_like.__duckarray__()
+ return np.asarray(array_like)
+
+Example for a project implementing NumPy-like arrays
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Now consider a library that implements a NumPy-compatible array class called
+``NumPyLikeArray``, this class shall implement the methods described above, and
+a complete implementation would look like the following:
+
+.. code:: python
+
+ class NumPyLikeArray:
+ def __duckarray__(self):
+ return self
+
+ def __array__(self):
+        raise TypeError("NumPyLikeArray can not be converted to a numpy array. "
+ "You may want to use np.duckarray.")
+
+The implementation above exemplifies the simplest case, but the overall idea
+is that libraries will implement a ``__duckarray__`` method that returns the
+original object, and an ``__array__`` method that either creates and returns an
+appropriate NumPy array, or raises a ``TypeError`` to prevent unintentional use
+as an object in a NumPy array (if ``np.asarray`` is called on an arbitrary
+object that does not implement ``__array__``, it will create a NumPy array
+scalar).
+
+In case of existing libraries that don't already implement ``__array__`` but
+would like to use duck array typing, it is advised that they introduce
+both ``__array__`` and ``__duckarray__`` methods.
+
+Usage
+-----
+
+An example of how the ``__duckarray__`` protocol could be used to write a
+``stack`` function based on ``concatenate``, and its produced outcome, can be
+seen below. The example here was chosen not only to demonstrate the usage of
+the ``duckarray`` function, but also to demonstrate its dependency on the NumPy
+API, demonstrated by checks on the array's ``shape`` attribute. Note that the
+example is merely a simplified version of NumPy's actual implementation of
+``stack`` working on the first axis, and it is assumed that Dask has implemented
+the ``__duckarray__`` method.
+
+.. code:: python
+
+ def duckarray_stack(arrays):
+ arrays = [np.duckarray(arr) for arr in arrays]
+
+ shapes = {arr.shape for arr in arrays}
+ if len(shapes) != 1:
+ raise ValueError('all input arrays must have the same shape')
+
+ expanded_arrays = [arr[np.newaxis, ...] for arr in arrays]
+ return np.concatenate(expanded_arrays, axis=0)
+
+ dask_arr = dask.array.arange(10)
+ np_arr = np.arange(10)
+ np_like = list(range(10))
+
+ duckarray_stack((dask_arr, dask_arr)) # Returns dask.array
+ duckarray_stack((dask_arr, np_arr)) # Returns dask.array
+ duckarray_stack((dask_arr, np_like)) # Returns dask.array
+
+In contrast, using only ``np.asarray`` (at the time of writing of this NEP, this
+is the usual method employed by library developers to ensure arrays are
+NumPy-like) has a different outcome:
+
+.. code:: python
+
+ def asarray_stack(arrays):
+        arrays = [np.asarray(arr) for arr in arrays]
+
+ # The remaining implementation is the same as that of
+ # ``duckarray_stack`` above
+
+ asarray_stack((dask_arr, dask_arr)) # Returns np.ndarray
+ asarray_stack((dask_arr, np_arr)) # Returns np.ndarray
+ asarray_stack((dask_arr, np_like)) # Returns np.ndarray
+
+Backward compatibility
+----------------------
+
+This proposal does not raise any backward compatibility issues within NumPy,
+given that it only introduces a new function. However, downstream libraries
+that opt to introduce the ``__duckarray__`` protocol may choose to remove the
+ability of coercing arrays back to a NumPy array via ``np.array`` or
+``np.asarray`` functions, preventing unintended effects of coercion of such
+arrays back to a pure NumPy array (as some libraries already do, such as CuPy
+and Sparse), but still leaving libraries not implementing the protocol with the
+choice of utilizing ``np.duckarray`` to promote ``array_like`` objects to pure
+NumPy arrays.
+
+Previous proposals and discussion
+---------------------------------
+
+The duck typing protocol proposed here was described in a high level in
+`NEP 22 <https://numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html>`_.
+
+Additionally, longer discussions about the protocol and related proposals
+took place in
+`numpy/numpy #13831 <https://github.com/numpy/numpy/issues/13831>`_
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-0032-remove-financial-functions.rst b/doc/neps/nep-0032-remove-financial-functions.rst
new file mode 100644
index 000000000..a78b11fea
--- /dev/null
+++ b/doc/neps/nep-0032-remove-financial-functions.rst
@@ -0,0 +1,214 @@
+==================================================
+NEP 32 — Remove the financial functions from NumPy
+==================================================
+
+:Author: Warren Weckesser <warren.weckesser@gmail.com>
+:Status: Accepted
+:Type: Standards Track
+:Created: 2019-08-30
+:Resolution: https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html
+
+
+Abstract
+--------
+
+We propose deprecating and ultimately removing the financial functions [1]_
+from NumPy. The functions will be moved to an independent repository,
+and provided to the community as a separate package with the name
+``numpy_financial``.
+
+
+Motivation and scope
+--------------------
+
+The NumPy financial functions [1]_ are the 10 functions ``fv``, ``ipmt``,
+``irr``, ``mirr``, ``nper``, ``npv``, ``pmt``, ``ppmt``, ``pv`` and ``rate``.
+The functions provide elementary financial calculations such as future value,
+net present value, etc. These functions were added to NumPy in 2008 [2]_.
+
+In May, 2009, a request by Joe Harrington to add a function called ``xirr`` to
+the financial functions triggered a long thread about these functions [3]_.
+One important point that came up in that thread is that a "real" financial
+library must be able to handle real dates. The NumPy financial functions do
+not work with actual dates or calendars. The preference for a more capable
+library independent of NumPy was expressed several times in that thread.
+
+In June, 2009, D. L. Goldsmith expressed concerns about the correctness of the
+implementations of some of the financial functions [4]_. It was suggested then
+to move the financial functions out of NumPy to an independent package.
+
+In a GitHub issue in 2013 [5]_, Nathaniel Smith suggested moving the financial
+functions from the top-level namespace to ``numpy.financial``. He also
+suggested giving the functions better names. Responses at that time included
+the suggestion to deprecate them and move them from NumPy to a separate
+package. This issue is still open.
+
+Later in 2013 [6]_, it was suggested on the mailing list that these functions
+be removed from NumPy.
+
+The arguments for the removal of these functions from NumPy:
+
+* They are too specialized for NumPy.
+* They are not actually useful for "real world" financial calculations, because
+ they do not handle real dates and calendars.
+* The definition of "correctness" for some of these functions seems to be a
+ matter of convention, and the current NumPy developers do not have the
+ background to judge their correctness.
+* There has been little interest among past and present NumPy developers
+ in maintaining these functions.
+
+The main arguments for keeping the functions in NumPy are:
+
+* Removing these functions will be disruptive for some users. Current users
+ will have to add the new ``numpy_financial`` package to their dependencies,
+ and then modify their code to use the new package.
+* The functions provided, while not "industrial strength", are apparently
+ similar to functions provided by spreadsheets and some calculators. Having
+ them available in NumPy makes it easier for some developers to migrate their
+ software to Python and NumPy.
+
+It is clear from comments in the mailing list discussions and in the GitHub
+issues that many current NumPy developers believe the benefits of removing
+the functions outweigh the costs. For example, from [5]_::
+
+ The financial functions should probably be part of a separate package
+ -- Charles Harris
+
+ If there's a better package we can point people to we could just deprecate
+ them and then remove them entirely... I'd be fine with that too...
+ -- Nathaniel Smith
+
+ +1 to deprecate them. If no other package exists, it can be created if
+ someone feels the need for that.
+ -- Ralf Gommers
+
+ I feel pretty strongly that we should deprecate these. If nobody on numpy’s
+ core team is interested in maintaining them, then it is purely a drag on
+ development for NumPy.
+ -- Stephan Hoyer
+
+And from the 2013 mailing list discussion, about removing the functions from
+NumPy::
+
+ I am +1 as well, I don't think they should have been included in the first
+ place.
+ -- David Cournapeau
+
+But not everyone was in favor of removal::
+
+ The fin routines are tiny and don't require much maintenance once
+ written. If we made an effort (putting up pages with examples of common
+ financial calculations and collecting those under a topical web page,
+ then linking to that page from various places and talking it up), I
+ would think they could attract users looking for a free way to play with
+ financial scenarios. [...]
+ So, I would say we keep them. If ours are not the best, we should bring
+ them up to snuff.
+ -- Joe Harrington
+
+For an idea of the maintenance burden of the financial functions, one can
+look for all the GitHub issues [7]_ and pull requests [8]_ that have the tag
+``component: numpy.lib.financial``.
+
+One method for measuring the effect of removing these functions is to find
+all the packages on GitHub that use them. Such a search can be performed
+with the ``python-api-inspect`` service [9]_. A search for all uses of the
+NumPy financial functions finds just eight repositories. (See the comments
+in [5]_ for the actual SQL query.)
+
+
+Implementation
+--------------
+
+* Create a new Python package, ``numpy_financial``, to be maintained in the
+ top-level NumPy github organization. This repository will contain the
+ definitions and unit tests for the financial functions. The package will
+ be added to PyPI so it can be installed with ``pip``.
+* Deprecate the financial functions in the ``numpy`` namespace, beginning in
+ NumPy version 1.18. Remove the financial functions from NumPy version 1.20.
+
+
+Backward compatibility
+----------------------
+
+The removal of these functions breaks backward compatibility, as explained
+earlier. The effects are mitigated by providing the ``numpy_financial``
+library.
+
+
+Alternatives
+------------
+
+The following alternatives were mentioned in [5]_:
+
+* *Maintain the functions as they are (i.e. do nothing).*
+ A review of the history makes clear that this is not the preference of many
+ NumPy developers. A recurring comment is that the functions simply do not
+ belong in NumPy. When that sentiment is combined with the history of bug
+ reports and the ongoing questions about the correctness of the functions, the
+ conclusion is that the cleanest solution is deprecation and removal.
+* *Move the functions from the ``numpy`` namespace to ``numpy.financial``.*
+ This was the initial suggestion in [5]_. Such a change does not address the
+ maintenance issues, and doesn't change the misfit that many developers see
+ between these functions and NumPy. It causes disruption for the current
+ users of these functions without addressing what many developers see as the
+ fundamental problem.
+
+
+Discussion
+----------
+
+Links to past mailing list discussions, and to relevant GitHub issues and pull
+requests, have already been given. The announcement of this NEP was made on
+the NumPy-Discussion mailing list on 3 September 2019 [10]_, and on the
+PyData mailing list on 8 September 2019 [11]_. The formal proposal to accept
+the NEP was made on 19 September 2019 [12]_; a notification was also sent to
+PyData (same thread as [11]_). There have been no substantive objections.
+
+
+References and footnotes
+------------------------
+
+.. [1] Financial functions,
+ https://numpy.org/doc/1.17/reference/routines.financial.html
+
+.. [2] Numpy-discussion mailing list, "Simple financial functions for NumPy",
+ https://mail.python.org/pipermail/numpy-discussion/2008-April/032353.html
+
+.. [3] Numpy-discussion mailing list, "add xirr to numpy financial functions?",
+ https://mail.python.org/pipermail/numpy-discussion/2009-May/042645.html
+
+.. [4] Numpy-discussion mailing list, "Definitions of pv, fv, nper, pmt, and rate",
+ https://mail.python.org/pipermail/numpy-discussion/2009-June/043188.html
+
+.. [5] Get financial functions out of main namespace,
+ https://github.com/numpy/numpy/issues/2880
+
+.. [6] Numpy-discussion mailing list, "Deprecation of financial routines",
+ https://mail.python.org/pipermail/numpy-discussion/2013-August/067409.html
+
+.. [7] ``component: numpy.lib.financial`` issues,
+ https://github.com/numpy/numpy/issues?utf8=%E2%9C%93&q=is%3Aissue+label%3A%22component%3A+numpy.lib.financial%22+
+
+.. [8] ``component: numpy.lib.financial`` pull requests,
+ https://github.com/numpy/numpy/pulls?utf8=%E2%9C%93&q=is%3Apr+label%3A%22component%3A+numpy.lib.financial%22+
+
+.. [9] Quansight-Labs/python-api-inspect,
+ https://github.com/Quansight-Labs/python-api-inspect/
+
+.. [10] Numpy-discussion mailing list, "NEP 32: Remove the financial functions
+ from NumPy"
+ https://mail.python.org/pipermail/numpy-discussion/2019-September/079965.html
+
+.. [11] PyData mailing list (pydata@googlegroups.com), "NumPy proposal to
+        remove the financial functions."
+ https://mail.google.com/mail/u/0/h/1w0mjgixc4rpe/?&th=16d5c38be45f77c4&q=nep+32&v=c&s=q
+
+.. [12] Numpy-discussion mailing list, "Proposal to accept NEP 32: Remove the
+ financial functions from NumPy"
+ https://mail.python.org/pipermail/numpy-discussion/2019-September/080074.html
+
+Copyright
+---------
+
+This document has been placed in the public domain.
diff --git a/doc/neps/nep-template.rst b/doc/neps/nep-template.rst
index e869ebae3..c3d34ea46 100644
--- a/doc/neps/nep-template.rst
+++ b/doc/neps/nep-template.rst
@@ -1,6 +1,6 @@
-=============================
-NEP Template and Instructions
-=============================
+=================================
+NEP X — Template and Instructions
+=================================
:Author: <list of authors' real names and optionally, email addresses>
:Status: <Draft | Active | Accepted | Deferred | Rejected | Withdrawn | Final | Superseded>
@@ -8,19 +8,37 @@ NEP Template and Instructions
:Created: <date created on, in yyyy-mm-dd format>
:Resolution: <url> (required for Accepted | Rejected | Withdrawn)
+
Abstract
--------
The abstract should be a short description of what the NEP will achieve.
+Note that the — in the title is an elongated dash, not -.
+
+Motivation and Scope
+--------------------
+
+This section describes the need for the proposed change. It should describe
+the existing problem, who it affects, what it is trying to solve, and why.
+This section should explicitly address the scope of and key requirements for
+the proposed change.
+
Detailed description
--------------------
-This section describes the need for the NEP. It should describe the existing
-problem that it is trying to solve and why this NEP makes the situation better.
-It should include examples of how the new functionality would be used and
-perhaps some use cases.
+This section should provide a detailed description of the proposed change.
+It should include examples of how the new functionality would be used,
+intended use-cases and pseudo-code illustrating its use.
+
+
+Related Work
+------------
+
+This section should list relevant and/or similar technologies, possibly in other
+libraries. It does not need to be comprehensive, just list the major examples of
+prior and relevant art.
Implementation
@@ -28,8 +46,8 @@ Implementation
This section lists the major steps required to implement the NEP. Where
possible, it should be noted where one step is dependent on another, and which
-steps may be optionally omitted. Where it makes sense, each step should
-include a link related pull requests as the implementation progresses.
+steps may be optionally omitted. Where it makes sense, each step should
+include a link to related pull requests as the implementation progresses.
Any pull requests or development branches containing work on this NEP should
be linked to from here. (A NEP does not need to be implemented in a single
diff --git a/doc/neps/roadmap.rst b/doc/neps/roadmap.rst
index a45423711..2ec0b7520 100644
--- a/doc/neps/roadmap.rst
+++ b/doc/neps/roadmap.rst
@@ -6,74 +6,78 @@ This is a live snapshot of tasks and features we will be investing resources
in. It may be used to encourage and inspire developers and to search for
funding.
-Interoperability protocols & duck typing
-----------------------------------------
-
-- `__array_function__`
-
- See `NEP 18`_ and a sample implementation_
-
-- Array Duck-Typing
-
- `NEP 22`_ `np.asduckarray()`
-
-- Mixins like `NDArrayOperatorsMixin`:
+Interoperability
+----------------
+
+We aim to make it easier to interoperate with NumPy. There are many NumPy-like
+packages that add interesting new capabilities to the Python ecosystem, as well
+as many libraries that extend NumPy's model in various ways. Work in NumPy to
+facilitate interoperability with all such packages, and the code that uses them,
+may include (among other things) interoperability protocols, better duck typing
+support and ndarray subclass handling.
+
+- The ``__array_function__`` protocol is currently experimental and needs to be
+ matured. See `NEP 18`_ for details.
+- New protocols for overriding other functionality in NumPy may be needed.
+- Array duck typing, or handling "duck arrays", needs improvements. See
+ `NEP 22`_ for details.
+
+Extensibility
+-------------
- - for mutable arrays
- - for reduction methods implemented as ufuncs
+We aim to make it much easier to extend NumPy. The primary topic here is to
+improve the dtype system.
-Better dtypes
--------------
+- Easier custom dtypes:
-- Easier custom dtypes
- Simplify and/or wrap the current C-API
- More consistent support for dtype metadata
- Support for writing a dtype in Python
-- New string dtype(s):
- - Encoded strings with fixed-width storage (utf8, latin1, ...) and/or
- - Variable length strings (could share implementation with dtype=object, but are explicitly type-checked)
- - One of these should probably be the default for text data. The current behavior on Python 3 is neither efficient nor user friendly.
-- `np.int` should not be platform dependent
-- better coercion for string + number
-Random number generation policy & rewrite
------------------------------------------
+- New string dtype(s):
-`NEP 19`_ and a `reference implementation`_
+ - Encoded strings with fixed-width storage (utf8, latin1, ...) and/or
+ - Variable length strings (could share implementation with dtype=object,
+ but are explicitly type-checked)
+ - One of these should probably be the default for text data. The current
+ behavior on Python 3 is neither efficient nor user friendly.
-Indexing
---------
+- `np.int` should not be platform dependent
+- Better coercion for string + number
-vindex/oindex `NEP 21`_
+Performance
+-----------
-Infrastructure
---------------
+We want to further improve NumPy's performance, through:
-NumPy is much more than just the code base itself, we also maintain
-docs, CI, benchmarks, etc.
+- Better use of SIMD instructions, also on platforms other than x86.
+- Reducing ufunc overhead.
+- Optimizations in individual functions.
-- Rewrite numpy.org
-- Benchmarking: improve the extent of the existing suite, and run & render
- the results as part of the docs or website.
+Furthermore we would like to improve the benchmarking system, in terms of coverage,
+ease of use, and publication of the results (now
+`here <https://pv.github.io/numpy-bench>`__) as part of the docs or website.
- - Hardware: find a machine that can reliably run serial benchmarks
- - ASV produces graphs, could we set up a site? Currently at
- https://pv.github.io/numpy-bench/, should that become a community resource?
+Website and documentation
+-------------------------
-Functionality outside core
---------------------------
+Our website (https://numpy.org) is in very poor shape and needs to be rewritten
+completely.
-Some things inside NumPy do not actually match the `Scope of NumPy`.
+The NumPy `documentation <https://www.numpy.org/devdocs/user/index.html>`__ is
+of varying quality - in particular the User Guide needs major improvements.
-- A backend system for `numpy.fft` (so that e.g. `fft-mkl` doesn't need to monkeypatch numpy)
+Random number generation policy & rewrite
+-----------------------------------------
-- Rewrite masked arrays to not be a ndarray subclass -- maybe in a separate project?
-- MaskedArray as a duck-array type, and/or
-- dtypes that support missing values
+A new random number generation framework with higher performance generators is
+close to completion, see `NEP 19`_ and `PR 13163`_.
-- Write a strategy on how to deal with overlap between numpy and scipy for `linalg` and `fft` (and implement it).
+Indexing
+--------
-- Deprecate `np.matrix`
+We intend to add new indexing modes for "vectorized indexing" and "outer indexing",
+see `NEP 21`_.
Continuous Integration
----------------------
@@ -81,31 +85,25 @@ Continuous Integration
We depend on CI to discover problems as we continue to develop NumPy before the
code reaches downstream users.
-- CI for more exotic platforms (e.g. ARM is now available from
- http://www.shippable.com/, but it is not free).
+- CI for more exotic platforms (if available as a service).
- Multi-package testing
- Add an official channel for numpy dev builds for CI usage by other projects so
they may confirm new builds do not break their package.
-Typing
-------
+Other functionality
+-------------------
-Python type annotation syntax should support ndarrays and dtypes.
+- ``MaskedArray`` needs to be improved, ideas include:
-- Type annotations for NumPy: github.com/numpy/numpy-stubs
-- Support for typing shape and dtype in multi-dimensional arrays in Python more generally
-
-NumPy scalars
--------------
+ - Rewrite masked arrays to not be a ndarray subclass -- maybe in a separate project?
+ - MaskedArray as a duck-array type, and/or
+ - dtypes that support missing values
-Numpy has both scalars and zero-dimensional arrays.
+- A backend system for ``numpy.fft`` (so that e.g. ``fft-mkl`` doesn't need to monkeypatch numpy)
+- Write a strategy on how to deal with overlap between NumPy and SciPy for ``linalg``
+ and ``fft`` (and implement it).
+- Deprecate ``np.matrix`` (very slowly)
-- The current implementation adds a large maintenance burden -- can we remove
- scalars and/or simplify it internally?
-- Zero dimensional arrays get converted into scalars by most NumPy
- functions (i.e., output of `np.sin(x)` depends on whether `x` is
- zero-dimensional or not). This inconsistency should be addressed,
- so that one could, e.g., write sane type annotations.
.. _`NEP 19`: https://www.numpy.org/neps/nep-0019-rng-policy.html
.. _`NEP 22`: http://www.numpy.org/neps/nep-0022-ndarray-duck-typing-overview.html
@@ -113,3 +111,4 @@ Numpy has both scalars and zero-dimensional arrays.
.. _implementation: https://gist.github.com/shoyer/1f0a308a06cd96df20879a1ddb8f0006
.. _`reference implementation`: https://github.com/bashtage/randomgen
.. _`NEP 21`: https://www.numpy.org/neps/nep-0021-advanced-indexing.html
+.. _`PR 13163`: https://github.com/numpy/numpy/pull/13163
diff --git a/doc/records.rst.txt b/doc/records.rst.txt
index a608880d7..3c0d55216 100644
--- a/doc/records.rst.txt
+++ b/doc/records.rst.txt
@@ -50,7 +50,7 @@ New possibilities for the "data-type"
**Dictionary (keys "names", "titles", and "formats")**
- This will be converted to a ``PyArray_VOID`` type with corresponding
+ This will be converted to a ``NPY_VOID`` type with corresponding
fields parameter (the formats list will be converted to actual
``PyArray_Descr *`` objects).
@@ -58,10 +58,10 @@ New possibilities for the "data-type"
**Objects (anything with an .itemsize and .fields attribute)**
If its an instance of (a sub-class of) void type, then a new
``PyArray_Descr*`` structure is created corresponding to its
- typeobject (and ``PyArray_VOID``) typenumber. If the type is
+ typeobject (and ``NPY_VOID``) typenumber. If the type is
registered, then the registered type-number is used.
- Otherwise a new ``PyArray_VOID PyArray_Descr*`` structure is created
+ Otherwise a new ``NPY_VOID PyArray_Descr*`` structure is created
and filled ->elsize and ->fields filled in appropriately.
The itemsize attribute must return a number > 0. The fields
diff --git a/doc/release/1.16.0-notes.rst b/doc/release/1.16.0-notes.rst
deleted file mode 100644
index 13ea24cfd..000000000
--- a/doc/release/1.16.0-notes.rst
+++ /dev/null
@@ -1,217 +0,0 @@
-==========================
-NumPy 1.16.0 Release Notes
-==========================
-
-This NumPy release is the last one to support Python 2.7. It will be maintained
-as a long term release with bug fixes only through 2020. To that end, the
-planned code reorganization detailed in NEP-0015 has been made in order to
-facilitate backporting fixes from future releases, which will now have the
-same code organization.
-
-Support for Python 3.4 been dropped in this release, the supported Python
-versions are 2.7 and 3.5-3.7. The wheels are linked with OpenBLAS v0.3.0 .
-
-
-Highlights
-==========
-
-
-New functions
-=============
-
-
-Deprecations
-============
-
-`typeNA` and `sctypeNA` have been deprecated
---------------------------------------------
-
-The type dictionaries `numpy.core.typeNA` and `numpy.core.sctypeNA` were buggy
-and not documented. They will be removed in the 1.18 release. Use
-`numpy.sctypeDict` instead.
-
-
-``np.PackageLoader`` and ``np.pkgload`` have been removed
----------------------------------------------------------
-These were deprecated in 1.10, had no tests, and seem to no longer work in
-1.15 anyway.
-
-
-Future Changes
-==============
-
-* NumPy 1.17 will drop support for Python 2.7.
-
-Expired deprecations
-====================
-
-* NaT comparisons now return ``False`` without a warning, finishing a
- deprecation cycle begun in NumPy 1.11.
-
-Compatibility notes
-===================
-
-f2py script on Windows
-----------------------
-On Windows, the installed script for running f2py is now an ``.exe`` file
-rather than a ``*.py`` file and should be run from the command line as ``f2py``
-whenever the ``Scripts`` directory is in the path. Folks needing compatibility
-with earler versions of Numpy should run ``f2py`` as a module: ``python -m
-numpy.f2py [...]``.
-
-NaT comparisons
----------------
-Consistent with the behavior of NaN, all comparisons other than inequality
-checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
-return ``False``, and inequality checks with NaT now always return ``True``.
-This includes comparisons beteween NaT values. For compatibility with the
-old behavior, use ``np.isnat`` to explicitly check for NaT or convert
-datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
-comparisons.
-
-complex64/128 alignment has changed
------------------------------------
-The memory alignment of complex types is now the same as a C-struct composed of
-two floating point values, while before it was equal to the size of the type.
-For many users (for instance on x64/unix/gcc) this means that complex64 is now
-4-byte aligned instead of 8-byte aligned. An important consequence is that
-aligned structured dtypes may now have a different size. For instance,
-``np.dtype('c8,u1', align=True)`` used to have an itemsize of 16 (on x64/gcc)
-but now it is 12.
-
-More in detail, the complex64 type now has the same alignment as a C-struct
-``struct {float r, i;}``, according to the compiler used to compile numpy, and
-similarly for the complex128 and complex256 types.
-
-
-C API changes
-=============
-
-
-New Features
-============
-
-``max_rows`` keyword added for ``np.loadtxt``
----------------------------------------------
-New keyword ``max_rows`` in `numpy.loadtxt` sets the maximum rows of the
-content to be read after ``skiprows``, as in `numpy.genfromtxt`.
-
-
-Improvements
-============
-
-build shell independence
-------------------------
-NumPy builds should no longer interact with the host machine
-shell directly. ``exec_command`` has been replaced with
-``subprocess.check_output`` where appropriate.
-
-
-`np.polynomial.Polynomial` classes render in LaTeX in Jupyter notebooks
------------------------------------------------------------------------
-
-When used in a front-end that supports it, `Polynomial` instances are now
-rendered through LaTeX. The current format is experimental, and is subject to
-change.
-
-``randint`` and ``choice`` now work on empty distributions
-----------------------------------------------------------
-Even when no elements needed to be drawn, ``np.random.randint`` and
-``np.random.choice`` raised an error when the arguments described an empty
-distribution. This has been fixed so that e.g.
-``np.random.choice([], 0) == np.array([], dtype=float64)``.
-
-``linalg.lstsq`` and ``linalg.qr`` now work with empty matrices
----------------------------------------------------------------
-Previously, a ``LinAlgError`` would be raised when an empty matrix/empty
-matrices (with zero rows and/or columns) is/are passed in. Now outputs of
-appropriate shapes are returned.
-
-``np.diff`` Added kwargs prepend and append
--------------------------------------------
-Add kwargs prepend and append, allowing for values to be inserted
-on either end of the differences. Similar to options for ediff1d.
-Allows for the inverse of cumsum easily via prepend=0
-
-ARM support updated
--------------------
-Support for ARM CPUs has been updated to accommodate 32 and 64 bit targets,
-and also big and little endian byte ordering. AARCH32 memory alignment issues
-have been addressed.
-
-Appending to build flags
-------------------------
-`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and
-other similar such environment variables for compiling Fortran extensions.
-Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the
-behavior will be appending. This applied to: `LDFLAGS`, `F77FLAGS`,
-`F90FLAGS`, `FREEFLAGS`, `FOPT`, `FDEBUG`, and `FFLAGS`. See gh-11525 for more
-details.
-
-``np.clip`` and the ``clip`` method check for memory overlap
-------------------------------------------------------------
-The ``out`` argument to these functions is now always tested for memory overlap
-to avoid corrupted results when memory overlap occurs.
-
-Detailed docstrings for scalar numeric types
---------------------------------------------
-The ``help`` function, when applied to numeric types such as `np.intc`,
-`np.int_`, and `np.longlong`, now lists all of the aliased names for that type,
-distinguishing between platform -dependent and -independent aliases.
-
-Large allocations marked as suitable for transparent hugepages
---------------------------------------------------------------
-On systems that support transparent hugepages over the madvise system call
-numpy now marks that large memory allocations can be backed by hugepages which
-reduces page fault overhead and can in some fault heavy cases improve
-performance significantly.
-On Linux for huge pages to be used the setting
-`/sys/kernel/mm/transparent_hugepage/enabled` must be at least `madvise`.
-Systems which already have it set to `always` will not see much difference as
-the kernel will automatically use huge pages where appropriate.
-
-Users of very old Linux kernels (~3.x and older) should make sure that
-`/sys/kernel/mm/transparent_hugepage/defrag` is not set to `always` to avoid
-performance problems due concurrency issues in the memory defragmentation.
-
-
-Changes
-=======
-
-Comparison ufuncs will now error rather than return NotImplemented
-------------------------------------------------------------------
-Previously, comparison ufuncs such as ``np.equal`` would return
-`NotImplemented` if their arguments had structured dtypes, to help comparison
-operators such as ``__eq__`` deal with those. This is no longer needed, as the
-relevant logic has moved to the comparison operators proper (which thus do
-continue to return `NotImplemented` as needed). Hence, like all other ufuncs,
-the comparison ufuncs will now error on structured dtypes.
-
-Positive will now raise a deprecation warning for non-numerical arrays
-----------------------------------------------------------------------
-Previously, ``+array`` unconditionally returned a copy. Now, it will
-raise a ``DeprecationWarning`` if the array is not numerical (i.e.,
-if ``np.positive(array)`` raises a ``TypeError``. For ``ndarray``
-subclasses that override the default ``__array_ufunc__`` implementation,
-the ``TypeError`` is passed on.
-
-``maximum`` and ``minimum`` set invalid float status for more dtypes
---------------------------------------------------------------------
-Previously only ``float32`` and ``float64`` set invalid float status (by
-default emitting a `RuntimeWarning`) when a Nan is encountered in
-`numpy.maximum` and `numpy.minimum`. Now ``float16``, ``complex64``,
-``complex128`` and ``complex256`` will do so as well.
-
-Umath and multiarray c-extension modules merged into a single module
---------------------------------------------------------------------
-The two modules were merged, according to the first step in `NEP 15`_.
-Previously `np.core.umath` and `np.core.multiarray` were the c-extension
-modules, they are now python wrappers to the single `np.core/_multiarray_math`
-c-extension module.
-
-``getfield`` validity checks extended
-----------------------------------------
-`numpy.ndarray.getfield` now checks the dtype and offset arguments to prevent
-accessing invalid memory locations.
-
-.. _`NEP 15` : http://www.numpy.org/neps/nep-0015-merge-multiarray-umath.html
diff --git a/doc/release/time_based_proposal.rst b/doc/release/time_based_proposal.rst
deleted file mode 100644
index 2eb13562d..000000000
--- a/doc/release/time_based_proposal.rst
+++ /dev/null
@@ -1,129 +0,0 @@
-.. vim:syntax=rst
-
-Introduction
-============
-
-This document proposes some enhancements for numpy and scipy releases.
-Successive numpy and scipy releases are too far apart from a time point of
-view - some people who are in the numpy release team feel that it cannot
-improve without a bit more formal release process. The main proposal is to
-follow a time-based release, with expected dates for code freeze, beta and rc.
-The goal is two folds: make release more predictable, and move the code forward.
-
-Rationale
-=========
-
-Right now, the release process of numpy is relatively organic. When some
-features are there, we may decide to make a new release. Because there is not
-fixed schedule, people don't really know when new features and bug fixes will
-go into a release. More significantly, having an expected release schedule
-helps to *coordinate* efforts: at the beginning of a cycle, everybody can jump
-in and put new code, even break things if needed. But after some point, only
-bug fixes are accepted: this makes beta and RC releases much easier; calming
-things down toward the release date helps focusing on bugs and regressions
-
-Proposal
-========
-
-Time schedule
--------------
-
-The proposed schedule is to release numpy every 9 weeks - the exact period can
-be tweaked if it ends up not working as expected. There will be several stages
-for the cycle:
-
- * Development: anything can happen (by anything, we mean as currently
- done). The focus is on new features, refactoring, etc...
-
- * Beta: no new features. No bug fixing which requires heavy changes.
- regression fixes which appear on supported platforms and were not
- caught earlier.
-
- * Polish/RC: only docstring changes and blocker regressions are allowed.
-
-The schedule would be as follows:
-
- +------+-----------------+-----------------+------------------+
- | Week | 1.3.0 | 1.4.0 | Release time |
- +======+=================+=================+==================+
- | 1 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 2 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 3 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 4 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 5 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 6 | Development | | |
- +------+-----------------+-----------------+------------------+
- | 7 | Beta | | |
- +------+-----------------+-----------------+------------------+
- | 8 | Beta | | |
- +------+-----------------+-----------------+------------------+
- | 9 | Beta | | 1.3.0 released |
- +------+-----------------+-----------------+------------------+
- | 10 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 11 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 12 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 13 | Polish | Development | |
- +------+-----------------+-----------------+------------------+
- | 14 | | Development | |
- +------+-----------------+-----------------+------------------+
- | 15 | | Development | |
- +------+-----------------+-----------------+------------------+
- | 16 | | Beta | |
- +------+-----------------+-----------------+------------------+
- | 17 | | Beta | |
- +------+-----------------+-----------------+------------------+
- | 18 | | Beta | 1.4.0 released |
- +------+-----------------+-----------------+------------------+
-
-Each stage can be defined as follows:
-
- +------------------+-------------+----------------+----------------+
- | | Development | Beta | Polish |
- +==================+=============+================+================+
- | Python Frozen | | slushy | Y |
- +------------------+-------------+----------------+----------------+
- | Docstring Frozen | | slushy | thicker slush |
- +------------------+-------------+----------------+----------------+
- | C code Frozen | | thicker slush | thicker slush |
- +------------------+-------------+----------------+----------------+
-
-Terminology:
-
- * slushy: you can change it if you beg the release team and it's really
- important and you coordinate with docs/translations; no "big"
- changes.
-
- * thicker slush: you can change it if it's an open bug marked
- showstopper for the Polish release, you beg the release team, the
- change is very very small yet very very important, and you feel
- extremely guilty about your transgressions.
-
-The different frozen states are intended to be gradients. The exact meaning is
-decided by the release manager: he has the last word on what's go in, what
-doesn't. The proposed schedule means that there would be at most 12 weeks
-between putting code into the source code repository and being released.
-
-Release team
-------------
-
-For every release, there would be at least one release manager. We propose to
-rotate the release manager: rotation means it is not always the same person
-doing the dirty job, and it should also keep the release manager honest.
-
-References
-==========
-
- * Proposed schedule for Gnome from Havoc Pennington (one of the core
- GTK and Gnome manager):
- https://mail.gnome.org/archives/gnome-hackers/2002-June/msg00041.html
- The proposed schedule is heavily based on this email
-
- * https://wiki.gnome.org/ReleasePlanning/Freezes
diff --git a/doc/release/upcoming_changes/10151.improvement.rst b/doc/release/upcoming_changes/10151.improvement.rst
new file mode 100644
index 000000000..3706a5132
--- /dev/null
+++ b/doc/release/upcoming_changes/10151.improvement.rst
@@ -0,0 +1,9 @@
+Different C numeric types of the same size have unique names
+------------------------------------------------------------
+On any given platform, two of ``np.intc``, ``np.int_``, and ``np.longlong``
+would previously appear indistinguishable through their ``repr``, despite
+their corresponding ``dtype`` having different properties.
+A similar problem existed for the unsigned counterparts to these types, and on
+some platforms for ``np.double`` and ``np.longdouble``.
+
+These types now always print with a unique ``__name__``.
diff --git a/doc/release/upcoming_changes/12284.new_feature.rst b/doc/release/upcoming_changes/12284.new_feature.rst
new file mode 100644
index 000000000..25321cd9b
--- /dev/null
+++ b/doc/release/upcoming_changes/12284.new_feature.rst
@@ -0,0 +1,5 @@
+
+Add our own ``*.pxd`` cython import file
+--------------------------------------------
+Added a ``numpy/__init__.pxd`` file. It will be used for ``cimport numpy``.
+
diff --git a/doc/release/upcoming_changes/13605.deprecation.rst b/doc/release/upcoming_changes/13605.deprecation.rst
new file mode 100644
index 000000000..bff12e965
--- /dev/null
+++ b/doc/release/upcoming_changes/13605.deprecation.rst
@@ -0,0 +1,9 @@
+`np.fromfile` and `np.fromstring` will error on bad data
+--------------------------------------------------------
+
+In future numpy releases, the functions `np.fromfile` and `np.fromstring`
+will throw an error when parsing bad data.
+This will now give a ``DeprecationWarning`` where previously partial or
+even invalid data was silently returned. This deprecation also affects
+the C defined functions :c:func:`PyArray_FromString` and
+:c:func:`PyArray_FromFile`.
diff --git a/doc/release/upcoming_changes/13610.improvement.rst b/doc/release/upcoming_changes/13610.improvement.rst
new file mode 100644
index 000000000..6f97b43ad
--- /dev/null
+++ b/doc/release/upcoming_changes/13610.improvement.rst
@@ -0,0 +1,5 @@
+``argwhere`` now produces a consistent result on 0d arrays
+----------------------------------------------------------
+On N-d arrays, `numpy.argwhere` now always produces an array of shape
+``(n_non_zero, arr.ndim)``, even when ``arr.ndim == 0``. Previously, the
+last axis would have a dimension of 1 in this case.
diff --git a/doc/release/upcoming_changes/13899.change.rst b/doc/release/upcoming_changes/13899.change.rst
new file mode 100644
index 000000000..da8277347
--- /dev/null
+++ b/doc/release/upcoming_changes/13899.change.rst
@@ -0,0 +1,4 @@
+Incorrect ``threshold`` in ``np.set_printoptions`` raises ``TypeError`` or ``ValueError``
+-----------------------------------------------------------------------------------------
+Previously an incorrect ``threshold`` raised ``ValueError``; it now raises ``TypeError``
+for non-numeric types and ``ValueError`` for ``nan`` values.
diff --git a/doc/release/upcoming_changes/14036.deprecation.rst b/doc/release/upcoming_changes/14036.deprecation.rst
new file mode 100644
index 000000000..3d997b9a2
--- /dev/null
+++ b/doc/release/upcoming_changes/14036.deprecation.rst
@@ -0,0 +1,4 @@
+Deprecate `PyArray_As1D`, `PyArray_As2D`
+----------------------------------------
+`PyArray_As1D`, `PyArray_As2D` are deprecated, use
+`PyArray_AsCArray` instead \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14036.expired.rst b/doc/release/upcoming_changes/14036.expired.rst
new file mode 100644
index 000000000..05164aa38
--- /dev/null
+++ b/doc/release/upcoming_changes/14036.expired.rst
@@ -0,0 +1,2 @@
+* ``PyArray_As1D`` and ``PyArray_As2D`` have been removed in favor of
+ ``PyArray_AsCArray``
diff --git a/doc/release/upcoming_changes/14039.expired.rst b/doc/release/upcoming_changes/14039.expired.rst
new file mode 100644
index 000000000..effee0626
--- /dev/null
+++ b/doc/release/upcoming_changes/14039.expired.rst
@@ -0,0 +1,2 @@
+* ``np.rank`` has been removed. This was deprecated in NumPy 1.10
+ and has been replaced by ``np.ndim``.
diff --git a/doc/release/upcoming_changes/14100.expired.rst b/doc/release/upcoming_changes/14100.expired.rst
new file mode 100644
index 000000000..e9ea9eeb4
--- /dev/null
+++ b/doc/release/upcoming_changes/14100.expired.rst
@@ -0,0 +1,3 @@
+* ``PyArray_FromDimsAndDataAndDescr`` and ``PyArray_FromDims`` have been
+ removed (they will always raise an error). Use ``PyArray_NewFromDescr``
+ and ``PyArray_SimpleNew`` instead.
diff --git a/doc/release/upcoming_changes/14181.deprecation.rst b/doc/release/upcoming_changes/14181.deprecation.rst
new file mode 100644
index 000000000..9979b2246
--- /dev/null
+++ b/doc/release/upcoming_changes/14181.deprecation.rst
@@ -0,0 +1,3 @@
+Deprecate `np.alen`
+-------------------
+`np.alen` was deprecated. Use `len` instead.
diff --git a/doc/release/upcoming_changes/14248.change.rst b/doc/release/upcoming_changes/14248.change.rst
new file mode 100644
index 000000000..9ae0f16bc
--- /dev/null
+++ b/doc/release/upcoming_changes/14248.change.rst
@@ -0,0 +1,10 @@
+`numpy.distutils`: append behavior changed for LDFLAGS and similar
+------------------------------------------------------------------
+`numpy.distutils` has always overridden rather than appended to ``LDFLAGS`` and
+other similar such environment variables for compiling Fortran extensions. Now
+the default behavior has changed to appending - which is the expected behavior
+in most situations. To preserve the old (overwriting) behavior, set the
+``NPY_DISTUTILS_APPEND_FLAGS`` environment variable to 0. This applies to:
+``LDFLAGS``, ``F77FLAGS``, ``F90FLAGS``, ``FREEFLAGS``, ``FOPT``, ``FDEBUG``,
+and ``FFLAGS``. NumPy 1.16 and 1.17 gave build warnings in situations where this
+change in behavior would have affected the compile flags used.
diff --git a/doc/release/upcoming_changes/14255.improvement.rst b/doc/release/upcoming_changes/14255.improvement.rst
new file mode 100644
index 000000000..e17835efd
--- /dev/null
+++ b/doc/release/upcoming_changes/14255.improvement.rst
@@ -0,0 +1,4 @@
+`numpy.unique` has consistent axes order (except the chosen one) when ``axis`` is not None
+------------------------------------------------------------------------------------------
+Using ``moveaxis`` instead of ``swapaxes`` in `numpy.unique`, so that the ordering of axes
+except the axis in arguments will not be broken.
diff --git a/doc/release/upcoming_changes/14256.expired.rst b/doc/release/upcoming_changes/14256.expired.rst
new file mode 100644
index 000000000..229514171
--- /dev/null
+++ b/doc/release/upcoming_changes/14256.expired.rst
@@ -0,0 +1,3 @@
+* ``numeric.loads``, ``numeric.load``, ``np.ma.dump``,
+ ``np.ma.dumps``, ``np.ma.load``, ``np.ma.loads`` are removed,
+ use ``pickle`` methods instead \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14259.expired.rst b/doc/release/upcoming_changes/14259.expired.rst
new file mode 100644
index 000000000..fee44419b
--- /dev/null
+++ b/doc/release/upcoming_changes/14259.expired.rst
@@ -0,0 +1,6 @@
+* ``arrayprint.FloatFormat``, ``arrayprint.LongFloatFormat`` has been removed,
+ use ``FloatingFormat`` instead
+* ``arrayprint.ComplexFormat``, ``arrayprint.LongComplexFormat`` has been
+ removed, use ``ComplexFloatingFormat`` instead
+* ``arrayprint.StructureFormat`` has been removed, use ``StructureVoidFormat``
+ instead \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14325.expired.rst b/doc/release/upcoming_changes/14325.expired.rst
new file mode 100644
index 000000000..348b3d524
--- /dev/null
+++ b/doc/release/upcoming_changes/14325.expired.rst
@@ -0,0 +1,2 @@
+* ``np.testing.rand`` has been removed. This was deprecated in NumPy 1.11
+ and has been replaced by ``np.random.rand``.
diff --git a/doc/release/upcoming_changes/14335.expired.rst b/doc/release/upcoming_changes/14335.expired.rst
new file mode 100644
index 000000000..53598cea1
--- /dev/null
+++ b/doc/release/upcoming_changes/14335.expired.rst
@@ -0,0 +1,2 @@
+* Class ``SafeEval`` in ``numpy/lib/utils.py`` has been removed. This was deprecated in NumPy 1.10.
+ Use ``np.safe_eval`` instead. \ No newline at end of file
diff --git a/doc/release/upcoming_changes/14393.c_api.rst b/doc/release/upcoming_changes/14393.c_api.rst
new file mode 100644
index 000000000..0afd27584
--- /dev/null
+++ b/doc/release/upcoming_changes/14393.c_api.rst
@@ -0,0 +1,5 @@
+PyDataType_ISUNSIZED(descr) now returns False for structured datatypes
+----------------------------------------------------------------------
+Previously this returned True for any datatype of itemsize 0, but now this
+returns False for the non-flexible datatype with itemsize 0, ``np.dtype([])``.
+
diff --git a/doc/release/upcoming_changes/14464.improvement.rst b/doc/release/upcoming_changes/14464.improvement.rst
new file mode 100644
index 000000000..36ee4090b
--- /dev/null
+++ b/doc/release/upcoming_changes/14464.improvement.rst
@@ -0,0 +1,6 @@
+`numpy.matmul` with boolean output now converts to boolean values
+-----------------------------------------------------------------
+Calling `numpy.matmul` where the output is a boolean array would fill the array
+with uint8 equivalents of the result, rather than 0/1. Now it forces the output
+to 0 or 1 (``NPY_TRUE`` or ``NPY_FALSE``).
+
diff --git a/doc/release/upcoming_changes/14498.change.rst b/doc/release/upcoming_changes/14498.change.rst
new file mode 100644
index 000000000..fd784e289
--- /dev/null
+++ b/doc/release/upcoming_changes/14498.change.rst
@@ -0,0 +1,7 @@
+Remove ``numpy.random.entropy`` without a deprecation
+-----------------------------------------------------
+
+``numpy.random.entropy`` was added to the `numpy.random` namespace in 1.17.0.
+It was meant to be a private c-extension module, but was exposed as public.
+It has been replaced by `numpy.random.SeedSequence` so the module was
+completely removed.
diff --git a/doc/release/upcoming_changes/14501.improvement.rst b/doc/release/upcoming_changes/14501.improvement.rst
new file mode 100644
index 000000000..f397ecccf
--- /dev/null
+++ b/doc/release/upcoming_changes/14501.improvement.rst
@@ -0,0 +1,6 @@
+`numpy.random.randint` produced incorrect value when the range was ``2**32``
+----------------------------------------------------------------------------
+The implementation introduced in 1.17.0 had an incorrect check when
+determining whether to use the 32-bit path or the full 64-bit
+path that incorrectly redirected random integer generation with a high - low
+range of ``2**32`` to the 64-bit generator.
diff --git a/doc/release/upcoming_changes/14510.compatibility.rst b/doc/release/upcoming_changes/14510.compatibility.rst
new file mode 100644
index 000000000..63d46d2f7
--- /dev/null
+++ b/doc/release/upcoming_changes/14510.compatibility.rst
@@ -0,0 +1,12 @@
+`numpy.lib.recfunctions.drop_fields` can no longer return `None`
+----------------------------------------------------------------
+If ``drop_fields`` is used to drop all fields, previously the array would
+be completely discarded and `None` returned. Now it returns an array of the
+same shape as the input, but with no fields. The old behavior can be retained
+with::
+
+ dropped_arr = drop_fields(arr, ['a', 'b'])
+ if dropped_arr.dtype.names == ():
+ dropped_arr = None
+
+converting the empty recarray to `None`.
diff --git a/doc/release/upcoming_changes/14518.change.rst b/doc/release/upcoming_changes/14518.change.rst
new file mode 100644
index 000000000..ba3844c85
--- /dev/null
+++ b/doc/release/upcoming_changes/14518.change.rst
@@ -0,0 +1,18 @@
+Add options to quiet build configuration and build with ``-Werror``
+-------------------------------------------------------------------
+Added two new configuration options. During the ``build_src`` subcommand, as
+part of configuring NumPy, the files ``_numpyconfig.h`` and ``config.h`` are
+created by probing support for various runtime functions and routines.
+Previously, the very verbose compiler output during this stage clouded more
+important information. By default the output is silenced. Running ``runtests.py
+--debug-info`` will add ``--verbose-cfg`` to the ``build_src`` subcommand,
+which will restore the previous behaviour.
+
+Adding ``CFLAGS=-Werror`` to turn warnings into errors would trigger errors
+during the configuration. Now ``runtests.py --warn-error`` will add
+``--warn-error`` to the ``build`` subcommand, which will percolate to the
+``build_ext`` and ``build_lib`` subcommands. This will add the compiler flag
+to those stages and turn compiler warnings into errors while actually building
+NumPy itself, avoiding the ``build_src`` subcommand compiler calls.
+
+(`gh-14527 <https://github.com/numpy/numpy/pull/14527>`__)
diff --git a/doc/release/upcoming_changes/14567.expired.rst b/doc/release/upcoming_changes/14567.expired.rst
new file mode 100644
index 000000000..59cb600fb
--- /dev/null
+++ b/doc/release/upcoming_changes/14567.expired.rst
@@ -0,0 +1,5 @@
+The files ``numpy/testing/decorators.py``, ``numpy/testing/noseclasses.py``
+and ``numpy/testing/nosetester.py`` have been removed. They were never
+meant to be public (all relevant objects are present in the
+``numpy.testing`` namespace), and importing them has given a deprecation
+warning since NumPy 1.15.0
diff --git a/doc/release/upcoming_changes/14583.expired.rst b/doc/release/upcoming_changes/14583.expired.rst
new file mode 100644
index 000000000..1fad06309
--- /dev/null
+++ b/doc/release/upcoming_changes/14583.expired.rst
@@ -0,0 +1,2 @@
+* Remove deprecated support for boolean and empty condition lists in
+ `numpy.select`
diff --git a/doc/release/upcoming_changes/14596.expired.rst b/doc/release/upcoming_changes/14596.expired.rst
new file mode 100644
index 000000000..3831d5401
--- /dev/null
+++ b/doc/release/upcoming_changes/14596.expired.rst
@@ -0,0 +1,2 @@
+* Array order only accepts 'C', 'F', 'A', and 'K'. More permissive options
+ were deprecated in NumPy 1.11.
diff --git a/doc/release/upcoming_changes/14620.expired.rst b/doc/release/upcoming_changes/14620.expired.rst
new file mode 100644
index 000000000..e35589b53
--- /dev/null
+++ b/doc/release/upcoming_changes/14620.expired.rst
@@ -0,0 +1 @@
+* np.linspace param num must be an integer. This was deprecated in NumPy 1.12.
diff --git a/doc/release/upcoming_changes/14682.expired.rst b/doc/release/upcoming_changes/14682.expired.rst
new file mode 100644
index 000000000..e9a8107ec
--- /dev/null
+++ b/doc/release/upcoming_changes/14682.expired.rst
@@ -0,0 +1,2 @@
+* UFuncs with multiple outputs must use a tuple for the `out` kwarg. This
+ finishes a deprecation started in NumPy 1.10.
diff --git a/doc/release/upcoming_changes/README.rst b/doc/release/upcoming_changes/README.rst
new file mode 100644
index 000000000..7f6476bda
--- /dev/null
+++ b/doc/release/upcoming_changes/README.rst
@@ -0,0 +1,55 @@
+:orphan:
+
+Changelog
+=========
+
+This directory contains "news fragments" which are short files that contain a
+small **ReST**-formatted text that will be added to the next what's new page.
+
+Make sure to use full sentences with correct case and punctuation, and please
+try to use Sphinx intersphinx using backticks. The fragment should have a
+header line and an underline using ``------``
+
+Each file should be named like ``<PULL REQUEST>.<TYPE>.rst``, where
+``<PULL REQUEST>`` is a pull request number, and ``<TYPE>`` is one of:
+
+* ``new_function``: New user facing functions.
+* ``deprecation``: Changes existing code to emit a DeprecationWarning.
+* ``future``: Changes existing code to emit a FutureWarning.
+* ``expired``: Removal of a deprecated part of the API.
+* ``compatibility``: A change which requires users to change code and is not
+ backwards compatible. (Not to be used for removal of deprecated features.)
+* ``c_api``: Changes in the Numpy C-API exported functions
+* ``new_feature``: New user facing features like ``kwargs``.
+* ``improvement``: Performance and edge-case changes
+* ``change``: Other changes
+* ``highlight``: Adds a highlight bullet point to use as a possible highlight
+ of the release.
+
+Most categories should be formatted as paragraphs with a heading.
+So for example: ``123.new_feature.rst`` would have the content::
+
+ ``my_new_feature`` option for `my_favorite_function`
+ ----------------------------------------------------
+ The ``my_new_feature`` option is now available for `my_favorite_function`.
+ To use it, write ``np.my_favorite_function(..., my_new_feature=True)``.
+
+``highlight`` is usually formatted as bullet points making the fragment
+``* This is a highlight``.
+
+Note the use of single-backticks to get an internal link (assuming
+``my_favorite_function`` is exported from the ``numpy`` namespace),
+and double-backticks for code.
+
+If you are unsure what pull request type to use, don't hesitate to ask in your
+PR.
+
+You can install ``towncrier`` and run ``towncrier --draft --version 1.18``
+if you want to get a preview of how your change will look in the final release
+notes.
+
+.. note::
+
+ This README was adapted from the pytest changelog readme under the terms of
+ the MIT licence.
+
diff --git a/doc/release/upcoming_changes/template.rst b/doc/release/upcoming_changes/template.rst
new file mode 100644
index 000000000..9c8a3b5fc
--- /dev/null
+++ b/doc/release/upcoming_changes/template.rst
@@ -0,0 +1,38 @@
+{% set title = "NumPy {} Release Notes".format(versiondata.version) %}
+{{ "=" * title|length }}
+{{ title }}
+{{ "=" * title|length }}
+
+{% for section, _ in sections.items() %}
+{% set underline = underlines[0] %}{% if section %}{{ section }}
+{{ underline * section|length }}{% set underline = underlines[1] %}
+
+{% endif %}
+{% if sections[section] %}
+{% for category, val in definitions.items() if category in sections[section] %}
+
+{{ definitions[category]['name'] }}
+{{ underline * definitions[category]['name']|length }}
+
+{% if definitions[category]['showcontent'] %}
+{% for text, values in sections[section][category].items() %}
+{{ text }}
+{{ get_indent(text) }}({{values|join(', ') }})
+
+{% endfor %}
+{% else %}
+- {{ sections[section][category]['']|join(', ') }}
+
+{% endif %}
+{% if sections[section][category]|length == 0 %}
+No significant changes.
+
+{% else %}
+{% endif %}
+{% endfor %}
+{% else %}
+No significant changes.
+
+
+{% endif %}
+{% endfor %}
diff --git a/doc/scipy-sphinx-theme b/doc/scipy-sphinx-theme
-Subproject d990ab9134199f6496b9ac8567f10791f04a720
+Subproject f0d96ae2bf3b010ce53adadde1e38997497a513
diff --git a/doc/source/_static/numpy_logo.png b/doc/source/_static/numpy_logo.png
new file mode 100644
index 000000000..af8cbe323
--- /dev/null
+++ b/doc/source/_static/numpy_logo.png
Binary files differ
diff --git a/doc/source/_templates/autosummary/base.rst b/doc/source/_templates/autosummary/base.rst
new file mode 100644
index 000000000..0331154a7
--- /dev/null
+++ b/doc/source/_templates/autosummary/base.rst
@@ -0,0 +1,14 @@
+{% if objtype == 'property' %}
+:orphan:
+{% endif %}
+
+{{ fullname | escape | underline}}
+
+.. currentmodule:: {{ module }}
+
+{% if objtype == 'property' %}
+property
+{% endif %}
+
+.. auto{{ objtype }}:: {{ objname }}
+
diff --git a/doc/source/_templates/indexcontent.html b/doc/source/_templates/indexcontent.html
index 008eaaa7c..294d39233 100644
--- a/doc/source/_templates/indexcontent.html
+++ b/doc/source/_templates/indexcontent.html
@@ -7,6 +7,8 @@
<span class="linkdescr">start here</span></p>
<p class="biglink"><a class="biglink" href="{{ pathto("reference/index") }}">NumPy Reference</a><br/>
<span class="linkdescr">reference documentation</span></p>
+ <p class="biglink"><a class="biglink" href="{{ pathto("benchmarking") }}">Benchmarking</a><br/>
+ <span class="linkdescr">benchmarking NumPy</span></p>
<p class="biglink"><a class="biglink" href="{{ pathto("f2py/index") }}">F2Py Guide</a><br/>
<span class="linkdescr">f2py documentation</span></p>
<p class="biglink"><a class="biglink" href="{{ pathto("dev/index") }}">NumPy Developer Guide</a><br/>
diff --git a/doc/source/_templates/indexsidebar.html b/doc/source/_templates/indexsidebar.html
index 51e7c4308..4707fc0e8 100644
--- a/doc/source/_templates/indexsidebar.html
+++ b/doc/source/_templates/indexsidebar.html
@@ -1,4 +1,5 @@
<h3>Resources</h3>
<ul>
+ <li><a href="https://numpy.org/">NumPy.org website</a></li>
<li><a href="https://scipy.org/">Scipy.org website</a></li>
</ul>
diff --git a/doc/source/_templates/layout.html b/doc/source/_templates/layout.html
index 77da54a00..beaa297db 100644
--- a/doc/source/_templates/layout.html
+++ b/doc/source/_templates/layout.html
@@ -1,5 +1,15 @@
{% extends "!layout.html" %}
+{%- block header %}
+<div class="container">
+ <div class="top-scipy-org-logo-header" style="background-color: #a2bae8;">
+ <a href="{{ pathto('index') }}">
+ <img border=0 alt="NumPy" src="{{ pathto('_static/numpy_logo.png', 1) }}"></a>
+ </div>
+ </div>
+</div>
+
+{% endblock %}
{% block rootrellink %}
{% if pagename != 'index' %}
<li class="active"><a href="{{ pathto('index') }}">{{ shorttitle|e }}</a></li>
diff --git a/doc/source/about.rst b/doc/source/about.rst
index 5ac4facbb..3e83833d1 100644
--- a/doc/source/about.rst
+++ b/doc/source/about.rst
@@ -8,7 +8,7 @@ needed for scientific computing with Python. This package contains:
- sophisticated :ref:`(broadcasting) functions <ufuncs>`
- basic :ref:`linear algebra functions <routines.linalg>`
- basic :ref:`Fourier transforms <routines.fft>`
-- sophisticated :ref:`random number capabilities <routines.random>`
+- sophisticated :ref:`random number capabilities <numpyrandom>`
- tools for integrating Fortran code
- tools for integrating C/C++ code
diff --git a/doc/source/benchmarking.rst b/doc/source/benchmarking.rst
new file mode 100644
index 000000000..9f0eeb03a
--- /dev/null
+++ b/doc/source/benchmarking.rst
@@ -0,0 +1 @@
+.. include:: ../../benchmarks/README.rst
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 455e9748b..83cecc917 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -3,12 +3,8 @@ from __future__ import division, absolute_import, print_function
import sys, os, re
-# Check Sphinx version
-import sphinx
-if sphinx.__version__ < "1.2.1":
- raise RuntimeError("Sphinx 1.2.1 or newer required")
-
-needs_sphinx = '1.0'
+# Minimum version, enforced by sphinx
+needs_sphinx = '2.2.0'
# -----------------------------------------------------------------------------
# General configuration
@@ -19,17 +15,22 @@ needs_sphinx = '1.0'
sys.path.insert(0, os.path.abspath('../sphinxext'))
-extensions = ['sphinx.ext.autodoc', 'numpydoc',
- 'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
- 'sphinx.ext.doctest', 'sphinx.ext.autosummary',
- 'sphinx.ext.graphviz', 'sphinx.ext.ifconfig',
- 'matplotlib.sphinxext.plot_directive']
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'numpydoc',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.graphviz',
+ 'sphinx.ext.ifconfig',
+ 'matplotlib.sphinxext.plot_directive',
+ 'IPython.sphinxext.ipython_console_highlighting',
+ 'IPython.sphinxext.ipython_directive',
+ 'sphinx.ext.imgmath',
+]
-if sphinx.__version__ >= "1.4":
- extensions.append('sphinx.ext.imgmath')
- imgmath_image_format = 'svg'
-else:
- extensions.append('sphinx.ext.pngmath')
+imgmath_image_format = 'svg'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
@@ -37,9 +38,11 @@ templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
+master_doc = 'contents'
+
# General substitutions.
project = 'NumPy'
-copyright = '2008-2018, The SciPy community'
+copyright = '2008-2019, The SciPy community'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
@@ -85,6 +88,7 @@ pygments_style = 'sphinx'
def setup(app):
# add a config value for `ifconfig` directives
app.add_config_value('python_version_major', str(sys.version_info.major), 'env')
+ app.add_lexer('NumPyC', NumPyLexer(stripnl=False))
# -----------------------------------------------------------------------------
# HTML output
@@ -113,7 +117,9 @@ else:
"edit_link": False,
"sidebar": "left",
"scipy_org_logo": False,
- "rootlinks": []
+ "rootlinks": [("https://numpy.org/", "NumPy.org"),
+ ("https://numpy.org/doc", "Docs"),
+ ]
}
html_sidebars = {'index': ['indexsidebar.html', 'searchbox.html']}
@@ -167,6 +173,10 @@ latex_documents = [
# not chapters.
#latex_use_parts = False
+latex_elements = {
+ 'fontenc': r'\usepackage[LGR,T1]{fontenc}'
+}
+
# Additional stuff for the LaTeX preamble.
latex_preamble = r'''
\usepackage{amsmath}
@@ -234,7 +244,7 @@ numpydoc_use_plots = True
# -----------------------------------------------------------------------------
import glob
-autosummary_generate = glob.glob("reference/*.rst")
+autosummary_generate = True
# -----------------------------------------------------------------------------
# Coverage checker
@@ -355,3 +365,18 @@ def linkcode_resolve(domain, info):
else:
return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
+
+from pygments.lexers import CLexer
+from pygments import token
+import copy
+
+class NumPyLexer(CLexer):
+ name = 'NUMPYLEXER'
+
+ tokens = copy.deepcopy(CLexer.tokens)
+ # Extend the regex for valid identifiers with @
+ for k, val in tokens.items():
+ for i, v in enumerate(val):
+ if isinstance(v, tuple):
+ if isinstance(v[0], str):
+ val[i] = (v[0].replace('a-zA-Z', 'a-zA-Z@'),) + v[1:]
diff --git a/doc/source/dev/conduct/code_of_conduct.rst b/doc/source/dev/conduct/code_of_conduct.rst
index 604f14662..aca39d8a7 100644
--- a/doc/source/dev/conduct/code_of_conduct.rst
+++ b/doc/source/dev/conduct/code_of_conduct.rst
@@ -121,8 +121,7 @@ a conflict of interest in handling it, then they will recuse themselves from
considering your report. Alternatively, if for any reason you feel
uncomfortable making a report to the committee, then you can also contact:
-- NumFOCUS Executive Director: Leah Silen
-- NumFOCUS President: Andy Terrel
+- Senior `NumFOCUS staff <https://numfocus.org/code-of-conduct#persons-responsible>`__: conduct@numfocus.org
Incident reporting resolution & Code of Conduct enforcement
diff --git a/doc/source/dev/conduct/report_handling_manual.rst b/doc/source/dev/conduct/report_handling_manual.rst
index 5f5e32f13..d39b615bb 100644
--- a/doc/source/dev/conduct/report_handling_manual.rst
+++ b/doc/source/dev/conduct/report_handling_manual.rst
@@ -1,3 +1,5 @@
+:orphan:
+
.. _CoC_reporting_manual:
NumPy Code of Conduct - How to follow up on a report
diff --git a/doc/source/dev/development_environment.rst b/doc/source/dev/development_environment.rst
index aa4326f63..9d618cc9f 100644
--- a/doc/source/dev/development_environment.rst
+++ b/doc/source/dev/development_environment.rst
@@ -3,18 +3,27 @@
Setting up and using your development environment
=================================================
+.. _recommended-development-setup:
+
Recommended development setup
-----------------------------
Since NumPy contains parts written in C and Cython that need to be
compiled before use, make sure you have the necessary compilers and Python
-development headers installed - see :ref:`building-from-source`.
+development headers installed - see :ref:`building-from-source`. Building
+NumPy as of version ``1.17`` requires a C99 compliant compiler. For
+some older compilers this may require ``export CFLAGS='-std=c99'``.
Having compiled code also means that importing NumPy from the development
sources needs some additional steps, which are explained below. For the rest
of this chapter we assume that you have set up your git repo as described in
:ref:`using-git`.
+.. _testing-builds:
+
+Testing builds
+--------------
+
To build the development version of NumPy and run tests, spawn
interactive shells with the Python import paths properly set up etc.,
do one of::
@@ -43,6 +52,10 @@ When using pytest as a target (the default), you can
$ python runtests.py -v -t numpy/core/tests/test_multiarray.py -- -k "MatMul and not vector"
+.. note::
+
+   Remember that all tests of NumPy should pass before committing your changes.
+
Using ``runtests.py`` is the recommended approach to running tests.
There are also a number of alternatives to it, for example in-place
build or installing to a virtualenv. See the FAQ below for details.
@@ -83,19 +96,31 @@ installs a ``.egg-link`` file into your site-packages as well as adjusts the
Other build options
-------------------
+Build options can be discovered by running any of::
+
+ $ python setup.py --help
+ $ python setup.py --help-commands
+
It's possible to do a parallel build with ``numpy.distutils`` with the ``-j`` option;
see :ref:`parallel-builds` for more details.
-In order to install the development version of NumPy in ``site-packages``, use
-``python setup.py install --user``.
-
A similar approach to in-place builds and use of ``PYTHONPATH`` but outside the
source tree is to use::
- $ python setup.py install --prefix /some/owned/folder
+ $ pip install . --prefix /some/owned/folder
$ export PYTHONPATH=/some/owned/folder/lib/python3.4/site-packages
+NumPy uses a series of tests to probe the compiler and libc libraries for
+functions. The results are stored in ``_numpyconfig.h`` and ``config.h`` files
+using ``HAVE_XXX`` definitions. These tests are run during the ``build_src``
+phase of the ``_multiarray_umath`` module in the ``generate_config_h`` and
+``generate_numpyconfig_h`` functions. Since the output of these calls includes
+many compiler warnings and errors, by default it is run quietly. If you wish
+to see this output, you can run the ``build_src`` stage verbosely::
+
+  $ python setup.py build_src -v
+
Using virtualenvs
-----------------
@@ -125,6 +150,9 @@ the interpreter, tests can be run like this::
>>> np.test('full') # Also run tests marked as slow
>>> np.test('full', verbose=2) # Additionally print test name/file
+ An example of a successful test :
+ ``4686 passed, 362 skipped, 9 xfailed, 5 warnings in 213.99 seconds``
+
Or a similar way from the command line::
$ python -c "import numpy as np; np.test()"
@@ -142,9 +170,9 @@ That also takes extra arguments, like ``--pdb`` which drops you into the Python
debugger when a test fails or an exception is raised.
Running tests with `tox`_ is also supported. For example, to build NumPy and
-run the test suite with Python 3.4, use::
+run the test suite with Python 3.7, use::
- $ tox -e py34
+ $ tox -e py37
For more extensive information, see :ref:`testing-guidelines`
diff --git a/doc/source/dev/gitwash/development_workflow.rst b/doc/source/dev/development_workflow.rst
index 9561e25f7..900431374 100644
--- a/doc/source/dev/gitwash/development_workflow.rst
+++ b/doc/source/dev/development_workflow.rst
@@ -28,7 +28,7 @@ In short:
- *Core developers* If you want to push changes without
further review, see the notes :ref:`below <pushing-to-main>`.
-
+
This way of working helps to keep work well organized and the history
as clear as possible.
@@ -69,7 +69,7 @@ Overview
git status # Optional
git diff # Optional
git add modified_file
- git commit
+ git commit
# push the branch to your own Github repo
git push origin my-new-feature
@@ -112,38 +112,38 @@ In more detail
properly formatted and sufficiently detailed commit message. After saving
your message and closing the editor, your commit will be saved. For trivial
commits, a short commit message can be passed in through the command line
- using the ``-m`` flag. For example, ``git commit -am "ENH: Some message"``.
-
+ using the ``-m`` flag. For example, ``git commit -am "ENH: Some message"``.
+
In some cases, you will see this form of the commit command: ``git commit
-a``. The extra ``-a`` flag automatically commits all modified files and
removes all deleted files. This can save you some typing of numerous ``git
add`` commands; however, it can add unwanted changes to a commit if you're
not careful. For more information, see `why the -a flag?`_ - and the
- helpful use-case description in the `tangled working copy problem`_.
+ helpful use-case description in the `tangled working copy problem`_.
#. Push the changes to your forked repo on github_::
git push origin my-new-feature
For more information, see `git push`_.
-
+
.. note::
-
+
Assuming you have followed the instructions in these pages, git will create
a default link to your github_ repo called ``origin``. In git >= 1.7 you
can ensure that the link to origin is permanently set by using the
``--set-upstream`` option::
-
+
git push --set-upstream origin my-new-feature
-
+
From now on git_ will know that ``my-new-feature`` is related to the
``my-new-feature`` branch in your own github_ repo. Subsequent push calls
are then simplified to the following::
git push
-
+
You have to use ``--set-upstream`` for each new branch that you create.
-
+
It may be the case that while you were working on your edits, new commits have
been added to ``upstream`` that affect your work. In this case, follow the
@@ -194,12 +194,18 @@ Asking for your changes to be merged with the main repo
=======================================================
When you feel your work is finished, you can create a pull request (PR). Github
-has a nice help page that outlines the process for `filing pull requests`_.
+has a nice help page that outlines the process for `filing pull requests`_.
If your changes involve modifications to the API or addition/modification of a
-function, you should initiate a code review. This involves sending an email to
-the `NumPy mailing list`_ with a link to your PR along with a description of
-and a motivation for your changes.
+function, you should
+
+- send an email to the `NumPy mailing list`_ with a link to your PR along with
+ a description of and a motivation for your changes. This may generate
+ changes and feedback. It might be prudent to start with this step if your
+ change may be controversial.
+- add a release note to the ``doc/release/upcoming_changes/`` directory,
+ following the instructions and format in the
+ ``doc/release/upcoming_changes/README.rst`` file.
.. _rebasing-on-master:
@@ -500,11 +506,11 @@ them to ``upstream`` as follows:
git push upstream my-feature-branch:master
-.. note::
+.. note::
It's usually a good idea to use the ``-n`` flag to ``git push`` to check
first that you're about to push the changes you want to the place you
want.
-.. include:: git_links.inc
+.. include:: gitwash/git_links.inc
diff --git a/doc/source/dev/gitwash/development_setup.rst b/doc/source/dev/gitwash/development_setup.rst
index 1ebd4b486..9027dda64 100644
--- a/doc/source/dev/gitwash/development_setup.rst
+++ b/doc/source/dev/gitwash/development_setup.rst
@@ -25,6 +25,8 @@ to the instructions at http://help.github.com/forking/ - please see that
page for more detail. We're repeating some of it here just to give the
specifics for the NumPy_ project, and to suggest some default names.
+.. _set-up-and-configure-a-github-account:
+
Set up and configure a github_ account
======================================
diff --git a/doc/source/dev/gitwash/following_latest.rst b/doc/source/dev/gitwash/following_latest.rst
index ad497bf9a..0e98b4ec4 100644
--- a/doc/source/dev/gitwash/following_latest.rst
+++ b/doc/source/dev/gitwash/following_latest.rst
@@ -1,9 +1,5 @@
.. _following-latest:
-=============================
- Following the latest source
-=============================
-
These are the instructions if you just want to follow the latest
*NumPy* source, but you don't need to do any development for now.
If you do want to contribute a patch (excellent!) or do more extensive
diff --git a/doc/source/dev/gitwash/git_development.rst b/doc/source/dev/gitwash/git_development.rst
deleted file mode 100644
index 5d7d47f89..000000000
--- a/doc/source/dev/gitwash/git_development.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. _git-development:
-
-=====================
- Git for development
-=====================
-
-Contents:
-
-.. toctree::
- :maxdepth: 2
-
- development_setup
- configure_git
- dot2_dot3
diff --git a/doc/source/dev/gitwash/git_intro.rst b/doc/source/dev/gitwash/git_intro.rst
index 3ce322f8f..9d596d4d4 100644
--- a/doc/source/dev/gitwash/git_intro.rst
+++ b/doc/source/dev/gitwash/git_intro.rst
@@ -1,42 +1,8 @@
-============
-Introduction
-============
-
-These pages describe a git_ and github_ workflow for the NumPy_
-project.
-
-There are several different workflows here, for different ways of
-working with *NumPy*.
-
-This is not a comprehensive git_ reference, it's just a workflow for our
-own project. It's tailored to the github_ hosting service. You may well
-find better or quicker ways of getting stuff done with git_, but these
-should get you started.
-
-For general resources for learning git_ see :ref:`git-resources`.
-
-.. _install-git:
-
Install git
===========
-Overview
---------
-
-================ =============
-Debian / Ubuntu ``sudo apt-get install git-core``
-Fedora ``sudo yum install git-core``
-Windows Download and install msysGit_
-OS X Use the git-osx-installer_
-================ =============
-
-In detail
----------
-
-See the git_ page for the most recent information.
-
-Have a look at the github_ install help pages available from `github help`_
-
-There are good instructions here: http://book.git-scm.com/2_installing_git.html
+Developing with git can be done entirely without github. Git is a distributed
+version control system. In order to use git on your machine you must `install
+it`_.
.. include:: git_links.inc
diff --git a/doc/source/dev/gitwash/git_links.inc b/doc/source/dev/gitwash/git_links.inc
index cebbb3a67..f69a3cf62 100644
--- a/doc/source/dev/gitwash/git_links.inc
+++ b/doc/source/dev/gitwash/git_links.inc
@@ -10,10 +10,9 @@
.. git stuff
.. _git: https://git-scm.com/
-.. _github: https://github.com
+.. _github: https://github.com/numpy/numpy
.. _github help: https://help.github.com
-.. _msysgit: https://code.google.com/p/msysgit/downloads/list
-.. _git-osx-installer: https://code.google.com/p/git-osx-installer/downloads/list
+.. _`install it`: https://git-scm.com/downloads
.. _subversion: http://subversion.tigris.org/
.. _git cheat sheet: http://cheat.errtheblog.com/s/git
.. _pro git book: https://git-scm.com/book/
diff --git a/doc/source/dev/gitwash/index.rst b/doc/source/dev/gitwash/index.rst
index b867bbd97..afbb5e019 100644
--- a/doc/source/dev/gitwash/index.rst
+++ b/doc/source/dev/gitwash/index.rst
@@ -1,7 +1,22 @@
.. _using-git:
+.. _git-development:
+
+=====================
+ Git for development
+=====================
+
+These pages describe a general git_ and github_ workflow.
+
+This is not a comprehensive git_ reference. It's tailored to the github_
+hosting service. You may well find better or quicker ways of getting stuff done
+with git_, but these should get you started.
+
+For general resources for learning git_ see :ref:`git-resources`.
+
+Have a look at the github_ install help pages available from `github help`_
+
+.. _install-git:
-Working with *NumPy* source code
-================================
Contents:
@@ -10,6 +25,9 @@ Contents:
git_intro
following_latest
- git_development
- development_workflow
+ development_setup
+ configure_git
+ dot2_dot3
git_resources
+
+.. include:: git_links.inc
diff --git a/doc/source/dev/governance/people.rst b/doc/source/dev/governance/people.rst
index 7b8d3cab0..10af7f221 100644
--- a/doc/source/dev/governance/people.rst
+++ b/doc/source/dev/governance/people.rst
@@ -48,7 +48,7 @@ NumFOCUS Subcommittee
* Jaime Fernández del Río
-* Nathaniel Smith
+* Sebastian Berg
* External member: Thomas Caswell
@@ -56,10 +56,7 @@ NumFOCUS Subcommittee
Institutional Partners
----------------------
-* UC Berkeley (Stefan van der Walt)
+* UC Berkeley (Stefan van der Walt, Matti Picus, Tyler Reddy, Sebastian Berg)
+* Quansight (Ralf Gommers, Hameer Abbasi)
-Document history
-----------------
-
-https://github.com/numpy/numpy/commits/master/doc/source/dev/governance/governance.rst
diff --git a/doc/source/dev/index.rst b/doc/source/dev/index.rst
index 9ce04cc1b..306c15069 100644
--- a/doc/source/dev/index.rst
+++ b/doc/source/dev/index.rst
@@ -2,13 +2,263 @@
Contributing to NumPy
#####################
+Not a coder? Not a problem! NumPy is multi-faceted, and we can use a lot of help.
+These are all activities we'd like to get help with (they're all important, so
+we list them in alphabetical order):
+
+- Code maintenance and development
+- Community coordination
+- DevOps
+- Developing educational content & narrative documentation
+- Writing technical documentation
+- Fundraising
+- Project management
+- Marketing
+- Translating content
+- Website design and development
+
+The rest of this document discusses working on the NumPy code base and documentation.
+We're in the process of updating our descriptions of other activities and roles.
+If you are interested in these other activities, please contact us!
+You can do this via
+the `numpy-discussion mailing list <https://scipy.org/scipylib/mailing-lists.html>`__,
+or on GitHub (open an issue or comment on a relevant issue). These are our preferred
+communication channels (open source is open by nature!), however if you prefer
+to discuss in private first, please reach out to our community coordinators
+at `numpy-team@googlegroups.com` or `numpy-team.slack.com` (send an email to
+`numpy-team@googlegroups.com` for an invite the first time).
+
+
+Development process - summary
+=============================
+
+Here's the short summary, complete TOC links are below:
+
+1. If you are a first-time contributor:
+
+ * Go to `https://github.com/numpy/numpy
+ <https://github.com/numpy/numpy>`_ and click the
+ "fork" button to create your own copy of the project.
+
+ * Clone the project to your local computer::
+
+ git clone https://github.com/your-username/numpy.git
+
+ * Change the directory::
+
+ cd numpy
+
+ * Add the upstream repository::
+
+ git remote add upstream https://github.com/numpy/numpy.git
+
+ * Now, `git remote -v` will show two remote repositories named:
+
+ - ``upstream``, which refers to the ``numpy`` repository
+ - ``origin``, which refers to your personal fork
+
+2. Develop your contribution:
+
+ * Pull the latest changes from upstream::
+
+ git checkout master
+ git pull upstream master
+
+ * Create a branch for the feature you want to work on. Since the
+ branch name will appear in the merge message, use a sensible name
+ such as 'linspace-speedups'::
+
+ git checkout -b linspace-speedups
+
+ * Commit locally as you progress (``git add`` and ``git commit``)
+ Use a :ref:`properly formatted<writing-the-commit-message>` commit message,
+ write tests that fail before your change and pass afterward, run all the
+ :ref:`tests locally<development-environment>`. Be sure to document any
+ changed behavior in docstrings, keeping to the NumPy docstring
+ :ref:`standard<howto-document>`.
+
+3. To submit your contribution:
+
+ * Push your changes back to your fork on GitHub::
+
+ git push origin linspace-speedups
+
+ * Enter your GitHub username and password (repeat contributors or advanced
+ users can remove this step by connecting to GitHub with
+     :ref:`SSH <set-up-and-configure-a-github-account>`).
+
+ * Go to GitHub. The new branch will show up with a green Pull Request
+ button. Make sure the title and message are clear, concise, and self-
+ explanatory. Then click the button to submit it.
+
+ * If your commit introduces a new feature or changes functionality, post on
+ the `mailing list`_ to explain your changes. For bug fixes, documentation
+ updates, etc., this is generally not necessary, though if you do not get
+ any reaction, do feel free to ask for review.
+
+4. Review process:
+
+ * Reviewers (the other developers and interested community members) will
+ write inline and/or general comments on your Pull Request (PR) to help
+ you improve its implementation, documentation and style. Every single
+ developer working on the project has their code reviewed, and we've come
+ to see it as friendly conversation from which we all learn and the
+ overall code quality benefits. Therefore, please don't let the review
+ discourage you from contributing: its only aim is to improve the quality
+     of the project, not to criticize (we are, after all, very grateful for the
+ time you're donating!).
+
+ * To update your PR, make your changes on your local repository, commit,
+ **run tests, and only if they succeed** push to your fork. As soon as
+ those changes are pushed up (to the same branch as before) the PR will
+ update automatically. If you have no idea how to fix the test failures,
+ you may push your changes anyway and ask for help in a PR comment.
+
+ * Various continuous integration (CI) services are triggered after each PR
+ update to build the code, run unit tests, measure code coverage and check
+ coding style of your branch. The CI tests must pass before your PR can be
+ merged. If CI fails, you can find out why by clicking on the "failed"
+ icon (red cross) and inspecting the build and test log. To avoid overuse
+ and waste of this resource,
+ :ref:`test your work<recommended-development-setup>` locally before
+ committing.
+
+ * A PR must be **approved** by at least one core team member before merging.
+ Approval means the core team member has carefully reviewed the changes,
+ and the PR is ready for merging.
+
+5. Document changes
+
+   Beyond changes to a function's docstring and possible description in the
+ general documentation, if your change introduces any user-facing
+ modifications they may need to be mentioned in the release notes.
+ To add your change to the release notes, you need to create a short file
+ with a summary and place it in ``doc/release/upcoming_changes``.
+ The file ``doc/release/upcoming_changes/README.rst`` details the format and
+ filename conventions.
+
+   If your change introduces a deprecation, make sure to discuss this first on
+   GitHub or the mailing list. If agreement on the deprecation is
+ reached, follow `NEP 23 deprecation policy <http://www.numpy.org/neps/
+ nep-0023-backwards-compatibility.html>`_ to add the deprecation.
+
+6. Cross referencing issues
+
+   If the PR relates to any issues, you can add the text ``xref gh-xxxx``,
+   where ``xxxx`` is the issue number, to GitHub comments. Likewise, if the PR
+ solves an issue, replace the ``xref`` with ``closes``, ``fixes`` or any of
+ the other flavors `github accepts <https://help.github.com/en/articles/
+ closing-issues-using-keywords>`_.
+
+ In the source code, be sure to preface any issue or PR reference with
+ ``gh-xxxx``.
+
+For a more detailed discussion, read on and follow the links at the bottom of
+this page.
+
+Divergence between ``upstream/master`` and your feature branch
+--------------------------------------------------------------
+
+If GitHub indicates that the branch of your Pull Request can no longer
+be merged automatically, you have to incorporate changes that have been made
+since you started into your branch. Our recommended way to do this is to
+:ref:`rebase on master<rebasing-on-master>`.
+
+Guidelines
+----------
+
+* All code should have tests (see `test coverage`_ below for more details).
+* All code should be `documented <https://numpydoc.readthedocs.io/
+ en/latest/format.html#docstring-standard>`_.
+* No changes are ever committed without review and approval by a core
+  team member. Please ask politely on the PR or on the `mailing list`_ if you
+ get no response to your pull request within a week.
+
+Stylistic Guidelines
+--------------------
+
+* Set up your editor to follow `PEP 8 <https://www.python.org/dev/peps/
+ pep-0008/>`_ (remove trailing white space, no tabs, etc.). Check code with
+ pyflakes / flake8.
+
+* Use numpy data types instead of strings (``np.uint8`` instead of
+ ``"uint8"``).
+
+* Use the following import conventions::
+
+ import numpy as np
+
+* For C code, see the :ref:`numpy-c-style-guide<style_guide>`
+
+
+Test coverage
+-------------
+
+Pull requests (PRs) that modify code should either have new tests, or modify existing
+tests to fail before the PR and pass afterwards. You should :ref:`run the tests
+<development-environment>` before pushing a PR.
+
+Tests for a module should ideally cover all code in that module,
+i.e., statement coverage should be at 100%.
+
+To measure the test coverage, install
+`pytest-cov <https://pytest-cov.readthedocs.io/en/latest/>`__
+and then run::
+
+ $ python runtests.py --coverage
+
+This will create a report in ``build/coverage``, which can be viewed with::
+
+ $ firefox build/coverage/index.html
+
+Building docs
+-------------
+
+To build docs, run ``make`` from the ``doc`` directory. ``make help`` lists
+all targets. For example, to build the HTML documentation, you can run:
+
+.. code:: sh
+
+ make html
+
+Then, all the HTML files will be generated in ``doc/build/html/``.
+Since the documentation is based on docstrings, the appropriate version of
+numpy must be installed in the host python used to run sphinx.
+
+Requirements
+~~~~~~~~~~~~
+
+`Sphinx <http://www.sphinx-doc.org/en/stable/>`__ is needed to build
+the documentation. Matplotlib, SciPy, and IPython are also required.
+
+Fixing Warnings
+~~~~~~~~~~~~~~~
+
+- "citation not found: R###" There is probably an underscore after a
+ reference in the first line of a docstring (e.g. [1]\_). Use this
+ method to find the source file: $ cd doc/build; grep -rin R####
+
+- "Duplicate citation R###, other instance in..." There is probably a
+ [2] without a [1] in one of the docstrings
+
+Development process - details
+=============================
+
+The rest of the story
+
.. toctree::
- :maxdepth: 3
+ :maxdepth: 2
conduct/code_of_conduct
- gitwash/index
+ Git Basics <gitwash/index>
development_environment
+ development_workflow
+ ../benchmarking
+ style_guide
releasing
governance/index
-For core developers: see :ref:`development-workflow`.
+NumPy-specific workflow is in :ref:`numpy-development-workflow
+<development-workflow>`.
+
+.. _`mailing list`: https://mail.python.org/mailman/listinfo/numpy-devel
diff --git a/doc/source/dev/gitwash/pull_button.png b/doc/source/dev/pull_button.png
index e5031681b..e5031681b 100644
--- a/doc/source/dev/gitwash/pull_button.png
+++ b/doc/source/dev/pull_button.png
Binary files differ
diff --git a/doc/source/dev/style_guide.rst b/doc/source/dev/style_guide.rst
new file mode 100644
index 000000000..bede3424d
--- /dev/null
+++ b/doc/source/dev/style_guide.rst
@@ -0,0 +1,8 @@
+.. _style_guide:
+
+===================
+NumPy C Style Guide
+===================
+
+.. include:: ../../C_STYLE_GUIDE.rst.txt
+ :start-line: 4
diff --git a/doc/source/docs/howto_build_docs.rst b/doc/source/docs/howto_build_docs.rst
index cdf490c37..6deacda5c 100644
--- a/doc/source/docs/howto_build_docs.rst
+++ b/doc/source/docs/howto_build_docs.rst
@@ -5,7 +5,7 @@ Building the NumPy API and reference docs
=========================================
We currently use Sphinx_ for generating the API and reference
-documentation for NumPy. You will need Sphinx 1.0.1 or newer.
+documentation for NumPy. You will need Sphinx >= 1.8.3 and <= 1.8.5.
If you only want to get the documentation, note that pre-built
versions can be found at
@@ -30,11 +30,9 @@ In addition, building the documentation requires the Sphinx extension
`plot_directive`, which is shipped with Matplotlib_. This Sphinx extension can
be installed by installing Matplotlib. You will also need python3.6.
-Since large parts of the main documentation are stored in
-docstrings, you will need to first build NumPy, and install it so
-that the correct version is imported by
-
- >>> import numpy
+Since large parts of the main documentation are obtained from numpy via
+``import numpy`` and examining the docstrings, you will need to first build
+NumPy, and install it so that the correct version is imported.
Note that you can eg. install NumPy to a temporary location and set
the PYTHONPATH environment variable appropriately.
@@ -46,8 +44,11 @@ generate the docs, so write::
make html
in the ``doc/`` directory. If all goes well, this will generate a
-``build/html`` subdirectory containing the built documentation. Note
-that building the documentation on Windows is currently not actively
+``build/html`` subdirectory containing the built documentation. If you get
+a message about ``installed numpy != current repo git version``, you must
+either override the check by setting ``GITVER`` or re-install NumPy.
+
+Note that building the documentation on Windows is currently not actively
supported, though it should be possible. (See Sphinx_ documentation
for more information.)
diff --git a/doc/source/f2py/distutils.rst b/doc/source/f2py/distutils.rst
index fdcd38468..71f6eab5a 100644
--- a/doc/source/f2py/distutils.rst
+++ b/doc/source/f2py/distutils.rst
@@ -26,7 +26,7 @@ sources, call F2PY to construct extension modules, etc.
:mod:`numpy.distutils` extends ``distutils`` with the following features:
-* ``Extension`` class argument ``sources`` may contain Fortran source
+* :class:`Extension` class argument ``sources`` may contain Fortran source
files. In addition, the list ``sources`` may contain at most one
F2PY signature file, and then the name of an Extension module must
match with the ``<modulename>`` used in signature file. It is
@@ -37,7 +37,7 @@ sources, call F2PY to construct extension modules, etc.
to scan Fortran source files for routine signatures to construct the
wrappers to Fortran codes.
- Additional options to F2PY process can be given using ``Extension``
+ Additional options to F2PY process can be given using :class:`Extension`
class argument ``f2py_options``.
* The following new ``distutils`` commands are defined:
diff --git a/doc/source/f2py/run_main_session.dat b/doc/source/f2py/run_main_session.dat
index b9a7e1b0d..be6cacd22 100644
--- a/doc/source/f2py/run_main_session.dat
+++ b/doc/source/f2py/run_main_session.dat
@@ -8,7 +8,7 @@ Post-processing...
Building modules...
Building module "scalar"...
Wrote C/API module "scalar" to file "./scalarmodule.c"
->>> printr(r)
+>>> print(r)
{'scalar': {'h': ['/home/users/pearu/src_cvs/f2py/src/fortranobject.h'],
'csrc': ['./scalarmodule.c',
'/home/users/pearu/src_cvs/f2py/src/fortranobject.c']}}
diff --git a/doc/source/f2py/usage.rst b/doc/source/f2py/usage.rst
index 0f5068e0e..5043ec430 100644
--- a/doc/source/f2py/usage.rst
+++ b/doc/source/f2py/usage.rst
@@ -214,32 +214,7 @@ Python module ``numpy.f2py``
The current Python interface to the ``f2py`` module is not mature and
may change in the future.
-The following functions are provided by the ``numpy.f2py`` module:
-``run_main(<list>)``
- Equivalent to running::
+.. automodule:: numpy.f2py
+ :members:
- f2py <args>
-
- where ``<args>=string.join(<list>,' ')``, but in Python. Unless
- ``-h`` is used, this function returns a dictionary containing
- information on generated modules and their dependencies on source
- files. For example, the command ``f2py -m scalar scalar.f`` can be
- executed from Python as follows
-
- .. include:: run_main_session.dat
- :literal:
-
- You cannot build extension modules with this function, that is,
- using ``-c`` is not allowed. Use ``compile`` command instead, see
- below.
-
-``compile(source, modulename='untitled', extra_args='', verbose=1, source_fn=None)``
- Build extension module from Fortran 77 source string ``source``.
- Return 0 if successful.
- Note that this function actually calls ``f2py -c ..`` from shell to
- ensure safety of the current Python process.
- For example,
-
- .. include:: compile_session.dat
- :literal:
diff --git a/doc/source/dev/alignment.rst b/doc/source/reference/alignment.rst
index f067f0d03..ebc8f353c 100644
--- a/doc/source/dev/alignment.rst
+++ b/doc/source/reference/alignment.rst
@@ -1,8 +1,10 @@
.. _alignment:
+Memory Alignment
+================
Numpy Alignment Goals
-=====================
+---------------------
There are three use-cases related to memory alignment in numpy (as of 1.14):
@@ -32,8 +34,16 @@ datatype is implemented as ``struct { float real, imag; }``. This has "true"
alignment of 4 and "uint" alignment of 8 (equal to the true alignment of
``uint64``).
+Some cases where uint and true alignment are different (default gcc linux):
+ arch type true-aln uint-aln
+ ---- ---- -------- --------
+ x86_64 complex64 4 8
+ x86_64 float128 16 8
+ x86 float96 4 -
+
+
Variables in Numpy which control and describe alignment
-=======================================================
+-------------------------------------------------------
There are 4 relevant uses of the word ``align`` used in numpy:
@@ -60,7 +70,7 @@ There are 4 relevant uses of the word ``align`` used in numpy:
an analagous way to how ``IsAligned`` checks for true-alignment.
Consequences of alignment
-=========================
+-------------------------
Here is how the variables above are used:
@@ -80,17 +90,15 @@ Here is how the variables above are used:
appropriate N. Otherwise numpy copies by doing ``memcpy(dst, src, N)``.
5. Nditer code: Since this often calls the strided copy code, it must
check for "uint alignment".
- 6. Cast code: if the array is "uint aligned" this will essentially do
- ``*dst = CASTFUNC(*src)``. If not, it does
+ 6. Cast code: This checks for "true" alignment, as it does
+ ``*dst = CASTFUNC(*src)`` if aligned. Otherwise, it does
``memmove(srcval, src); dstval = CASTFUNC(srcval); memmove(dst, dstval)``
where dstval/srcval are aligned.
-Note that in principle, only "true alignment" is required for casting code.
-However, because the casting code and copy code are deeply intertwined they
-both use "uint" alignment. This should be safe assuming uint alignment is
-always larger than true alignment, though it can cause unnecessary buffering if
-an array is "true aligned" but not "uint aligned". If there is ever a big
-rewrite of this code it would be good to allow them to use different
-alignments.
+Note that the strided-copy and strided-cast code are deeply intertwined and so
+any arrays being processed by them must be both uint and true aligned, even
+though the copy-code only needs uint alignment and the cast code only true
+alignment. If there is ever a big rewrite of this code it would be good to
+allow them to use different alignments.
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index f17cb932a..39410b2a4 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -6,8 +6,15 @@ Standard array subclasses
.. currentmodule:: numpy
-The :class:`ndarray` in NumPy is a "new-style" Python
-built-in-type. Therefore, it can be inherited from (in Python or in C)
+.. note::
+
+ Subclassing a ``numpy.ndarray`` is possible but if your goal is to create
+ an array with *modified* behavior, as do dask arrays for distributed
+ computation and cupy arrays for GPU-based computation, subclassing is
+ discouraged. Instead, using numpy's
+ :ref:`dispatch mechanism <basics.dispatch>` is recommended.
+
+The :class:`ndarray` can be inherited from (in Python or in C)
if desired. Therefore, it can form a foundation for many useful
classes. Often whether to sub-class the array object or to simply use
the core array component as an internal part of a new class is a
@@ -43,10 +50,6 @@ NumPy provides several hooks that classes can customize:
.. versionadded:: 1.13
- .. note:: The API is `provisional
- <https://docs.python.org/3/glossary.html#term-provisional-api>`_,
- i.e., we do not yet guarantee backward compatibility.
-
Any class, ndarray subclass or not, can define this method or set it to
:obj:`None` in order to override the behavior of NumPy's ufuncs. This works
quite similarly to Python's ``__mul__`` and other binary operation routines.
@@ -79,7 +82,7 @@ NumPy provides several hooks that classes can customize:
:func:`~numpy.matmul`, which currently is not a Ufunc, but could be
relatively easily be rewritten as a (set of) generalized Ufuncs. The
same may happen with functions such as :func:`~numpy.median`,
- :func:`~numpy.min`, and :func:`~numpy.argsort`.
+ :func:`~numpy.amin`, and :func:`~numpy.argsort`.
Like with some other special methods in python, such as ``__hash__`` and
``__iter__``, it is possible to indicate that your class does *not*
@@ -151,6 +154,121 @@ NumPy provides several hooks that classes can customize:
:func:`__array_prepare__`, :data:`__array_priority__` mechanism
described below for ufuncs (which may eventually be deprecated).
+.. py:method:: class.__array_function__(func, types, args, kwargs)
+
+ .. versionadded:: 1.16
+
+ .. note::
+
+ - In NumPy 1.17, the protocol is enabled by default, but can be disabled
+ with ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=0``.
+ - In NumPy 1.16, you need to set the environment variable
+ ``NUMPY_EXPERIMENTAL_ARRAY_FUNCTION=1`` before importing NumPy to use
+ NumPy function overrides.
+      - Eventually, expect ``__array_function__`` to always be enabled.
+
+ - ``func`` is an arbitrary callable exposed by NumPy's public API,
+ which was called in the form ``func(*args, **kwargs)``.
+ - ``types`` is a `collection <collections.abc.Collection>`_
+ of unique argument types from the original NumPy function call that
+ implement ``__array_function__``.
+ - The tuple ``args`` and dict ``kwargs`` are directly passed on from the
+ original call.
+
+ As a convenience for ``__array_function__`` implementors, ``types``
+ provides all argument types with an ``'__array_function__'`` attribute.
+ This allows implementors to quickly identify cases where they should defer
+ to ``__array_function__`` implementations on other arguments.
+ Implementations should not rely on the iteration order of ``types``.
+
+ Most implementations of ``__array_function__`` will start with two
+ checks:
+
+ 1. Is the given function something that we know how to overload?
+ 2. Are all arguments of a type that we know how to handle?
+
+ If these conditions hold, ``__array_function__`` should return the result
+ from calling its implementation for ``func(*args, **kwargs)``. Otherwise,
+ it should return the sentinel value ``NotImplemented``, indicating that the
+ function is not implemented by these types.
+
+ There are no general requirements on the return value from
+ ``__array_function__``, although most sensible implementations should
+ probably return array(s) with the same type as one of the function's
+ arguments.
+
+   It may also be convenient to define a custom decorator (``implements``
+ below) for registering ``__array_function__`` implementations.
+
+ .. code:: python
+
+ HANDLED_FUNCTIONS = {}
+
+ class MyArray:
+ def __array_function__(self, func, types, args, kwargs):
+ if func not in HANDLED_FUNCTIONS:
+ return NotImplemented
+ # Note: this allows subclasses that don't override
+ # __array_function__ to handle MyArray objects
+ if not all(issubclass(t, MyArray) for t in types):
+ return NotImplemented
+ return HANDLED_FUNCTIONS[func](*args, **kwargs)
+
+ def implements(numpy_function):
+ """Register an __array_function__ implementation for MyArray objects."""
+ def decorator(func):
+ HANDLED_FUNCTIONS[numpy_function] = func
+ return func
+ return decorator
+
+ @implements(np.concatenate)
+ def concatenate(arrays, axis=0, out=None):
+ ... # implementation of concatenate for MyArray objects
+
+ @implements(np.broadcast_to)
+ def broadcast_to(array, shape):
+ ... # implementation of broadcast_to for MyArray objects
+
+ Note that it is not required for ``__array_function__`` implementations to
+ include *all* of the corresponding NumPy function's optional arguments
+ (e.g., ``broadcast_to`` above omits the irrelevant ``subok`` argument).
+ Optional arguments are only passed in to ``__array_function__`` if they
+ were explicitly used in the NumPy function call.
+
+ Just like the case for builtin special methods like ``__add__``, properly
+ written ``__array_function__`` methods should always return
+ ``NotImplemented`` when an unknown type is encountered. Otherwise, it will
+ be impossible to correctly override NumPy functions from another object
+ if the operation also includes one of your objects.
+
+ For the most part, the rules for dispatch with ``__array_function__``
+ match those for ``__array_ufunc__``. In particular:
+
+ - NumPy will gather implementations of ``__array_function__`` from all
+ specified inputs and call them in order: subclasses before
+ superclasses, and otherwise left to right. Note that in some edge cases
+ involving subclasses, this differs slightly from the
+ `current behavior <https://bugs.python.org/issue30140>`_ of Python.
+ - Implementations of ``__array_function__`` indicate that they can
+ handle the operation by returning any value other than
+ ``NotImplemented``.
+ - If all ``__array_function__`` methods return ``NotImplemented``,
+ NumPy will raise ``TypeError``.
+
+   If no ``__array_function__`` methods exist, NumPy will default to calling
+ its own implementation, intended for use on NumPy arrays. This case arises,
+ for example, when all array-like arguments are Python numbers or lists.
+ (NumPy arrays do have a ``__array_function__`` method, given below, but it
+ always returns ``NotImplemented`` if any argument other than a NumPy array
+ subclass implements ``__array_function__``.)
+
+ One deviation from the current behavior of ``__array_ufunc__`` is that
+ NumPy will only call ``__array_function__`` on the *first* argument of each
+ unique type. This matches Python's `rule for calling reflected methods
+ <https://docs.python.org/3/reference/datamodel.html#object.__ror__>`_, and
+ this ensures that checking overloads has acceptable performance even when
+ there are a large number of overloaded arguments.
+
.. py:method:: class.__array_finalize__(obj)
This method is called whenever the system internally allocates a
@@ -452,7 +570,7 @@ object, then the Python code::
some code involving val
...
-calls ``val = myiter.next()`` repeatedly until :exc:`StopIteration` is
+calls ``val = next(myiter)`` repeatedly until :exc:`StopIteration` is
raised by the iterator. There are several ways to iterate over an
array that may be useful: default iteration, flat iteration, and
:math:`N`-dimensional enumeration.
diff --git a/doc/source/reference/arrays.datetime.rst b/doc/source/reference/arrays.datetime.rst
index e64d0c17e..2225eedb3 100644
--- a/doc/source/reference/arrays.datetime.rst
+++ b/doc/source/reference/arrays.datetime.rst
@@ -26,7 +26,9 @@ be either a :ref:`date unit <arrays.dtypes.dateunits>` or a
:ref:`time unit <arrays.dtypes.timeunits>`. The date units are years ('Y'),
months ('M'), weeks ('W'), and days ('D'), while the time units are
hours ('h'), minutes ('m'), seconds ('s'), milliseconds ('ms'), and
-some additional SI-prefix seconds-based units.
+some additional SI-prefix seconds-based units. The datetime64 data type
+also accepts the string "NAT", in any combination of lowercase/uppercase
+letters, for a "Not A Time" value.
.. admonition:: Example
@@ -50,6 +52,11 @@ some additional SI-prefix seconds-based units.
>>> np.datetime64('2005-02-25T03:30')
numpy.datetime64('2005-02-25T03:30')
+ NAT (not a time):
+
+ >>> numpy.datetime64('nat')
+ numpy.datetime64('NaT')
+
When creating an array of datetimes from a string, it is still possible
to automatically select the unit from the inputs, by using the
datetime type with generic units.
@@ -100,7 +107,21 @@ Datetime and Timedelta Arithmetic
NumPy allows the subtraction of two Datetime values, an operation which
produces a number with a time unit. Because NumPy doesn't have a physical
quantities system in its core, the timedelta64 data type was created
-to complement datetime64.
+to complement datetime64. The arguments for timedelta64 are a number,
+to represent the number of units, and a date/time unit, such as
+(D)ay, (M)onth, (Y)ear, (h)ours, (m)inutes, or (s)econds. The timedelta64
+data type also accepts the string "NAT" in place of the number for a "Not A Time" value.
+
+.. admonition:: Example
+
+ >>> numpy.timedelta64(1, 'D')
+ numpy.timedelta64(1,'D')
+
+ >>> numpy.timedelta64(4, 'h')
+ numpy.timedelta64(4,'h')
+
+ >>> numpy.timedelta64('nAt')
+ numpy.timedelta64('NaT')
Datetimes and Timedeltas work together to provide ways for
simple datetime calculations.
@@ -119,6 +140,15 @@ simple datetime calculations.
>>> np.timedelta64(1,'W') / np.timedelta64(1,'D')
7.0
+ >>> np.timedelta64(1,'W') % np.timedelta64(10,'D')
+ numpy.timedelta64(7,'D')
+
+ >>> numpy.datetime64('nat') - numpy.datetime64('2009-01-01')
+ numpy.timedelta64('NaT','D')
+
+ >>> numpy.datetime64('2009-01-01') + numpy.timedelta64('nat')
+ numpy.datetime64('NaT')
+
There are two Timedelta units ('Y', years and 'M', months) which are treated
specially, because how much time they represent changes depending
on when they are used. While a timedelta day unit is equivalent to
@@ -363,132 +393,4 @@ As a corollary to this change, we no longer prohibit casting between datetimes
with date units and datetimes with timeunits. With timezone naive datetimes,
the rule for casting from dates to times is no longer ambiguous.
-.. _pandas: http://pandas.pydata.org
-
-
-Differences Between 1.6 and 1.7 Datetimes
-=========================================
-
-The NumPy 1.6 release includes a more primitive datetime data type
-than 1.7. This section documents many of the changes that have taken
-place.
-
-String Parsing
-``````````````
-
-The datetime string parser in NumPy 1.6 is very liberal in what it accepts,
-and silently allows invalid input without raising errors. The parser in
-NumPy 1.7 is quite strict about only accepting ISO 8601 dates, with a few
-convenience extensions. 1.6 always creates microsecond (us) units by
-default, whereas 1.7 detects a unit based on the format of the string.
-Here is a comparison.::
-
- # NumPy 1.6.1
- >>> np.datetime64('1979-03-22')
- 1979-03-22 00:00:00
- # NumPy 1.7.0
- >>> np.datetime64('1979-03-22')
- numpy.datetime64('1979-03-22')
-
- # NumPy 1.6.1, unit default microseconds
- >>> np.datetime64('1979-03-22').dtype
- dtype('datetime64[us]')
- # NumPy 1.7.0, unit of days detected from string
- >>> np.datetime64('1979-03-22').dtype
- dtype('<M8[D]')
-
- # NumPy 1.6.1, ignores invalid part of string
- >>> np.datetime64('1979-03-2corruptedstring')
- 1979-03-02 00:00:00
- # NumPy 1.7.0, raises error for invalid input
- >>> np.datetime64('1979-03-2corruptedstring')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: Error parsing datetime string "1979-03-2corruptedstring" at position 8
-
- # NumPy 1.6.1, 'nat' produces today's date
- >>> np.datetime64('nat')
- 2012-04-30 00:00:00
- # NumPy 1.7.0, 'nat' produces not-a-time
- >>> np.datetime64('nat')
- numpy.datetime64('NaT')
-
- # NumPy 1.6.1, 'garbage' produces today's date
- >>> np.datetime64('garbage')
- 2012-04-30 00:00:00
- # NumPy 1.7.0, 'garbage' raises an exception
- >>> np.datetime64('garbage')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: Error parsing datetime string "garbage" at position 0
-
- # NumPy 1.6.1, can't specify unit in scalar constructor
- >>> np.datetime64('1979-03-22T19:00', 'h')
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: function takes at most 1 argument (2 given)
- # NumPy 1.7.0, unit in scalar constructor
- >>> np.datetime64('1979-03-22T19:00', 'h')
- numpy.datetime64('1979-03-22T19:00-0500','h')
-
- # NumPy 1.6.1, reads ISO 8601 strings w/o TZ as UTC
- >>> np.array(['1979-03-22T19:00'], dtype='M8[h]')
- array([1979-03-22 19:00:00], dtype=datetime64[h])
- # NumPy 1.7.0, reads ISO 8601 strings w/o TZ as local (ISO specifies this)
- >>> np.array(['1979-03-22T19:00'], dtype='M8[h]')
- array(['1979-03-22T19-0500'], dtype='datetime64[h]')
-
- # NumPy 1.6.1, doesn't parse all ISO 8601 strings correctly
- >>> np.array(['1979-03-22T12'], dtype='M8[h]')
- array([1979-03-22 00:00:00], dtype=datetime64[h])
- >>> np.array(['1979-03-22T12:00'], dtype='M8[h]')
- array([1979-03-22 12:00:00], dtype=datetime64[h])
- # NumPy 1.7.0, handles this case correctly
- >>> np.array(['1979-03-22T12'], dtype='M8[h]')
- array(['1979-03-22T12-0500'], dtype='datetime64[h]')
- >>> np.array(['1979-03-22T12:00'], dtype='M8[h]')
- array(['1979-03-22T12-0500'], dtype='datetime64[h]')
-
-Unit Conversion
-```````````````
-
-The 1.6 implementation of datetime does not convert between units correctly.::
-
- # NumPy 1.6.1, the representation value is untouched
- >>> np.array(['1979-03-22'], dtype='M8[D]')
- array([1979-03-22 00:00:00], dtype=datetime64[D])
- >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]')
- array([2250-08-01 00:00:00], dtype=datetime64[M])
- # NumPy 1.7.0, the representation is scaled accordingly
- >>> np.array(['1979-03-22'], dtype='M8[D]')
- array(['1979-03-22'], dtype='datetime64[D]')
- >>> np.array(['1979-03-22'], dtype='M8[D]').astype('M8[M]')
- array(['1979-03'], dtype='datetime64[M]')
-
-Datetime Arithmetic
-```````````````````
-
-The 1.6 implementation of datetime only works correctly for a small subset of
-arithmetic operations. Here we show some simple cases.::
-
- # NumPy 1.6.1, produces invalid results if units are incompatible
- >>> a = np.array(['1979-03-22T12'], dtype='M8[h]')
- >>> b = np.array([3*60], dtype='m8[m]')
- >>> a + b
- array([1970-01-01 00:00:00.080988], dtype=datetime64[us])
- # NumPy 1.7.0, promotes to higher-resolution unit
- >>> a = np.array(['1979-03-22T12'], dtype='M8[h]')
- >>> b = np.array([3*60], dtype='m8[m]')
- >>> a + b
- array(['1979-03-22T15:00-0500'], dtype='datetime64[m]')
-
- # NumPy 1.6.1, arithmetic works if everything is microseconds
- >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]')
- >>> b = np.array([3*60*60*1000000], dtype='m8[us]')
- >>> a + b
- array([1979-03-22 15:00:00], dtype=datetime64[us])
- # NumPy 1.7.0
- >>> a = np.array(['1979-03-22T12:00'], dtype='M8[us]')
- >>> b = np.array([3*60*60*1000000], dtype='m8[us]')
- >>> a + b
- array(['1979-03-22T15:00:00.000000-0500'], dtype='datetime64[us]')
+.. _pandas: http://pandas.pydata.org \ No newline at end of file
diff --git a/doc/source/reference/arrays.dtypes.rst b/doc/source/reference/arrays.dtypes.rst
index f2072263f..ab743a8ee 100644
--- a/doc/source/reference/arrays.dtypes.rst
+++ b/doc/source/reference/arrays.dtypes.rst
@@ -14,7 +14,7 @@ following aspects of the data:
1. Type of the data (integer, float, Python object, etc.)
2. Size of the data (how many bytes is in *e.g.* the integer)
3. Byte order of the data (:term:`little-endian` or :term:`big-endian`)
-4. If the data type is :term:`structured`, an aggregate of other
+4. If the data type is a :term:`structured data type`, an aggregate of other
data types, (*e.g.*, describing an array item consisting of
an integer and a float),
@@ -42,7 +42,7 @@ needed in NumPy.
pair: dtype; field
Structured data types are formed by creating a data type whose
-:term:`fields` contain other data types. Each field has a name by
+:term:`fields <field>` contain other data types. Each field has a name by
which it can be :ref:`accessed <arrays.indexing.fields>`. The parent data
type should be of sufficient size to contain all its fields; the
parent is nearly always based on the :class:`void` type which allows
@@ -145,7 +145,7 @@ Array-scalar types
This is true for their sub-classes as well.
Note that not all data-type information can be supplied with a
- type-object: for example, :term:`flexible` data-types have
+ type-object: for example, `flexible` data-types have
a default *itemsize* of 0, and require an explicitly given size
to be useful.
@@ -511,7 +511,7 @@ Endianness of this data:
dtype.byteorder
-Information about sub-data-types in a :term:`structured` data type:
+Information about sub-data-types in a :term:`structured data type`:
.. autosummary::
:toctree: generated/
@@ -538,6 +538,7 @@ Attributes providing additional information:
dtype.isnative
dtype.descr
dtype.alignment
+ dtype.base
Methods
diff --git a/doc/source/reference/arrays.indexing.rst b/doc/source/reference/arrays.indexing.rst
index 62d36e28c..8ec8d8330 100644
--- a/doc/source/reference/arrays.indexing.rst
+++ b/doc/source/reference/arrays.indexing.rst
@@ -3,6 +3,10 @@
Indexing
========
+.. seealso::
+
+ :ref:`Indexing basics <basics.indexing>`
+
.. sectionauthor:: adapted from "Guide to NumPy" by Travis E. Oliphant
.. currentmodule:: numpy
@@ -57,6 +61,17 @@ interpreted as counting from the end of the array (*i.e.*, if
All arrays generated by basic slicing are always :term:`views <view>`
of the original array.
+.. note::
+
+ NumPy slicing creates a :term:`view` instead of a copy as in the case of
+ builtin Python sequences such as string, tuple and list.
+ Care must be taken when extracting
+ a small portion from a large array which becomes useless after the
+ extraction, because the small portion extracted contains a reference
+ to the large original array whose memory will not be released until
+ all arrays derived from it are garbage-collected. In such cases an
+ explicit ``copy()`` is recommended.
+
The standard rules of sequence slicing apply to basic slicing on a
per-dimension basis (including using a step index). Some useful
concepts to remember include:
@@ -111,9 +126,10 @@ concepts to remember include:
[5],
[6]]])
-- :const:`Ellipsis` expand to the number of ``:`` objects needed to
- make a selection tuple of the same length as ``x.ndim``. There may
- only be a single ellipsis present.
+- :const:`Ellipsis` expands to the number of ``:`` objects needed for the
+ selection tuple to index all dimensions. In most cases, this means that
+ length of the expanded selection tuple is ``x.ndim``. There may only be a
+ single ellipsis present.
.. admonition:: Example
@@ -513,14 +529,10 @@ only the part of the data in the specified field. Also
:ref:`record array <arrays.classes.rec>` scalars can be "indexed" this way.
Indexing into a structured array can also be done with a list of field names,
-*e.g.* ``x[['field-name1','field-name2']]``. Currently this returns a new
-array containing a copy of the values in the fields specified in the list.
-As of NumPy 1.7, returning a copy is being deprecated in favor of returning
-a view. A copy will continue to be returned for now, but a FutureWarning
-will be issued when writing to the copy. If you depend on the current
-behavior, then we suggest copying the returned array explicitly, i.e. use
-x[['field-name1','field-name2']].copy(). This will work with both past and
-future versions of NumPy.
+*e.g.* ``x[['field-name1','field-name2']]``. As of NumPy 1.16 this returns a
+view containing only those fields. In older versions of numpy it returned a
+copy. See the user guide section on :ref:`structured_arrays` for more
+information on multifield indexing.
If the accessed field is a sub-array, the dimensions of the sub-array
are appended to the shape of the result.
diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index 4c8bbf66d..8f431bc9c 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -9,7 +9,7 @@ The N-dimensional array (:class:`ndarray`)
An :class:`ndarray` is a (usually fixed-size) multidimensional
container of items of the same type and size. The number of dimensions
and items in an array is defined by its :attr:`shape <ndarray.shape>`,
-which is a :class:`tuple` of *N* positive integers that specify the
+which is a :class:`tuple` of *N* non-negative integers that specify the
sizes of each dimension. The type of items in the array is specified by
a separate :ref:`data-type object (dtype) <arrays.dtypes>`, one of which
is associated with each ndarray.
@@ -82,10 +82,12 @@ Indexing arrays
Arrays can be indexed using an extended Python slicing syntax,
``array[selection]``. Similar syntax is also used for accessing
-fields in a :ref:`structured array <arrays.dtypes.field>`.
+fields in a :term:`structured data type`.
.. seealso:: :ref:`Array Indexing <arrays.indexing>`.
+.. _memory-layout:
+
Internal memory layout of an ndarray
====================================
@@ -127,7 +129,7 @@ strided scheme, and correspond to memory that can be *addressed* by the strides:
where :math:`d_j` `= self.shape[j]`.
Both the C and Fortran orders are :term:`contiguous`, *i.e.,*
-:term:`single-segment`, memory layouts, in which every part of the
+single-segment, memory layouts, in which every part of the
memory block can be accessed by some combination of the indices.
While a C-style and Fortran-style contiguous array, which has the corresponding
@@ -143,14 +145,15 @@ different. This can happen in two cases:
considered C-style and Fortran-style contiguous.
Point 1. means that ``self`` and ``self.squeeze()`` always have the same
-contiguity and :term:`aligned` flags value. This also means that even a high
-dimensional array could be C-style and Fortran-style contiguous at the same
-time.
+contiguity and ``aligned`` flags value. This also means
+that even a high dimensional array could be C-style and Fortran-style
+contiguous at the same time.
.. index:: aligned
An array is considered aligned if the memory offsets for all elements and the
-base offset itself is a multiple of `self.itemsize`.
+base offset itself is a multiple of `self.itemsize`. Understanding
+`memory-alignment` leads to better performance on most hardware.
.. note::
@@ -409,6 +412,7 @@ be performed.
.. autosummary::
:toctree: generated/
+ ndarray.max
ndarray.argmax
ndarray.min
ndarray.argmin
@@ -440,7 +444,7 @@ Each of the arithmetic operations (``+``, ``-``, ``*``, ``/``, ``//``,
``%``, ``divmod()``, ``**`` or ``pow()``, ``<<``, ``>>``, ``&``,
``^``, ``|``, ``~``) and the comparisons (``==``, ``<``, ``>``,
``<=``, ``>=``, ``!=``) is equivalent to the corresponding
-:term:`universal function` (or :term:`ufunc` for short) in NumPy. For
+universal function (or :term:`ufunc` for short) in NumPy. For
more information, see the section on :ref:`Universal Functions
<ufuncs>`.
@@ -461,12 +465,12 @@ Truth value of an array (:func:`bool()`):
.. autosummary::
:toctree: generated/
- ndarray.__nonzero__
+ ndarray.__bool__
.. note::
Truth-value testing of an array invokes
- :meth:`ndarray.__nonzero__`, which raises an error if the number of
+ :meth:`ndarray.__bool__`, which raises an error if the number of
elements in the array is larger than 1, because the truth value
of such arrays is ambiguous. Use :meth:`.any() <ndarray.any>` and
:meth:`.all() <ndarray.all>` instead to be clear about what is meant
@@ -492,7 +496,6 @@ Arithmetic:
ndarray.__add__
ndarray.__sub__
ndarray.__mul__
- ndarray.__div__
ndarray.__truediv__
ndarray.__floordiv__
ndarray.__mod__
@@ -517,7 +520,7 @@ Arithmetic:
``__r{op}__`` special methods are not directly defined.
- The functions called to implement many arithmetic special methods
- for arrays can be modified using :func:`set_numeric_ops`.
+ for arrays can be modified using :class:`__array_ufunc__ <numpy.class.__array_ufunc__>`.
Arithmetic, in-place:
@@ -527,7 +530,6 @@ Arithmetic, in-place:
ndarray.__iadd__
ndarray.__isub__
ndarray.__imul__
- ndarray.__idiv__
ndarray.__itruediv__
ndarray.__ifloordiv__
ndarray.__imod__
@@ -597,19 +599,17 @@ Container customization: (see :ref:`Indexing <arrays.indexing>`)
ndarray.__setitem__
ndarray.__contains__
-Conversion; the operations :func:`complex()`, :func:`int()`,
-:func:`long()`, :func:`float()`, :func:`oct()`, and
-:func:`hex()`. They work only on arrays that have one element in them
+Conversion; the operations :func:`int()`, :func:`float()` and
+:func:`complex()`.
+They work only on arrays that have one element in them
and return the appropriate scalar.
.. autosummary::
:toctree: generated/
ndarray.__int__
- ndarray.__long__
ndarray.__float__
- ndarray.__oct__
- ndarray.__hex__
+ ndarray.__complex__
String representations:
diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst
index fa8183f75..7dab09a71 100644
--- a/doc/source/reference/arrays.nditer.rst
+++ b/doc/source/reference/arrays.nditer.rst
@@ -115,13 +115,18 @@ context is exited.
array([[ 0, 2, 4],
[ 6, 8, 10]])
+If you are writing code that needs to support older versions of numpy,
+note that prior to 1.15, :class:`nditer` was not a context manager and
+did not have a `close` method. Instead it relied on the destructor to
+initiate the writeback of the buffer.
+
Using an External Loop
----------------------
In all the examples so far, the elements of `a` are provided by the
iterator one at a time, because all the looping logic is internal to the
-iterator. While this is simple and convenient, it is not very efficient. A
-better approach is to move the one-dimensional innermost loop into your
+iterator. While this is simple and convenient, it is not very efficient.
+A better approach is to move the one-dimensional innermost loop into your
code, external to the iterator. This way, NumPy's vectorized operations
can be used on larger chunks of the elements being visited.
@@ -156,41 +161,29 @@ element in a computation. For example, you may want to visit the
elements of an array in memory order, but use a C-order, Fortran-order,
or multidimensional index to look up values in a different array.
-The Python iterator protocol doesn't have a natural way to query these
-additional values from the iterator, so we introduce an alternate syntax
-for iterating with an :class:`nditer`. This syntax explicitly works
-with the iterator object itself, so its properties are readily accessible
-during iteration. With this looping construct, the current value is
-accessible by indexing into the iterator, and the index being tracked
-is the property `index` or `multi_index` depending on what was requested.
-
-The Python interactive interpreter unfortunately prints out the
-values of expressions inside the while loop during each iteration of the
-loop. We have modified the output in the examples using this looping
-construct in order to be more readable.
+The index is tracked by the iterator object itself, and accessible
+through the `index` or `multi_index` properties, depending on what was
+requested. The examples below show printouts demonstrating the
+progression of the index:
.. admonition:: Example
>>> a = np.arange(6).reshape(2,3)
>>> it = np.nditer(a, flags=['f_index'])
- >>> while not it.finished:
- ... print("%d <%d>" % (it[0], it.index), end=' ')
- ... it.iternext()
+ >>> for x in it:
+ ... print("%d <%d>" % (x, it.index), end=' ')
...
0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5>
>>> it = np.nditer(a, flags=['multi_index'])
- >>> while not it.finished:
- ... print("%d <%s>" % (it[0], it.multi_index), end=' ')
- ... it.iternext()
+ >>> for x in it:
+ ... print("%d <%s>" % (x, it.multi_index), end=' ')
...
0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)>
- >>> it = np.nditer(a, flags=['multi_index'], op_flags=['writeonly'])
- >>> with it:
- .... while not it.finished:
- ... it[0] = it.multi_index[1] - it.multi_index[0]
- ... it.iternext()
+ >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it:
+ ... for x in it:
+ ... x[...] = it.multi_index[1] - it.multi_index[0]
...
>>> a
array([[ 0, 1, 2],
@@ -199,7 +192,7 @@ construct in order to be more readable.
Tracking an index or multi-index is incompatible with using an external
loop, because it requires a different index value per element. If
you try to combine these flags, the :class:`nditer` object will
-raise an exception
+raise an exception.
.. admonition:: Example
@@ -209,6 +202,42 @@ raise an exception
File "<stdin>", line 1, in <module>
ValueError: Iterator flag EXTERNAL_LOOP cannot be used if an index or multi-index is being tracked
+Alternative Looping and Element Access
+--------------------------------------
+
+To make its properties more readily accessible during iteration,
+:class:`nditer` has an alternative syntax for iterating, which works
+explicitly with the iterator object itself. With this looping construct,
+the current value is accessible by indexing into the iterator. Other
+properties, such as tracked indices remain as before. The examples below
+produce identical results to the ones in the previous section.
+
+.. admonition:: Example
+
+ >>> a = np.arange(6).reshape(2,3)
+ >>> it = np.nditer(a, flags=['f_index'])
+ >>> while not it.finished:
+ ... print("%d <%d>" % (it[0], it.index), end=' ')
+ ... it.iternext()
+ ...
+ 0 <0> 1 <2> 2 <4> 3 <1> 4 <3> 5 <5>
+
+ >>> it = np.nditer(a, flags=['multi_index'])
+ >>> while not it.finished:
+ ... print("%d <%s>" % (it[0], it.multi_index), end=' ')
+ ... it.iternext()
+ ...
+ 0 <(0, 0)> 1 <(0, 1)> 2 <(0, 2)> 3 <(1, 0)> 4 <(1, 1)> 5 <(1, 2)>
+
+ >>> with np.nditer(a, flags=['multi_index'], op_flags=['writeonly']) as it:
+ ... while not it.finished:
+ ... it[0] = it.multi_index[1] - it.multi_index[0]
+ ... it.iternext()
+ ...
+ >>> a
+ array([[ 0, 1, 2],
+ [-1, 0, 1]])
+
Buffering the Array Elements
----------------------------
diff --git a/doc/source/reference/arrays.scalars.rst b/doc/source/reference/arrays.scalars.rst
index 9c4f05f75..d27d61e2c 100644
--- a/doc/source/reference/arrays.scalars.rst
+++ b/doc/source/reference/arrays.scalars.rst
@@ -177,7 +177,7 @@ Any Python object:
.. note::
- The data actually stored in :term:`object arrays <object array>`
+ The data actually stored in object arrays
(*i.e.*, arrays having dtype :class:`object_`) are references to
Python objects, not the objects themselves. Hence, object arrays
behave more like usual Python :class:`lists <list>`, in the sense
@@ -188,8 +188,10 @@ Any Python object:
on item access, but instead returns the actual object that
the array item refers to.
-The following data types are :term:`flexible`. They have no predefined
-size: the data they describe can be of different length in different
+.. index:: flexible
+
+The following data types are **flexible**: they have no predefined
+size and the data they describe can be of different length in different
arrays. (In the character codes ``#`` is an integer denoting how many
elements the data type consists of.)
diff --git a/doc/source/reference/c-api.array.rst b/doc/source/reference/c-api/array.rst
index 9265b1a97..08bf06b00 100644
--- a/doc/source/reference/c-api.array.rst
+++ b/doc/source/reference/c-api/array.rst
@@ -20,27 +20,44 @@ Array API
Array structure and data access
-------------------------------
-These macros all access the :c:type:`PyArrayObject` structure members. The input
-argument, arr, can be any :c:type:`PyObject *<PyObject>` that is directly interpretable
-as a :c:type:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type` and its
-sub-types).
+These macros access the :c:type:`PyArrayObject` structure members and are
+defined in ``ndarraytypes.h``. The input argument, *arr*, can be any
+:c:type:`PyObject *<PyObject>` that is directly interpretable as a
+:c:type:`PyArrayObject *` (any instance of the :c:data:`PyArray_Type`
+and its sub-types).
.. c:function:: int PyArray_NDIM(PyArrayObject *arr)
The number of dimensions in the array.
-.. c:function:: npy_intp *PyArray_DIMS(PyArrayObject *arr)
+.. c:function:: int PyArray_FLAGS(PyArrayObject* arr)
- Returns a pointer to the dimensions/shape of the array. The
- number of elements matches the number of dimensions
- of the array.
+ Returns an integer representing the :ref:`array-flags<array-flags>`.
-.. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr)
+.. c:function:: int PyArray_TYPE(PyArrayObject* arr)
+
+ Return the (builtin) typenumber for the elements of this array.
+
+.. c:function:: int PyArray_SETITEM( \
+ PyArrayObject* arr, void* itemptr, PyObject* obj)
+
+ Convert obj and place it in the ndarray, *arr*, at the place
+ pointed to by itemptr. Return -1 if an error occurs or 0 on
+ success.
+
+.. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags)
.. versionadded:: 1.7
- A synonym for PyArray_DIMS, named to be consistent with the
- 'shape' usage within Python.
+ Enables the specified array flags. This function does no validation,
+ and assumes that you know what you're doing.
+
+.. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags)
+
+ .. versionadded:: 1.7
+
+ Clears the specified array flags. This function does no validation,
+ and assumes that you know what you're doing.
.. c:function:: void *PyArray_DATA(PyArrayObject *arr)
@@ -53,6 +70,19 @@ sub-types).
array then be sure you understand how to access the data in the
array to avoid memory and/or alignment problems.
+.. c:function:: npy_intp *PyArray_DIMS(PyArrayObject *arr)
+
+ Returns a pointer to the dimensions/shape of the array. The
+ number of elements matches the number of dimensions
+ of the array. Can return ``NULL`` for 0-dimensional arrays.
+
+.. c:function:: npy_intp *PyArray_SHAPE(PyArrayObject *arr)
+
+ .. versionadded:: 1.7
+
+ A synonym for :c:func:`PyArray_DIMS`, named to be consistent with the
+ `shape <numpy.ndarray.shape>` usage within Python.
+
.. c:function:: npy_intp *PyArray_STRIDES(PyArrayObject* arr)
Returns a pointer to the strides of the array. The
@@ -67,6 +97,27 @@ sub-types).
Return the stride in the *n* :math:`^{\textrm{th}}` dimension.
+.. c:function:: npy_intp PyArray_ITEMSIZE(PyArrayObject* arr)
+
+ Return the itemsize for the elements of this array.
+
+ Note that, in the old API that was deprecated in version 1.7, this function
+ had the return type ``int``.
+
+.. c:function:: npy_intp PyArray_SIZE(PyArrayObject* arr)
+
+ Returns the total size (in number of elements) of the array.
+
+.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj)
+
+ Returns 0 if *obj* is not a sub-class of ndarray. Otherwise,
+ returns the total number of elements in the array. Safer version
+ of :c:func:`PyArray_SIZE` (*obj*).
+
+.. c:function:: npy_intp PyArray_NBYTES(PyArrayObject* arr)
+
+ Returns the total number of bytes consumed by the array.
+
.. c:function:: PyObject *PyArray_BASE(PyArrayObject* arr)
This returns the base object of the array. In most cases, this
@@ -93,60 +144,12 @@ sub-types).
A synonym for PyArray_DESCR, named to be consistent with the
'dtype' usage within Python.
-.. c:function:: void PyArray_ENABLEFLAGS(PyArrayObject* arr, int flags)
-
- .. versionadded:: 1.7
-
- Enables the specified array flags. This function does no validation,
- and assumes that you know what you're doing.
-
-.. c:function:: void PyArray_CLEARFLAGS(PyArrayObject* arr, int flags)
-
- .. versionadded:: 1.7
-
- Clears the specified array flags. This function does no validation,
- and assumes that you know what you're doing.
-
-.. c:function:: int PyArray_FLAGS(PyArrayObject* arr)
-
-.. c:function:: npy_intp PyArray_ITEMSIZE(PyArrayObject* arr)
-
- Return the itemsize for the elements of this array.
-
- Note that, in the old API that was deprecated in version 1.7, this function
- had the return type ``int``.
-
-.. c:function:: int PyArray_TYPE(PyArrayObject* arr)
-
- Return the (builtin) typenumber for the elements of this array.
-
.. c:function:: PyObject *PyArray_GETITEM(PyArrayObject* arr, void* itemptr)
- Get a Python object of a builtin type from the ndarray, *arr*,
+ Get a Python object of a builtin type from the ndarray, *arr*,
at the location pointed to by itemptr. Return ``NULL`` on failure.
-
- `numpy.ndarray.item` is identical to PyArray_GETITEM.
-
-.. c:function:: int PyArray_SETITEM( \
- PyArrayObject* arr, void* itemptr, PyObject* obj)
-
- Convert obj and place it in the ndarray, *arr*, at the place
- pointed to by itemptr. Return -1 if an error occurs or 0 on
- success.
-
-.. c:function:: npy_intp PyArray_SIZE(PyArrayObject* arr)
-
- Returns the total size (in number of elements) of the array.
-
-.. c:function:: npy_intp PyArray_Size(PyArrayObject* obj)
- Returns 0 if *obj* is not a sub-class of ndarray. Otherwise,
- returns the total number of elements in the array. Safer version
- of :c:func:`PyArray_SIZE` (*obj*).
-
-.. c:function:: npy_intp PyArray_NBYTES(PyArrayObject* arr)
-
- Returns the total number of bytes consumed by the array.
+ `numpy.ndarray.item` is identical to PyArray_GETITEM.
Data access
@@ -199,10 +202,11 @@ From scratch
^^^^^^^^^^^^
.. c:function:: PyObject* PyArray_NewFromDescr( \
- PyTypeObject* subtype, PyArray_Descr* descr, int nd, npy_intp* dims, \
- npy_intp* strides, void* data, int flags, PyObject* obj)
+ PyTypeObject* subtype, PyArray_Descr* descr, int nd, npy_intp const* dims, \
+ npy_intp const* strides, void* data, int flags, PyObject* obj)
- This function steals a reference to *descr*.
+ This function steals a reference to *descr*. The easiest way to get one
+ is using :c:func:`PyArray_DescrFromType`.
This is the main array creation function. Most new arrays are
created with this flexible function.
@@ -216,11 +220,13 @@ From scratch
:c:data:`&PyArray_Type<PyArray_Type>`, then *obj* is the object to pass to
the :obj:`~numpy.class.__array_finalize__` method of the subclass.
- If *data* is ``NULL``, then new memory will be allocated and *flags*
- can be non-zero to indicate a Fortran-style contiguous array. If
- *data* is not ``NULL``, then it is assumed to point to the memory
+ If *data* is ``NULL``, then new uninitialized memory will be allocated and
+ *flags* can be non-zero to indicate a Fortran-style contiguous array. Use
+ :c:func:`PyArray_FILLWBYTE` to initialize the memory.
+
+ If *data* is not ``NULL``, then it is assumed to point to the memory
to be used for the array and the *flags* argument is used as the
- new flags for the array (except the state of :c:data:`NPY_OWNDATA`,
+ new flags for the array (except the state of :c:data:`NPY_ARRAY_OWNDATA`,
:c:data:`NPY_ARRAY_WRITEBACKIFCOPY` and :c:data:`NPY_ARRAY_UPDATEIFCOPY`
flags of the new array will be reset).
@@ -232,6 +238,12 @@ From scratch
provided *dims* and *strides* are copied into newly allocated
dimension and strides arrays for the new array object.
+ :c:func:`PyArray_CheckStrides` can help verify non- ``NULL`` stride
+ information.
+
+ If ``data`` is provided, it must stay alive for the life of the array. One
+ way to manage this is through :c:func:`PyArray_SetBaseObject`
+
.. c:function:: PyObject* PyArray_NewLikeArray( \
PyArrayObject* prototype, NPY_ORDER order, PyArray_Descr* descr, \
int subok)
@@ -257,8 +269,9 @@ From scratch
base-class array.
.. c:function:: PyObject* PyArray_New( \
- PyTypeObject* subtype, int nd, npy_intp* dims, int type_num, \
- npy_intp* strides, void* data, int itemsize, int flags, PyObject* obj)
+ PyTypeObject* subtype, int nd, npy_intp const* dims, int type_num, \
+ npy_intp const* strides, void* data, int itemsize, int flags, \
+ PyObject* obj)
This is similar to :c:func:`PyArray_NewFromDescr` (...) except you
specify the data-type descriptor with *type_num* and *itemsize*,
@@ -279,29 +292,40 @@ From scratch
are passed in they must be consistent with the dimensions, the
itemsize, and the data of the array.
-.. c:function:: PyObject* PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+.. c:function:: PyObject* PyArray_SimpleNew(int nd, npy_intp const* dims, int typenum)
Create a new uninitialized array of type, *typenum*, whose size in
- each of *nd* dimensions is given by the integer array, *dims*.
- This function cannot be used to create a flexible-type array (no
- itemsize given).
+ each of *nd* dimensions is given by the integer array, *dims*. The memory
+ for the array is uninitialized (unless typenum is :c:data:`NPY_OBJECT`
+ in which case each element in the array is set to NULL). The
+ *typenum* argument allows specification of any of the builtin
+ data-types such as :c:data:`NPY_FLOAT` or :c:data:`NPY_LONG`. The
+ memory for the array can be set to zero if desired using
+ :c:func:`PyArray_FILLWBYTE` (return_object, 0). This function cannot be
+ used to create a flexible-type array (no itemsize given).
.. c:function:: PyObject* PyArray_SimpleNewFromData( \
- int nd, npy_intp* dims, int typenum, void* data)
+ int nd, npy_intp const* dims, int typenum, void* data)
Create an array wrapper around *data* pointed to by the given
pointer. The array flags will have a default that the data area is
well-behaved and C-style contiguous. The shape of the array is
given by the *dims* c-array of length *nd*. The data-type of the
- array is indicated by *typenum*.
+ array is indicated by *typenum*. If data comes from another
+ reference-counted Python object, the reference count on this object
+ should be increased after the pointer is passed in, and the base member
+ of the returned ndarray should point to the Python object that owns
+ the data. This will ensure that the provided memory is not
+ freed while the returned array is in existence. To free memory as soon
+ as the ndarray is deallocated, set the OWNDATA flag on the returned ndarray.
.. c:function:: PyObject* PyArray_SimpleNewFromDescr( \
- int nd, npy_intp* dims, PyArray_Descr* descr)
+ int nd, npy_intp const* dims, PyArray_Descr* descr)
- This function steals a reference to *descr* if it is not NULL.
+ This function steals a reference to *descr*.
- Create a new array with the provided data-type descriptor, *descr*
- , of the shape determined by *nd* and *dims*.
+ Create a new array with the provided data-type descriptor, *descr*,
+ of the shape determined by *nd* and *dims*.
.. c:function:: PyArray_FILLWBYTE(PyObject* obj, int val)
@@ -310,7 +334,7 @@ From scratch
This macro calls memset, so obj must be contiguous.
.. c:function:: PyObject* PyArray_Zeros( \
- int nd, npy_intp* dims, PyArray_Descr* dtype, int fortran)
+ int nd, npy_intp const* dims, PyArray_Descr* dtype, int fortran)
Construct a new *nd* -dimensional array with shape given by *dims*
and data type given by *dtype*. If *fortran* is non-zero, then a
@@ -319,13 +343,13 @@ From scratch
corresponds to :c:type:`NPY_OBJECT` ).
.. c:function:: PyObject* PyArray_ZEROS( \
- int nd, npy_intp* dims, int type_num, int fortran)
+ int nd, npy_intp const* dims, int type_num, int fortran)
Macro form of :c:func:`PyArray_Zeros` which takes a type-number instead
of a data-type object.
.. c:function:: PyObject* PyArray_Empty( \
- int nd, npy_intp* dims, PyArray_Descr* dtype, int fortran)
+ int nd, npy_intp const* dims, PyArray_Descr* dtype, int fortran)
Construct a new *nd* -dimensional array with shape given by *dims*
and data type given by *dtype*. If *fortran* is non-zero, then a
@@ -335,7 +359,7 @@ From scratch
filled with :c:data:`Py_None`.
.. c:function:: PyObject* PyArray_EMPTY( \
- int nd, npy_intp* dims, int typenum, int fortran)
+ int nd, npy_intp const* dims, int typenum, int fortran)
Macro form of :c:func:`PyArray_Empty` which takes a type-number,
*typenum*, instead of a data-type object.
@@ -407,10 +431,6 @@ From other objects
the array is constructed that way. Almost always this
parameter is ``NULL``.
- In versions 1.6 and earlier of NumPy, the following flags
- did not have the ``_ARRAY_`` macro namespace in them. That form
- of the constant names is deprecated in 1.7.
-
.. c:var:: NPY_ARRAY_C_CONTIGUOUS
Make sure the returned array is C-style contiguous
@@ -505,6 +525,11 @@ From other objects
:c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
:c:data:`NPY_ARRAY_ALIGNED`
+ .. c:var:: NPY_ARRAY_OUT_ARRAY
+
+ :c:data:`NPY_ARRAY_C_CONTIGUOUS` \| :c:data:`NPY_ARRAY_ALIGNED` \|
+ :c:data:`NPY_ARRAY_WRITEABLE`
+
.. c:var:: NPY_ARRAY_OUT_FARRAY
:c:data:`NPY_ARRAY_F_CONTIGUOUS` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
@@ -568,8 +593,9 @@ From other objects
return NULL;
}
if (arr == NULL) {
+ /*
... validate/change dtype, validate flags, ndim, etc ...
- // Could make custom strides here too
+ Could make custom strides here too */
arr = PyArray_NewFromDescr(&PyArray_Type, dtype, ndim,
dims, NULL,
fortran ? NPY_ARRAY_F_CONTIGUOUS : 0,
@@ -583,10 +609,14 @@ From other objects
}
}
else {
+ /*
... in this case the other parameters weren't filled, just
validate and possibly copy arr itself ...
+ */
}
+ /*
... use arr ...
+ */
.. c:function:: PyObject* PyArray_CheckFromAny( \
PyObject* op, PyArray_Descr* dtype, int min_depth, int max_depth, \
@@ -779,7 +809,7 @@ From other objects
PyObject* obj, int typenum, int requirements)
Combination of :c:func:`PyArray_FROM_OF` and :c:func:`PyArray_FROM_OT`
- allowing both a *typenum* and a *flags* argument to be provided..
+ allowing both a *typenum* and a *flags* argument to be provided.
.. c:function:: PyObject* PyArray_FROMANY( \
PyObject* obj, int typenum, int min, int max, int requirements)
@@ -811,17 +841,17 @@ Dealing with types
General check of Python Type
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. c:function:: PyArray_Check(op)
+.. c:function:: PyArray_Check(PyObject *op)
Evaluates true if *op* is a Python object whose type is a sub-type
of :c:data:`PyArray_Type`.
-.. c:function:: PyArray_CheckExact(op)
+.. c:function:: PyArray_CheckExact(PyObject *op)
Evaluates true if *op* is a Python object with type
:c:data:`PyArray_Type`.
-.. c:function:: PyArray_HasArrayInterface(op, out)
+.. c:function:: PyArray_HasArrayInterface(PyObject *op, PyObject *out)
If ``op`` implements any part of the array interface, then ``out``
will contain a new reference to the newly created ndarray using
@@ -967,6 +997,10 @@ argument must be a :c:type:`PyObject *<PyObject>` that can be directly interpret
called on flexible dtypes. Types that are attached to an array will always
be sized, hence the array form of this macro not existing.
+ .. versionchanged:: 1.18
+
+ For structured datatypes with no fields this function now returns False.
+
.. c:function:: PyTypeNum_ISUSERDEF(num)
.. c:function:: PyDataType_ISUSERDEF(descr)
@@ -1370,6 +1404,7 @@ Special functions for NPY_OBJECT
Returns 0 for success, -1 for failure.
+.. _array-flags:
Array flags
-----------
@@ -1547,7 +1582,7 @@ Flag checking
^^^^^^^^^^^^^
For all of these macros *arr* must be an instance of a (subclass of)
-:c:data:`PyArray_Type`, but no checking is done.
+:c:data:`PyArray_Type`.
.. c:function:: PyArray_CHKFLAGS(arr, flags)
@@ -1645,11 +1680,13 @@ Conversion
.. c:function:: PyObject* PyArray_GetField( \
PyArrayObject* self, PyArray_Descr* dtype, int offset)
- Equivalent to :meth:`ndarray.getfield<numpy.ndarray.getfield>` (*self*, *dtype*, *offset*). Return
- a new array of the given *dtype* using the data in the current
- array at a specified *offset* in bytes. The *offset* plus the
- itemsize of the new array type must be less than *self*
- ->descr->elsize or an error is raised. The same shape and strides
+ Equivalent to :meth:`ndarray.getfield<numpy.ndarray.getfield>`
+ (*self*, *dtype*, *offset*). This function `steals a reference
+   <https://docs.python.org/3/c-api/intro.html#reference-count-details>`_
+ to `PyArray_Descr` and returns a new array of the given `dtype` using
+ the data in the current array at a specified `offset` in bytes. The
+ `offset` plus the itemsize of the new array type must be less than ``self
+ ->descr->elsize`` or an error is raised. The same shape and strides
as the original array are used. Therefore, this function has the
effect of returning a field from a structured array. But, it can also
be used to select specific bytes or groups of bytes from any array
@@ -1899,22 +1936,23 @@ Item selection and manipulation
all values are clipped to the region [0, len(*op*) ).
-.. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis)
+.. c:function:: PyObject* PyArray_Sort(PyArrayObject* self, int axis, NPY_SORTKIND kind)
- Equivalent to :meth:`ndarray.sort<numpy.ndarray.sort>` (*self*, *axis*). Return an array with
- the items of *self* sorted along *axis*.
+ Equivalent to :meth:`ndarray.sort<numpy.ndarray.sort>` (*self*, *axis*, *kind*).
+ Return an array with the items of *self* sorted along *axis*. The array
+   is sorted using the algorithm denoted by *kind*, which is an integer/enum pointing
+ to the type of sorting algorithms used.
.. c:function:: PyObject* PyArray_ArgSort(PyArrayObject* self, int axis)
- Equivalent to :meth:`ndarray.argsort<numpy.ndarray.argsort>` (*self*, *axis*). Return an array of
- indices such that selection of these indices along the given
- ``axis`` would return a sorted version of *self*. If *self*
- ->descr is a data-type with fields defined, then
- self->descr->names is used to determine the sort order. A
- comparison where the first field is equal will use the second
- field and so on. To alter the sort order of a structured array, create
- a new data-type with a different order of names and construct a
- view of the array with that new data-type.
+ Equivalent to :meth:`ndarray.argsort<numpy.ndarray.argsort>` (*self*, *axis*).
+ Return an array of indices such that selection of these indices
+ along the given ``axis`` would return a sorted version of *self*. If *self* ->descr
+ is a data-type with fields defined, then self->descr->names is used
+ to determine the sort order. A comparison where the first field is equal
+ will use the second field and so on. To alter the sort order of a
+ structured array, create a new data-type with a different order of names
+ and construct a view of the array with that new data-type.
.. c:function:: PyObject* PyArray_LexSort(PyObject* sort_keys, int axis)
@@ -2018,6 +2056,17 @@ Calculation
effect that is obtained by passing in *axis* = :const:`None` in Python
(treating the array as a 1-d array).
+
+.. note::
+
+ The out argument specifies where to place the result. If out is
+ NULL, then the output array is created, otherwise the output is
+ placed in out which must be the correct size and type. A new
+ reference to the output array is always returned even when out
+ is not NULL. The caller of the routine has the responsibility
+ to ``Py_DECREF`` out if not NULL or a memory-leak will occur.
+
+
.. c:function:: PyObject* PyArray_ArgMax( \
PyArrayObject* self, int axis, PyArrayObject* out)
@@ -2030,18 +2079,6 @@ Calculation
Equivalent to :meth:`ndarray.argmin<numpy.ndarray.argmin>` (*self*, *axis*). Return the index of
the smallest element of *self* along *axis*.
-
-
-
-.. note::
-
- The out argument specifies where to place the result. If out is
- NULL, then the output array is created, otherwise the output is
- placed in out which must be the correct size and type. A new
- reference to the output array is always returned even when out
- is not NULL. The caller of the routine has the responsibility
- to ``DECREF`` out if not NULL or a memory-leak will occur.
-
.. c:function:: PyObject* PyArray_Max( \
PyArrayObject* self, int axis, PyArrayObject* out)
@@ -2328,8 +2365,8 @@ Other functions
^^^^^^^^^^^^^^^
.. c:function:: Bool PyArray_CheckStrides( \
- int elsize, int nd, npy_intp numbytes, npy_intp* dims, \
- npy_intp* newstrides)
+ int elsize, int nd, npy_intp numbytes, npy_intp const* dims, \
+ npy_intp const* newstrides)
Determine if *newstrides* is a strides array consistent with the
memory of an *nd* -dimensional array with shape ``dims`` and
@@ -2341,14 +2378,14 @@ Other functions
*elsize* refer to a single-segment array. Return :c:data:`NPY_TRUE` if
*newstrides* is acceptable, otherwise return :c:data:`NPY_FALSE`.
-.. c:function:: npy_intp PyArray_MultiplyList(npy_intp* seq, int n)
+.. c:function:: npy_intp PyArray_MultiplyList(npy_intp const* seq, int n)
-.. c:function:: int PyArray_MultiplyIntList(int* seq, int n)
+.. c:function:: int PyArray_MultiplyIntList(int const* seq, int n)
Both of these routines multiply an *n* -length array, *seq*, of
integers and return the result. No overflow checking is performed.
-.. c:function:: int PyArray_CompareLists(npy_intp* l1, npy_intp* l2, int n)
+.. c:function:: int PyArray_CompareLists(npy_intp const* l1, npy_intp const* l2, int n)
Given two *n* -length arrays of integers, *l1*, and *l2*, return
1 if the lists are identical; otherwise, return 0.
@@ -2621,22 +2658,24 @@ cost of a slight overhead.
The mode should be one of:
- * NPY_NEIGHBORHOOD_ITER_ZERO_PADDING: zero padding. Outside bounds values
- will be 0.
- * NPY_NEIGHBORHOOD_ITER_ONE_PADDING: one padding, Outside bounds values
- will be 1.
- * NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING: constant padding. Outside bounds
- values will be the same as the first item in fill_value.
- * NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING: mirror padding. Outside bounds
- values will be as if the array items were mirrored. For example, for the
- array [1, 2, 3, 4], x[-2] will be 2, x[-2] will be 1, x[4] will be 4,
- x[5] will be 1, etc...
- * NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING: circular padding. Outside bounds
- values will be as if the array was repeated. For example, for the
- array [1, 2, 3, 4], x[-2] will be 3, x[-2] will be 4, x[4] will be 1,
- x[5] will be 2, etc...
-
- If the mode is constant filling (NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING),
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_ZERO_PADDING
+ Zero padding. Outside bounds values will be 0.
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_ONE_PADDING
+       One padding. Outside bounds values will be 1.
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING
+ Constant padding. Outside bounds values will be the
+ same as the first item in fill_value.
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_MIRROR_PADDING
+ Mirror padding. Outside bounds values will be as if the
+ array items were mirrored. For example, for the array [1, 2, 3, 4],
+       x[-2] will be 2, x[-1] will be 1, x[4] will be 4, x[5] will be 1,
+ etc...
+ .. c:macro:: NPY_NEIGHBORHOOD_ITER_CIRCULAR_PADDING
+ Circular padding. Outside bounds values will be as if the array
+ was repeated. For example, for the array [1, 2, 3, 4], x[-2] will
+       be 3, x[-1] will be 4, x[4] will be 1, x[5] will be 2, etc...
+
+ If the mode is constant filling (`NPY_NEIGHBORHOOD_ITER_CONSTANT_PADDING`),
fill_value should point to an array object which holds the filling value
(the first item will be the filling value if the array contains more than
one item). For other cases, fill_value may be NULL.
@@ -2654,22 +2693,22 @@ cost of a slight overhead.
.. code-block:: c
- PyArrayIterObject \*iter;
- PyArrayNeighborhoodIterObject \*neigh_iter;
+ PyArrayIterObject *iter;
+ PyArrayNeighborhoodIterObject *neigh_iter;
iter = PyArray_IterNew(x);
- //For a 3x3 kernel
+    /* For a 3x3 kernel */
bounds = {-1, 1, -1, 1};
neigh_iter = (PyArrayNeighborhoodIterObject*)PyArrayNeighborhoodIter_New(
iter, bounds, NPY_NEIGHBORHOOD_ITER_ZERO_PADDING, NULL);
for(i = 0; i < iter->size; ++i) {
for (j = 0; j < neigh_iter->size; ++j) {
- // Walk around the item currently pointed by iter->dataptr
+ /* Walk around the item currently pointed by iter->dataptr */
PyArrayNeighborhoodIter_Next(neigh_iter);
}
- // Move to the next point of iter
+ /* Move to the next point of iter */
PyArrayIter_Next(iter);
PyArrayNeighborhoodIter_Reset(neigh_iter);
}
@@ -2758,10 +2797,7 @@ Array Scalars
*arr* is not ``NULL`` and the first element is negative then
:c:data:`NPY_INTNEG_SCALAR` is returned, otherwise
:c:data:`NPY_INTPOS_SCALAR` is returned. The possible return values
- are :c:data:`NPY_{kind}_SCALAR` where ``{kind}`` can be **INTPOS**,
- **INTNEG**, **FLOAT**, **COMPLEX**, **BOOL**, or **OBJECT**.
- :c:data:`NPY_NOSCALAR` is also an enumerated value
- :c:type:`NPY_SCALARKIND` variables can take on.
+ are the enumerated values in :c:type:`NPY_SCALARKIND`.
.. c:function:: int PyArray_CanCoerceScalar( \
char thistype, char neededtype, NPY_SCALARKIND scalar)
@@ -2849,7 +2885,10 @@ Data-type descriptors
Returns a data-type object corresponding to *typenum*. The
*typenum* can be one of the enumerated types, a character code for
- one of the enumerated types, or a user-defined type.
+ one of the enumerated types, or a user-defined type. If you want to use a
+   flexible size array, then you need to use a flexible typenum and set
+   the result's ``elsize`` parameter to the desired size. The typenum is one of the
+ :c:data:`NPY_TYPES`.
.. c:function:: int PyArray_DescrConverter(PyObject* obj, PyArray_Descr** dtype)
@@ -2980,8 +3019,11 @@ to.
.. c:function:: int PyArray_SortkindConverter(PyObject* obj, NPY_SORTKIND* sort)
Convert Python strings into one of :c:data:`NPY_QUICKSORT` (starts
- with 'q' or 'Q') , :c:data:`NPY_HEAPSORT` (starts with 'h' or 'H'),
- or :c:data:`NPY_MERGESORT` (starts with 'm' or 'M').
+ with 'q' or 'Q'), :c:data:`NPY_HEAPSORT` (starts with 'h' or 'H'),
+ :c:data:`NPY_MERGESORT` (starts with 'm' or 'M') or :c:data:`NPY_STABLESORT`
+ (starts with 't' or 'T'). :c:data:`NPY_MERGESORT` and :c:data:`NPY_STABLESORT`
+ are aliased to each other for backwards compatibility and may refer to one
+ of several stable sorting algorithms depending on the data type.
.. c:function:: int PyArray_SearchsideConverter( \
PyObject* obj, NPY_SEARCHSIDE* side)
@@ -3209,12 +3251,16 @@ Internal Flexibility
setting a Python Error) if one of the objects being assigned is not
callable.
+ .. deprecated:: 1.16
+
.. c:function:: PyObject* PyArray_GetNumericOps(void)
Return a Python dictionary containing the callable Python objects
stored in the internal arithmetic operation table. The keys of
this dictionary are given in the explanation for :c:func:`PyArray_SetNumericOps`.
+ .. deprecated:: 1.16
+
.. c:function:: void PyArray_SetStringFunction(PyObject* op, int repr)
This function allows you to alter the tp_str and tp_repr methods
@@ -3240,19 +3286,19 @@ Memory management
Macros to allocate, free, and reallocate memory. These macros are used
internally to create arrays.
-.. c:function:: npy_intp* PyDimMem_NEW(nd)
+.. c:function:: npy_intp* PyDimMem_NEW(int nd)
-.. c:function:: PyDimMem_FREE(npy_intp* ptr)
+.. c:function:: PyDimMem_FREE(char* ptr)
-.. c:function:: npy_intp* PyDimMem_RENEW(npy_intp* ptr, npy_intp newnd)
+.. c:function:: npy_intp* PyDimMem_RENEW(void* ptr, size_t newnd)
Macros to allocate, free, and reallocate dimension and strides memory.
-.. c:function:: PyArray_malloc(nbytes)
+.. c:function:: void* PyArray_malloc(size_t nbytes)
-.. c:function:: PyArray_free(ptr)
+.. c:function:: PyArray_free(void* ptr)
-.. c:function:: PyArray_realloc(ptr, nbytes)
+.. c:function:: void* PyArray_realloc(npy_intp* ptr, size_t nbytes)
These macros use different memory allocators, depending on the
constant :c:data:`NPY_USE_PYMEM`. The system malloc is used when
@@ -3332,7 +3378,7 @@ Group 1
Useful to release the GIL only if *dtype* does not contain
arbitrary Python objects which may need the Python interpreter
- during execution of the loop. Equivalent to
+ during execution of the loop.
.. c:function:: NPY_END_THREADS_DESCR(PyArray_Descr *dtype)
@@ -3426,6 +3472,10 @@ Other constants
The maximum number of dimensions allowed in arrays.
+.. c:var:: NPY_MAXARGS
+
+ The maximum number of array arguments that can be used in functions.
+
.. c:var:: NPY_VERSION
The current version of the ndarray object (check to see if this
@@ -3454,31 +3504,31 @@ Other constants
Miscellaneous Macros
^^^^^^^^^^^^^^^^^^^^
-.. c:function:: PyArray_SAMESHAPE(a1, a2)
+.. c:function:: PyArray_SAMESHAPE(PyArrayObject *a1, PyArrayObject *a2)
Evaluates as True if arrays *a1* and *a2* have the same shape.
-.. c:function:: PyArray_MAX(a,b)
+.. c:macro:: PyArray_MAX(a,b)
Returns the maximum of *a* and *b*. If (*a*) or (*b*) are
expressions they are evaluated twice.
-.. c:function:: PyArray_MIN(a,b)
+.. c:macro:: PyArray_MIN(a,b)
Returns the minimum of *a* and *b*. If (*a*) or (*b*) are
expressions they are evaluated twice.
-.. c:function:: PyArray_CLT(a,b)
+.. c:macro:: PyArray_CLT(a,b)
-.. c:function:: PyArray_CGT(a,b)
+.. c:macro:: PyArray_CGT(a,b)
-.. c:function:: PyArray_CLE(a,b)
+.. c:macro:: PyArray_CLE(a,b)
-.. c:function:: PyArray_CGE(a,b)
+.. c:macro:: PyArray_CGE(a,b)
-.. c:function:: PyArray_CEQ(a,b)
+.. c:macro:: PyArray_CEQ(a,b)
-.. c:function:: PyArray_CNE(a,b)
+.. c:macro:: PyArray_CNE(a,b)
Implements the complex comparisons between two complex numbers
(structures with a real and imag member) using NumPy's definition
@@ -3518,24 +3568,46 @@ Enumerated Types
.. c:type:: NPY_SORTKIND
- A special variable-type which can take on the values :c:data:`NPY_{KIND}`
- where ``{KIND}`` is
+ A special variable-type which can take on different values to indicate
+ the sorting algorithm being used.
+
+ .. c:var:: NPY_QUICKSORT
+
+ .. c:var:: NPY_HEAPSORT
- **QUICKSORT**, **HEAPSORT**, **MERGESORT**
+ .. c:var:: NPY_MERGESORT
+
+ .. c:var:: NPY_STABLESORT
+
+      Used as an alias of :c:data:`NPY_MERGESORT` and vice versa.
.. c:var:: NPY_NSORTS
- Defined to be the number of sorts.
+ Defined to be the number of sorts. It is fixed at three by the need for
+ backwards compatibility, and consequently :c:data:`NPY_MERGESORT` and
+ :c:data:`NPY_STABLESORT` are aliased to each other and may refer to one
+ of several stable sorting algorithms depending on the data type.
+
.. c:type:: NPY_SCALARKIND
A special variable type indicating the number of "kinds" of
scalars distinguished in determining scalar-coercion rules. This
- variable can take on the values :c:data:`NPY_{KIND}` where ``{KIND}`` can be
+ variable can take on the values:
+
+ .. c:var:: NPY_NOSCALAR
+
+ .. c:var:: NPY_BOOL_SCALAR
+
+ .. c:var:: NPY_INTPOS_SCALAR
+
+ .. c:var:: NPY_INTNEG_SCALAR
+
+ .. c:var:: NPY_FLOAT_SCALAR
+
+ .. c:var:: NPY_COMPLEX_SCALAR
- **NOSCALAR**, **BOOL_SCALAR**, **INTPOS_SCALAR**,
- **INTNEG_SCALAR**, **FLOAT_SCALAR**, **COMPLEX_SCALAR**,
- **OBJECT_SCALAR**
+ .. c:var:: NPY_OBJECT_SCALAR
.. c:var:: NPY_NSCALARKINDS
diff --git a/doc/source/reference/c-api.config.rst b/doc/source/reference/c-api/config.rst
index 60bf61a32..05e6fe44d 100644
--- a/doc/source/reference/c-api.config.rst
+++ b/doc/source/reference/c-api/config.rst
@@ -101,3 +101,22 @@ Platform information
Returns the endianness of the current platform.
One of :c:data:`NPY_CPU_BIG`, :c:data:`NPY_CPU_LITTLE`,
or :c:data:`NPY_CPU_UNKNOWN_ENDIAN`.
+
+
+Compiler directives
+-------------------
+
+.. c:var:: NPY_LIKELY
+.. c:var:: NPY_UNLIKELY
+.. c:var:: NPY_UNUSED
+
+
+Interrupt Handling
+------------------
+
+.. c:var:: NPY_INTERRUPT_H
+.. c:var:: NPY_SIGSETJMP
+.. c:var:: NPY_SIGLONGJMP
+.. c:var:: NPY_SIGJMP_BUF
+.. c:var:: NPY_SIGINT_ON
+.. c:var:: NPY_SIGINT_OFF
diff --git a/doc/source/reference/c-api.coremath.rst b/doc/source/reference/c-api/coremath.rst
index 691f73287..7e00322f9 100644
--- a/doc/source/reference/c-api.coremath.rst
+++ b/doc/source/reference/c-api/coremath.rst
@@ -80,8 +80,9 @@ Floating point classification
Useful math constants
~~~~~~~~~~~~~~~~~~~~~
-The following math constants are available in npy_math.h. Single and extended
-precision are also available by adding the F and L suffixes respectively.
+The following math constants are available in ``npy_math.h``. Single
+and extended precision are also available by adding the ``f`` and
+``l`` suffixes respectively.
.. c:var:: NPY_E
@@ -184,7 +185,7 @@ Those can be useful for precise floating point comparison.
* NPY_FPE_INVALID
Note that :c:func:`npy_get_floatstatus_barrier` is preferable as it prevents
- agressive compiler optimizations reordering the call relative to
+ aggressive compiler optimizations reordering the call relative to
the code setting the status, which could lead to incorrect results.
.. versionadded:: 1.9.0
@@ -192,7 +193,7 @@ Those can be useful for precise floating point comparison.
.. c:function:: int npy_get_floatstatus_barrier(char*)
Get floating point status. A pointer to a local variable is passed in to
- prevent aggresive compiler optimizations from reodering this function call
+    prevent aggressive compiler optimizations from reordering this function call
relative to the code setting the status, which could lead to incorrect
results.
@@ -210,7 +211,7 @@ Those can be useful for precise floating point comparison.
Clears the floating point status. Returns the previous status mask.
Note that :c:func:`npy_clear_floatstatus_barrier` is preferable as it
- prevents agressive compiler optimizations reordering the call relative to
+ prevents aggressive compiler optimizations reordering the call relative to
the code setting the status, which could lead to incorrect results.
.. versionadded:: 1.9.0
@@ -218,7 +219,7 @@ Those can be useful for precise floating point comparison.
.. c:function:: int npy_clear_floatstatus_barrier(char*)
Clears the floating point status. A pointer to a local variable is passed in to
- prevent aggresive compiler optimizations from reodering this function call.
+    prevent aggressive compiler optimizations from reordering this function call.
Returns the previous status mask.
.. versionadded:: 1.15.0
@@ -258,7 +259,7 @@ and co.
Half-precision functions
~~~~~~~~~~~~~~~~~~~~~~~~
-.. versionadded:: 2.0.0
+.. versionadded:: 1.6.0
The header file <numpy/halffloat.h> provides functions to work with
IEEE 754-2008 16-bit floating point values. While this format is
diff --git a/doc/source/reference/c-api.deprecations.rst b/doc/source/reference/c-api/deprecations.rst
index a382017a2..a382017a2 100644
--- a/doc/source/reference/c-api.deprecations.rst
+++ b/doc/source/reference/c-api/deprecations.rst
diff --git a/doc/source/reference/c-api.dtype.rst b/doc/source/reference/c-api/dtype.rst
index 8af3a9080..72e908861 100644
--- a/doc/source/reference/c-api.dtype.rst
+++ b/doc/source/reference/c-api/dtype.rst
@@ -25,6 +25,8 @@ select the precision desired.
Enumerated Types
----------------
+.. c:var:: NPY_TYPES
+
There is a list of enumerated types defined providing the basic 24
data types plus some useful generic names. Whenever the code requires
a type number, one of these enumerated types is requested. The types
@@ -306,13 +308,45 @@ to the front of the integer name.
(unsigned) char
-.. c:type:: npy_(u)short
+.. c:type:: npy_short
+
+ short
+
+.. c:type:: npy_ushort
+
+ unsigned short
+
+.. c:type:: npy_uint
+
+ unsigned int
+
+.. c:type:: npy_int
+
+ int
+
+.. c:type:: npy_int16
+
+ 16-bit integer
+
+.. c:type:: npy_uint16
- (unsigned) short
+ 16-bit unsigned integer
-.. c:type:: npy_(u)int
+.. c:type:: npy_int32
- (unsigned) int
+ 32-bit integer
+
+.. c:type:: npy_uint32
+
+ 32-bit unsigned integer
+
+.. c:type:: npy_int64
+
+ 64-bit integer
+
+.. c:type:: npy_uint64
+
+ 64-bit unsigned integer
.. c:type:: npy_(u)long
@@ -322,22 +356,31 @@ to the front of the integer name.
(unsigned long long int)
-.. c:type:: npy_(u)intp
+.. c:type:: npy_intp
- (unsigned) Py_intptr_t (an integer that is the size of a pointer on
+ Py_intptr_t (an integer that is the size of a pointer on
+ the platform).
+
+.. c:type:: npy_uintp
+
+ unsigned Py_intptr_t (an integer that is the size of a pointer on
the platform).
(Complex) Floating point
^^^^^^^^^^^^^^^^^^^^^^^^
+.. c:type:: npy_half
+
+ 16-bit float
+
.. c:type:: npy_(c)float
- float
+ 32-bit float
.. c:type:: npy_(c)double
- double
+ 64-bit double
.. c:type:: npy_(c)longdouble
diff --git a/doc/source/reference/c-api.generalized-ufuncs.rst b/doc/source/reference/c-api/generalized-ufuncs.rst
index 2c631531f..b59f077ad 100644
--- a/doc/source/reference/c-api.generalized-ufuncs.rst
+++ b/doc/source/reference/c-api/generalized-ufuncs.rst
@@ -127,38 +127,56 @@ The formal syntax of signatures is as follows::
<Output arguments> ::= <Argument list>
<Argument list> ::= nil | <Argument> | <Argument> "," <Argument list>
<Argument> ::= "(" <Core dimension list> ")"
- <Core dimension list> ::= nil | <Core dimension name> |
- <Core dimension name> "," <Core dimension list>
- <Core dimension name> ::= valid Python variable name
-
+ <Core dimension list> ::= nil | <Core dimension> |
+ <Core dimension> "," <Core dimension list>
+ <Core dimension> ::= <Dimension name> <Dimension modifier>
+ <Dimension name> ::= valid Python variable name | valid integer
+ <Dimension modifier> ::= nil | "?"
Notes:
#. All quotes are for clarity.
-#. Core dimensions that share the same name must have the exact same size.
+#. Unmodified core dimensions that share the same name must have the same size.
Each dimension name typically corresponds to one level of looping in the
elementary function's implementation.
#. White spaces are ignored.
+#. An integer as a dimension name freezes that dimension to the value.
+#. If the name is suffixed with the "?" modifier, the dimension is a core
+ dimension only if it exists on all inputs and outputs that share it;
+ otherwise it is ignored (and replaced by a dimension of size 1 for the
+ elementary function).
Here are some examples of signatures:
-+-------------+------------------------+-----------------------------------+
-| add | ``(),()->()`` | |
-+-------------+------------------------+-----------------------------------+
-| sum1d | ``(i)->()`` | |
-+-------------+------------------------+-----------------------------------+
-| inner1d | ``(i),(i)->()`` | |
-+-------------+------------------------+-----------------------------------+
-| matmat | ``(m,n),(n,p)->(m,p)`` | matrix multiplication |
-+-------------+------------------------+-----------------------------------+
-| vecmat | ``(n),(n,p)->(p)`` | vector-matrix multiplication |
-+-------------+------------------------+-----------------------------------+
-| matvec | ``(m,n),(n)->(m)`` | matrix-vector multiplication |
-+-------------+------------------------+-----------------------------------+
-| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, |
-| | | outer over the second to last, |
-| | | and loop/broadcast over the rest. |
-+-------------+------------------------+-----------------------------------+
++-------------+----------------------------+-----------------------------------+
+| name | signature | common usage |
++=============+============================+===================================+
+| add | ``(),()->()`` | binary ufunc |
++-------------+----------------------------+-----------------------------------+
+| sum1d | ``(i)->()`` | reduction |
++-------------+----------------------------+-----------------------------------+
+| inner1d | ``(i),(i)->()`` | vector-vector multiplication |
++-------------+----------------------------+-----------------------------------+
+| matmat | ``(m,n),(n,p)->(m,p)`` | matrix multiplication |
++-------------+----------------------------+-----------------------------------+
+| vecmat | ``(n),(n,p)->(p)`` | vector-matrix multiplication |
++-------------+----------------------------+-----------------------------------+
+| matvec | ``(m,n),(n)->(m)`` | matrix-vector multiplication |
++-------------+----------------------------+-----------------------------------+
+| matmul | ``(m?,n),(n,p?)->(m?,p?)`` | combination of the four above |
++-------------+----------------------------+-----------------------------------+
+| outer_inner | ``(i,t),(j,t)->(i,j)`` | inner over the last dimension, |
+| | | outer over the second to last, |
+| | | and loop/broadcast over the rest. |
++-------------+----------------------------+-----------------------------------+
+| cross1d | ``(3),(3)->(3)`` | cross product where the last |
+| | | dimension is frozen and must be 3 |
++-------------+----------------------------+-----------------------------------+
+
+.. _frozen:
+
+The last is an instance of freezing a core dimension and can be used to
+improve ufunc performance.
C-API for implementing Elementary Functions
-------------------------------------------
diff --git a/doc/source/reference/c-api.rst b/doc/source/reference/c-api/index.rst
index b8cbe97b2..56fe8e473 100644
--- a/doc/source/reference/c-api.rst
+++ b/doc/source/reference/c-api/index.rst
@@ -40,12 +40,12 @@ code.
.. toctree::
:maxdepth: 2
- c-api.types-and-structures
- c-api.config
- c-api.dtype
- c-api.array
- c-api.iterator
- c-api.ufunc
- c-api.generalized-ufuncs
- c-api.coremath
- c-api.deprecations
+ types-and-structures
+ config
+ dtype
+ array
+ iterator
+ ufunc
+ generalized-ufuncs
+ coremath
+ deprecations
diff --git a/doc/source/reference/c-api.iterator.rst b/doc/source/reference/c-api/iterator.rst
index 940452d3c..b77d029cc 100644
--- a/doc/source/reference/c-api.iterator.rst
+++ b/doc/source/reference/c-api/iterator.rst
@@ -593,25 +593,23 @@ Construction and Destruction
code doing iteration can write to this operand to
control which elements will be untouched and which ones will be
modified. This is useful when the mask should be a combination
- of input masks, for example. Mask values can be created
- with the :c:func:`NpyMask_Create` function.
+ of input masks.
.. c:var:: NPY_ITER_WRITEMASKED
.. versionadded:: 1.7
- Indicates that only elements which the operand with
- the ARRAYMASK flag indicates are intended to be modified
- by the iteration. In general, the iterator does not enforce
- this, it is up to the code doing the iteration to follow
- that promise. Code can use the :c:func:`NpyMask_IsExposed`
- inline function to test whether the mask at a particular
- element allows writing.
+ This array is the mask for all `writemasked <numpy.nditer>`
+ operands. Code uses the ``writemasked`` flag which indicates
+ that only elements where the chosen ARRAYMASK operand is True
+ will be written to. In general, the iterator does not enforce
+ this, it is up to the code doing the iteration to follow that
+ promise.
- When this flag is used, and this operand is buffered, this
- changes how data is copied from the buffer into the array.
+ When ``writemasked`` flag is used, and this operand is buffered,
+ this changes how data is copied from the buffer into the array.
A masked copying routine is used, which only copies the
- elements in the buffer for which :c:func:`NpyMask_IsExposed`
+ elements in the buffer for which ``writemasked``
returns true from the corresponding element in the ARRAYMASK
operand.
@@ -630,7 +628,7 @@ Construction and Destruction
.. c:function:: NpyIter* NpyIter_AdvancedNew( \
npy_intp nop, PyArrayObject** op, npy_uint32 flags, NPY_ORDER order, \
NPY_CASTING casting, npy_uint32* op_flags, PyArray_Descr** op_dtypes, \
- int oa_ndim, int** op_axes, npy_intp* itershape, npy_intp buffersize)
+ int oa_ndim, int** op_axes, npy_intp const* itershape, npy_intp buffersize)
Extends :c:func:`NpyIter_MultiNew` with several advanced options providing
more control over broadcasting and buffering.
@@ -867,7 +865,7 @@ Construction and Destruction
} while (iternext2(iter2));
} while (iternext1(iter1));
-.. c:function:: int NpyIter_GotoMultiIndex(NpyIter* iter, npy_intp* multi_index)
+.. c:function:: int NpyIter_GotoMultiIndex(NpyIter* iter, npy_intp const* multi_index)
Adjusts the iterator to point to the ``ndim`` indices
pointed to by ``multi_index``. Returns an error if a multi-index
@@ -974,19 +972,6 @@ Construction and Destruction
Returns the number of operands in the iterator.
- When :c:data:`NPY_ITER_USE_MASKNA` is used on an operand, a new
- operand is added to the end of the operand list in the iterator
- to track that operand's NA mask. Thus, this equals the number
- of construction operands plus the number of operands for
- which the flag :c:data:`NPY_ITER_USE_MASKNA` was specified.
-
-.. c:function:: int NpyIter_GetFirstMaskNAOp(NpyIter* iter)
-
- .. versionadded:: 1.7
-
- Returns the index of the first NA mask operand in the array. This
- value is equal to the number of operands passed into the constructor.
-
.. c:function:: npy_intp* NpyIter_GetAxisStrideArray(NpyIter* iter, int axis)
Gets the array of strides for the specified axis. Requires that
@@ -1023,16 +1008,6 @@ Construction and Destruction
that are being iterated. The result points into ``iter``,
so the caller does not gain any references to the PyObjects.
-.. c:function:: npy_int8* NpyIter_GetMaskNAIndexArray(NpyIter* iter)
-
- .. versionadded:: 1.7
-
- This gives back a pointer to the ``nop`` indices which map
- construction operands with :c:data:`NPY_ITER_USE_MASKNA` flagged
- to their corresponding NA mask operands and vice versa. For
- operands which were not flagged with :c:data:`NPY_ITER_USE_MASKNA`,
- this array contains negative values.
-
.. c:function:: PyObject* NpyIter_GetIterView(NpyIter* iter, npy_intp i)
This gives back a reference to a new ndarray view, which is a view
diff --git a/doc/source/reference/c-api.types-and-structures.rst b/doc/source/reference/c-api/types-and-structures.rst
index 095693c5b..336dff211 100644
--- a/doc/source/reference/c-api.types-and-structures.rst
+++ b/doc/source/reference/c-api/types-and-structures.rst
@@ -1,3 +1,4 @@
+
*****************************
Python Types and C-Structures
*****************************
@@ -57,8 +58,8 @@ types are place holders that allow the array scalars to fit into a
hierarchy of actual Python types.
-PyArray_Type
-------------
+PyArray_Type and PyArrayObject
+------------------------------
.. c:var:: PyArray_Type
@@ -74,8 +75,9 @@ PyArray_Type
subclasses) will have this structure. For future compatibility,
these structure members should normally be accessed using the
provided macros. If you need a shorter name, then you can make use
- of :c:type:`NPY_AO` which is defined to be equivalent to
- :c:type:`PyArrayObject`.
+ of :c:type:`NPY_AO` (deprecated) which is defined to be equivalent to
+ :c:type:`PyArrayObject`. Direct access to the struct fields are
+ deprecated. Use the `PyArray_*(arr)` form instead.
.. code-block:: c
@@ -91,7 +93,7 @@ PyArray_Type
PyObject *weakreflist;
} PyArrayObject;
-.. c:macro: PyArrayObject.PyObject_HEAD
+.. c:macro:: PyArrayObject.PyObject_HEAD
This is needed by all Python objects. It consists of (at least)
a reference count member ( ``ob_refcnt`` ) and a pointer to the
@@ -103,7 +105,8 @@ PyArray_Type
.. c:member:: char *PyArrayObject.data
- A pointer to the first element of the array. This pointer can
+ Accessible via :c:data:`PyArray_DATA`, this data member is a
+ pointer to the first element of the array. This pointer can
(and normally should) be recast to the data type of the array.
.. c:member:: int PyArrayObject.nd
@@ -111,33 +114,38 @@ PyArray_Type
An integer providing the number of dimensions for this
array. When nd is 0, the array is sometimes called a rank-0
array. Such arrays have undefined dimensions and strides and
- cannot be accessed. :c:data:`NPY_MAXDIMS` is the largest number of
- dimensions for any array.
+ cannot be accessed. Macro :c:data:`PyArray_NDIM` defined in
+ ``ndarraytypes.h`` points to this data member. :c:data:`NPY_MAXDIMS`
+ is the largest number of dimensions for any array.
.. c:member:: npy_intp PyArrayObject.dimensions
An array of integers providing the shape in each dimension as
long as nd :math:`\geq` 1. The integer is always large enough
to hold a pointer on the platform, so the dimension size is
- only limited by memory.
+ only limited by memory. :c:data:`PyArray_DIMS` is the macro
+ associated with this data member.
.. c:member:: npy_intp *PyArrayObject.strides
An array of integers providing for each dimension the number of
bytes that must be skipped to get to the next element in that
- dimension.
+ dimension. Associated with macro :c:data:`PyArray_STRIDES`.
.. c:member:: PyObject *PyArrayObject.base
- This member is used to hold a pointer to another Python object that
- is related to this array. There are two use cases: 1) If this array
- does not own its own memory, then base points to the Python object
- that owns it (perhaps another array object), 2) If this array has
- the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
- :c:data:NPY_ARRAY_WRITEBACKIFCOPY`: flag set, then this array is
- a working copy of a "misbehaved" array. When
- ``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to by base
- will be updated with the contents of this array.
+ Pointed to by :c:data:`PyArray_BASE`, this member is used to hold a
+ pointer to another Python object that is related to this array.
+ There are two use cases:
+
+ - If this array does not own its own memory, then base points to the
+ Python object that owns it (perhaps another array object)
+ - If this array has the (deprecated) :c:data:`NPY_ARRAY_UPDATEIFCOPY` or
+ :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` flag set, then this array is a working
+ copy of a "misbehaved" array.
+
+ When ``PyArray_ResolveWritebackIfCopy`` is called, the array pointed to
+ by base will be updated with the contents of this array.
.. c:member:: PyArray_Descr *PyArrayObject.descr
@@ -147,11 +155,13 @@ PyArray_Type
descriptor structure for each data type supported. This
descriptor structure contains useful information about the type
as well as a pointer to a table of function pointers to
- implement specific functionality.
+ implement specific functionality. As the name suggests, it is
+ associated with the macro :c:data:`PyArray_DESCR`.
.. c:member:: int PyArrayObject.flags
- Flags indicating how the memory pointed to by data is to be
+ Pointed to by the macro :c:data:`PyArray_FLAGS`, this data member represents
+ the flags indicating how the memory pointed to by data is to be
interpreted. Possible flags are :c:data:`NPY_ARRAY_C_CONTIGUOUS`,
:c:data:`NPY_ARRAY_F_CONTIGUOUS`, :c:data:`NPY_ARRAY_OWNDATA`,
:c:data:`NPY_ARRAY_ALIGNED`, :c:data:`NPY_ARRAY_WRITEABLE`,
@@ -163,8 +173,8 @@ PyArray_Type
weakref module).
-PyArrayDescr_Type
------------------
+PyArrayDescr_Type and PyArray_Descr
+-----------------------------------
.. c:var:: PyArrayDescr_Type
@@ -182,8 +192,18 @@ PyArrayDescr_Type
.. c:type:: PyArray_Descr
- The format of the :c:type:`PyArray_Descr` structure that lies at the
- heart of the :c:data:`PyArrayDescr_Type` is
+ The :c:type:`PyArray_Descr` structure lies at the heart of the
+ :c:data:`PyArrayDescr_Type`. While it is described here for
+ completeness, it should be considered internal to NumPy and manipulated via
+ ``PyArrayDescr_*`` or ``PyDataType*`` functions and macros. The size of this
+ structure is subject to change across versions of NumPy. To ensure
+ compatibility:
+
+ - Never declare a non-pointer instance of the struct
+ - Never perform pointer arithmetic
+ - Never use ``sizeof(PyArray_Descr)``
+
+ It has the following structure:
.. code-block:: c
@@ -193,14 +213,17 @@ PyArrayDescr_Type
char kind;
char type;
char byteorder;
- char unused;
- int flags;
+ char flags;
int type_num;
int elsize;
int alignment;
PyArray_ArrayDescr *subarray;
PyObject *fields;
+ PyObject *names;
PyArray_ArrFuncs *f;
+ PyObject *metadata;
+ NpyAuxData *c_metadata;
+ npy_hash_t hash;
} PyArray_Descr;
.. c:member:: PyTypeObject *PyArray_Descr.typeobj
@@ -232,7 +255,7 @@ PyArrayDescr_Type
endian), '=' (native), '\|' (irrelevant, ignore). All builtin data-
types have byteorder '='.
-.. c:member:: int PyArray_Descr.flags
+.. c:member:: char PyArray_Descr.flags
A data-type bit-flag that determines if the data-type exhibits object-
array like behavior. Each bit in this member is a flag which are named
@@ -240,11 +263,13 @@ PyArrayDescr_Type
.. c:var:: NPY_ITEM_REFCOUNT
- .. c:var:: NPY_ITEM_HASOBJECT
-
Indicates that items of this data-type must be reference
counted (using :c:func:`Py_INCREF` and :c:func:`Py_DECREF` ).
+ .. c:var:: NPY_ITEM_HASOBJECT
+
+ Same as :c:data:`NPY_ITEM_REFCOUNT`.
+
.. c:var:: NPY_LIST_PICKLE
Indicates arrays of this data-type must be converted to a list
@@ -367,6 +392,11 @@ PyArrayDescr_Type
normally a Python string. These tuples are placed in this
dictionary keyed by name (and also title if given).
+.. c:member:: PyObject *PyArray_Descr.names
+
+ An ordered tuple of field names. It is NULL if no field is
+ defined.
+
.. c:member:: PyArray_ArrFuncs *PyArray_Descr.f
A pointer to a structure containing functions that the type needs
@@ -374,6 +404,20 @@ PyArrayDescr_Type
thing as the universal functions (ufuncs) described later. Their
signatures can vary arbitrarily.
+.. c:member:: PyObject *PyArray_Descr.metadata
+
+ Metadata about this dtype.
+
+.. c:member:: NpyAuxData *PyArray_Descr.c_metadata
+
+ Metadata specific to the C implementation
+ of the particular dtype. Added for NumPy 1.7.0.
+
+.. c:member:: npy_hash_t PyArray_Descr.hash
+
+ Currently unused. Reserved for future use in caching
+ hash values.
+
.. c:type:: PyArray_ArrFuncs
Functions implementing internal features. Not all of these
@@ -498,20 +542,19 @@ PyArrayDescr_Type
and ``is2`` *bytes*, respectively. This function requires
behaved (though not necessarily contiguous) memory.
- .. c:member:: int scanfunc(FILE* fd, void* ip , void* sep , void* arr)
+ .. c:member:: int scanfunc(FILE* fd, void* ip, void* arr)
A pointer to a function that scans (scanf style) one element
of the corresponding type from the file descriptor ``fd`` into
the array memory pointed to by ``ip``. The array is assumed
- to be behaved. If ``sep`` is not NULL, then a separator string
- is also scanned from the file before returning. The last
- argument ``arr`` is the array to be scanned into. A 0 is
- returned if the scan is successful. A negative number
- indicates something went wrong: -1 means the end of file was
- reached before the separator string could be scanned, -4 means
- that the end of file was reached before the element could be
- scanned, and -3 means that the element could not be
- interpreted from the format string. Requires a behaved array.
+ to be behaved.
+ The last argument ``arr`` is the array to be scanned into.
+ Returns number of receiving arguments successfully assigned (which
+ may be zero in case a matching failure occurred before the first
+ receiving argument was assigned), or EOF if input failure occurs
+ before the first receiving argument was assigned.
+ This function should be called without holding the Python GIL, and
+ has to grab it for error reporting.
.. c:member:: int fromstr(char* str, void* ip, char** endptr, void* arr)
@@ -522,6 +565,8 @@ PyArrayDescr_Type
string. The last argument ``arr`` is the array into which ip
points (needed for variable-size data- types). Returns 0 on
success or -1 on failure. Requires a behaved array.
+ This function should be called without holding the Python GIL, and
+ has to grab it for error reporting.
.. c:member:: Bool nonzero(void* data, void* arr)
@@ -643,25 +688,28 @@ PyArrayDescr_Type
The :c:data:`PyArray_Type` typeobject implements many of the features of
-Python objects including the tp_as_number, tp_as_sequence,
-tp_as_mapping, and tp_as_buffer interfaces. The rich comparison
-(tp_richcompare) is also used along with new-style attribute lookup
-for methods (tp_methods) and properties (tp_getset). The
-:c:data:`PyArray_Type` can also be sub-typed.
+:c:type:`Python objects <PyTypeObject>` including the :c:member:`tp_as_number
+<PyTypeObject.tp_as_number>`, :c:member:`tp_as_sequence
+<PyTypeObject.tp_as_sequence>`, :c:member:`tp_as_mapping
+<PyTypeObject.tp_as_mapping>`, and :c:member:`tp_as_buffer
+<PyTypeObject.tp_as_buffer>` interfaces. The :c:type:`rich comparison
+<richcmpfunc>` is also used along with new-style attribute lookup for
+members (:c:member:`tp_members <PyTypeObject.tp_members>`) and properties
+(:c:member:`tp_getset <PyTypeObject.tp_getset>`).
+The :c:data:`PyArray_Type` can also be sub-typed.
.. tip::
- The tp_as_number methods use a generic approach to call whatever
- function has been registered for handling the operation. The
- function PyNumeric_SetOps(..) can be used to register functions to
- handle particular mathematical operations (for all arrays). When
- the umath module is imported, it sets the numeric operations for
- all arrays to the corresponding ufuncs. The tp_str and tp_repr
- methods can also be altered using PyString_SetStringFunction(...).
+ The ``tp_as_number`` methods use a generic approach to call whatever
+ function has been registered for handling the operation. When the
+ ``_multiarray_umath`` module is imported, it sets the numeric operations
+ for all arrays to the corresponding ufuncs. This choice can be changed with
+ :c:func:`PyUFunc_ReplaceLoopBySignature`. The ``tp_str`` and ``tp_repr``
+ methods can also be altered using :c:func:`PyArray_SetStringFunction`.
-PyUFunc_Type
-------------
+PyUFunc_Type and PyUFuncObject
+------------------------------
.. c:var:: PyUFunc_Type
@@ -685,7 +733,14 @@ PyUFunc_Type
the information needed to call the underlying C-code loops that
perform the actual work. While it is described here for completeness, it
should be considered internal to NumPy and manipulated via ``PyUFunc_*``
- functions. It has the following structure:
+ functions. The size of this structure is subject to change across versions
+ of NumPy. To ensure compatibility:
+
+ - Never declare a non-pointer instance of the struct
+ - Never perform pointer arithmetic
+ - Never use ``sizeof(PyUFuncObject)``
+
+ It has the following structure:
.. code-block:: c
@@ -713,10 +768,13 @@ PyUFunc_Type
char *core_signature;
PyUFunc_TypeResolutionFunc *type_resolver;
PyUFunc_LegacyInnerLoopSelectionFunc *legacy_inner_loop_selector;
- void *reserved2;
PyUFunc_MaskedInnerLoopSelectionFunc *masked_inner_loop_selector;
npy_uint32 *op_flags;
npy_uint32 *iter_flags;
+ /* new in API version 0x0000000D */
+ npy_intp *core_dim_sizes;
+ npy_uint32 *core_dim_flags;
+
} PyUFuncObject;
.. c:macro: PyUFuncObject.PyObject_HEAD
@@ -743,8 +801,8 @@ PyUFunc_Type
the identity for this operation. It is only used for a
reduce-like call on an empty array.
- .. c:member:: void PyUFuncObject.functions(char** args, npy_intp* dims,
- npy_intp* steps, void* extradata)
+ .. c:member:: void PyUFuncObject.functions( \
+ char** args, npy_intp* dims, npy_intp* steps, void* extradata)
An array of function pointers --- one for each data type
supported by the ufunc. This is the vector loop that is called
@@ -776,6 +834,10 @@ PyUFunc_Type
specifies how many different 1-d loops (of the builtin data
types) are available.
+ .. c:member:: int PyUFuncObject.reserved1
+
+ Unused.
+
.. c:member:: char *PyUFuncObject.name
A string name for the ufunc. This is used dynamically to build
@@ -870,8 +932,23 @@ PyUFunc_Type
Override the default nditer flags for the ufunc.
-PyArrayIter_Type
-----------------
+ Added in API version 0x0000000D
+
+ .. c:member:: npy_intp *PyUFuncObject.core_dim_sizes
+
+ For each distinct core dimension, the possible
+ :ref:`frozen <frozen>` size if :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` is 0
+
+ .. c:member:: npy_uint32 *PyUFuncObject.core_dim_flags
+
+ For each distinct core dimension, a set of ``UFUNC_CORE_DIM*`` flags
+
+ - :c:data:`UFUNC_CORE_DIM_CAN_IGNORE` if the dim name ends in ``?``
+ - :c:data:`UFUNC_CORE_DIM_SIZE_INFERRED` if the dim size will be
+ determined from the operands and not from a :ref:`frozen <frozen>` signature
+
+PyArrayIter_Type and PyArrayIterObject
+--------------------------------------
.. c:var:: PyArrayIter_Type
@@ -980,8 +1057,8 @@ with it through the use of the macros :c:func:`PyArray_ITER_NEXT` (it),
:c:type:`PyArrayIterObject *`.
-PyArrayMultiIter_Type
----------------------
+PyArrayMultiIter_Type and PyArrayMultiIterObject
+------------------------------------------------
.. c:var:: PyArrayMultiIter_Type
@@ -1042,8 +1119,8 @@ PyArrayMultiIter_Type
arrays to be broadcast together. On return, the iterators are
adjusted for broadcasting.
-PyArrayNeighborhoodIter_Type
-----------------------------
+PyArrayNeighborhoodIter_Type and PyArrayNeighborhoodIterObject
+--------------------------------------------------------------
.. c:var:: PyArrayNeighborhoodIter_Type
@@ -1056,8 +1133,33 @@ PyArrayNeighborhoodIter_Type
:c:data:`PyArrayNeighborhoodIter_Type` is the
:c:type:`PyArrayNeighborhoodIterObject`.
-PyArrayFlags_Type
------------------
+ .. code-block:: c
+
+ typedef struct {
+ PyObject_HEAD
+ int nd_m1;
+ npy_intp index, size;
+ npy_intp coordinates[NPY_MAXDIMS];
+ npy_intp dims_m1[NPY_MAXDIMS];
+ npy_intp strides[NPY_MAXDIMS];
+ npy_intp backstrides[NPY_MAXDIMS];
+ npy_intp factors[NPY_MAXDIMS];
+ PyArrayObject *ao;
+ char *dataptr;
+ npy_bool contiguous;
+ npy_intp bounds[NPY_MAXDIMS][2];
+ npy_intp limits[NPY_MAXDIMS][2];
+ npy_intp limits_sizes[NPY_MAXDIMS];
+ npy_iter_get_dataptr_t translate;
+ npy_intp nd;
+ npy_intp dimensions[NPY_MAXDIMS];
+ PyArrayIterObject* _internal_iter;
+ char* constant;
+ int mode;
+ } PyArrayNeighborhoodIterObject;
+
+PyArrayFlags_Type and PyArrayFlagsObject
+----------------------------------------
.. c:var:: PyArrayFlags_Type
@@ -1067,6 +1169,16 @@ PyArrayFlags_Type
attributes or by accessing them as if the object were a dictionary
with the flag names as entries.
+.. c:type:: PyArrayFlagsObject
+
+ .. code-block:: c
+
+ typedef struct PyArrayFlagsObject {
+ PyObject_HEAD
+ PyObject *arr;
+ int flags;
+ } PyArrayFlagsObject;
+
ScalarArrayTypes
----------------
diff --git a/doc/source/reference/c-api.ufunc.rst b/doc/source/reference/c-api/ufunc.rst
index 07c7b0c80..c9cc60141 100644
--- a/doc/source/reference/c-api.ufunc.rst
+++ b/doc/source/reference/c-api/ufunc.rst
@@ -21,7 +21,17 @@ Constants
.. c:var:: PyUFunc_{VALUE}
- ``{VALUE}`` can be **One** (1), **Zero** (0), or **None** (-1)
+ .. c:var:: PyUFunc_One
+
+ .. c:var:: PyUFunc_Zero
+
+ .. c:var:: PyUFunc_MinusOne
+
+ .. c:var:: PyUFunc_ReorderableNone
+
+ .. c:var:: PyUFunc_None
+
+ .. c:var:: PyUFunc_IdentityValue
Macros
@@ -39,28 +49,6 @@ Macros
Used in universal function code to re-acquire the Python GIL if it
was released (because loop->obj was not true).
-.. c:function:: UFUNC_CHECK_ERROR(loop)
-
- A macro used internally to check for errors and goto fail if
- found. This macro requires a fail label in the current code
- block. The *loop* variable must have at least members (obj,
- errormask, and errorobj). If *loop* ->obj is nonzero, then
- :c:func:`PyErr_Occurred` () is called (meaning the GIL must be held). If
- *loop* ->obj is zero, then if *loop* ->errormask is nonzero,
- :c:func:`PyUFunc_checkfperr` is called with arguments *loop* ->errormask
- and *loop* ->errobj. If the result of this check of the IEEE
- floating point registers is true then the code redirects to the
- fail label which must be defined.
-
-.. c:function:: UFUNC_CHECK_STATUS(ret)
-
- Deprecated: use npy_clear_floatstatus from npy_math.h instead.
-
- A macro that expands to platform-dependent code. The *ret*
- variable can be any integer. The :c:data:`UFUNC_FPE_{ERR}` bits are
- set in *ret* according to the status of the corresponding error
- flags of the floating point processor.
-
Functions
---------
@@ -169,8 +157,12 @@ Functions
:param identity:
Either :c:data:`PyUFunc_One`, :c:data:`PyUFunc_Zero`,
- :c:data:`PyUFunc_None`. This specifies what should be returned when
+ :c:data:`PyUFunc_MinusOne`, or :c:data:`PyUFunc_None`.
+ This specifies what should be returned when
an empty array is passed to the reduce method of the ufunc.
+ The special value :c:data:`PyUFunc_IdentityValue` may only be used with
+ the :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity` method, to
+ allow an arbitrary python object to be used as the identity.
:param name:
The name for the ufunc as a ``NULL`` terminated string. Specifying
@@ -206,6 +198,21 @@ Functions
to calling PyUFunc_FromFuncAndData. A copy of the string is made,
so the passed in buffer can be freed.
+.. c:function:: PyObject* PyUFunc_FromFuncAndDataAndSignatureAndIdentity( \
+ PyUFuncGenericFunction *func, void **data, char *types, int ntypes, \
+ int nin, int nout, int identity, char *name, char *doc, int unused, \
+ char *signature, PyObject *identity_value)
+
+ This function is very similar to `PyUFunc_FromFuncAndDataAndSignature` above,
+ but has an extra *identity_value* argument, to define an arbitrary identity
+ for the ufunc when ``identity`` is passed as ``PyUFunc_IdentityValue``.
+
+ :param identity_value:
+ The identity for the new gufunc. Must be passed as ``NULL`` unless the
+ ``identity`` argument is ``PyUFunc_IdentityValue``. Setting it to NULL
+ is equivalent to calling PyUFunc_FromFuncAndDataAndSignature.
+
+
.. c:function:: int PyUFunc_RegisterLoopForType( \
PyUFuncObject* ufunc, int usertype, PyUFuncGenericFunction function, \
int* arg_types, void* data)
diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst
index 289822909..a22db3e8e 100644
--- a/doc/source/reference/distutils.rst
+++ b/doc/source/reference/distutils.rst
@@ -13,8 +13,7 @@ distutils, use the :func:`setup <core.setup>` command from
:mod:`numpy.distutils.misc_util` that can make it easier to construct
keyword arguments to pass to the setup function (by passing the
dictionary obtained from the todict() method of the class). More
-information is available in the NumPy Distutils Users Guide in
-``<site-packages>/numpy/doc/DISTUTILS.txt``.
+information is available in the :ref:`distutils-user-guide`.
.. index::
@@ -23,39 +22,31 @@ information is available in the NumPy Distutils Users Guide in
Modules in :mod:`numpy.distutils`
=================================
+.. toctree::
+ :maxdepth: 2
-misc_util
----------
+ distutils/misc_util
-.. module:: numpy.distutils.misc_util
+
+.. currentmodule:: numpy.distutils
.. autosummary::
:toctree: generated/
- get_numpy_include_dirs
- dict_append
- appendpath
- allpath
- dot_join
- generate_config_py
- get_cmd
- terminal_has_colors
- red_text
- green_text
- yellow_text
- blue_text
- cyan_text
- cyg2win32
- all_strings
- has_f_sources
- has_cxx_sources
- filter_sources
- get_dependencies
- is_local_src_dir
- get_ext_source_files
- get_script_files
+ ccompiler
+ cpuinfo.cpu
+ core.Extension
+ exec_command
+ log.set_verbosity
+ system_info.get_info
+ system_info.get_standard_file
+Configuration class
+===================
+
+.. currentmodule:: numpy.distutils.misc_util
+
.. class:: Configuration(package_name=None, parent_name=None, top_path=None, package_path=None, **attrs)
Construct a configuration instance for the given package name. If
@@ -110,20 +101,6 @@ misc_util
.. automethod:: get_info
-Other modules
--------------
-
-.. currentmodule:: numpy.distutils
-
-.. autosummary::
- :toctree: generated/
-
- system_info.get_info
- system_info.get_standard_file
- cpuinfo.cpu
- log.set_verbosity
- exec_command
-
Building Installable C libraries
================================
@@ -215,102 +192,4 @@ template and placed in the build directory to be used instead. Two
forms of template conversion are supported. The first form occurs for
files named <file>.ext.src where ext is a recognized Fortran
extension (f, f90, f95, f77, for, ftn, pyf). The second form is used
-for all other cases.
-
-.. index::
- single: code generation
-
-Fortran files
--------------
-
-This template converter will replicate all **function** and
-**subroutine** blocks in the file with names that contain '<...>'
-according to the rules in '<...>'. The number of comma-separated words
-in '<...>' determines the number of times the block is repeated. What
-these words are indicates what that repeat rule, '<...>', should be
-replaced with in each block. All of the repeat rules in a block must
-contain the same number of comma-separated words indicating the number
-of times that block should be repeated. If the word in the repeat rule
-needs a comma, leftarrow, or rightarrow, then prepend it with a
-backslash ' \'. If a word in the repeat rule matches ' \\<index>' then
-it will be replaced with the <index>-th word in the same repeat
-specification. There are two forms for the repeat rule: named and
-short.
-
-
-Named repeat rule
-^^^^^^^^^^^^^^^^^
-
-A named repeat rule is useful when the same set of repeats must be
-used several times in a block. It is specified using <rule1=item1,
-item2, item3,..., itemN>, where N is the number of times the block
-should be repeated. On each repeat of the block, the entire
-expression, '<...>' will be replaced first with item1, and then with
-item2, and so forth until N repeats are accomplished. Once a named
-repeat specification has been introduced, the same repeat rule may be
-used **in the current block** by referring only to the name
-(i.e. <rule1>.
-
-
-Short repeat rule
-^^^^^^^^^^^^^^^^^
-
-A short repeat rule looks like <item1, item2, item3, ..., itemN>. The
-rule specifies that the entire expression, '<...>' should be replaced
-first with item1, and then with item2, and so forth until N repeats
-are accomplished.
-
-
-Pre-defined names
-^^^^^^^^^^^^^^^^^
-
-The following predefined named repeat rules are available:
-
-- <prefix=s,d,c,z>
-
-- <_c=s,d,c,z>
-
-- <_t=real, double precision, complex, double complex>
-
-- <ftype=real, double precision, complex, double complex>
-
-- <ctype=float, double, complex_float, complex_double>
-
-- <ftypereal=float, double precision, \\0, \\1>
-
-- <ctypereal=float, double, \\0, \\1>
-
-
-Other files
------------
-
-Non-Fortran files use a separate syntax for defining template blocks
-that should be repeated using a variable expansion similar to the
-named repeat rules of the Fortran-specific repeats. The template rules
-for these files are:
-
-1. "/\**begin repeat "on a line by itself marks the beginning of
- a segment that should be repeated.
-
-2. Named variable expansions are defined using #name=item1, item2, item3,
- ..., itemN# and placed on successive lines. These variables are
- replaced in each repeat block with corresponding word. All named
- variables in the same repeat block must define the same number of
- words.
-
-3. In specifying the repeat rule for a named variable, item*N is short-
- hand for item, item, ..., item repeated N times. In addition,
- parenthesis in combination with \*N can be used for grouping several
- items that should be repeated. Thus, #name=(item1, item2)*4# is
- equivalent to #name=item1, item2, item1, item2, item1, item2, item1,
- item2#
-
-4. "\*/ "on a line by itself marks the end of the variable expansion
- naming. The next line is the first line that will be repeated using
- the named rules.
-
-5. Inside the block to be repeated, the variables that should be expanded
- are specified as @name@.
-
-6. "/\**end repeat**/ "on a line by itself marks the previous line
- as the last line of the block to be repeated.
+for all other cases. See :ref:`templating`.
diff --git a/doc/source/reference/distutils/misc_util.rst b/doc/source/reference/distutils/misc_util.rst
new file mode 100644
index 000000000..bbb83a5ab
--- /dev/null
+++ b/doc/source/reference/distutils/misc_util.rst
@@ -0,0 +1,7 @@
+distutils.misc_util
+===================
+
+.. automodule:: numpy.distutils.misc_util
+ :members:
+ :undoc-members:
+ :exclude-members: Configuration
diff --git a/doc/source/reference/distutils_guide.rst b/doc/source/reference/distutils_guide.rst
new file mode 100644
index 000000000..081719d16
--- /dev/null
+++ b/doc/source/reference/distutils_guide.rst
@@ -0,0 +1,7 @@
+.. _distutils-user-guide:
+
+NumPy Distutils - Users Guide
+=============================
+
+.. include:: ../../DISTUTILS.rst.txt
+ :start-line: 6
diff --git a/doc/source/reference/index.rst b/doc/source/reference/index.rst
index 2140c57f7..6742d605a 100644
--- a/doc/source/reference/index.rst
+++ b/doc/source/reference/index.rst
@@ -23,7 +23,8 @@ For learning how to use NumPy, see also :ref:`user`.
ufuncs
routines
distutils
- c-api
+ distutils_guide
+ c-api/index
internals
swig
@@ -35,4 +36,4 @@ Large parts of this manual originate from Travis E. Oliphant's book
`Guide to NumPy <https://archive.org/details/NumPyBook>`__ (which generously
entered Public Domain in August 2008). The reference documentation for many of
the functions are written by numerous contributors and developers of
-NumPy. \ No newline at end of file
+NumPy.
diff --git a/doc/source/reference/internals.rst b/doc/source/reference/internals.rst
index e1d6644a6..03d081bf9 100644
--- a/doc/source/reference/internals.rst
+++ b/doc/source/reference/internals.rst
@@ -5,5 +5,6 @@ NumPy internals
.. toctree::
internals.code-explanations
+ alignment
.. automodule:: numpy.doc.internals
diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst
index 427ad1536..204ebfe08 100644
--- a/doc/source/reference/maskedarray.baseclass.rst
+++ b/doc/source/reference/maskedarray.baseclass.rst
@@ -49,11 +49,11 @@ The :class:`MaskedArray` class
.. class:: MaskedArray
- A subclass of :class:`~numpy.ndarray` designed to manipulate numerical arrays with missing data.
+A subclass of :class:`~numpy.ndarray` designed to manipulate numerical arrays with missing data.
- An instance of :class:`MaskedArray` can be thought as the combination of several elements:
+An instance of :class:`MaskedArray` can be thought as the combination of several elements:
* The :attr:`~MaskedArray.data`, as a regular :class:`numpy.ndarray` of any shape or datatype (the data).
* A boolean :attr:`~numpy.ma.MaskedArray.mask` with the same shape as the data, where a ``True`` value indicates that the corresponding element of the data is invalid.
@@ -62,89 +62,26 @@ The :class:`MaskedArray` class
+.. _ma-attributes:
+
Attributes and properties of masked arrays
------------------------------------------
.. seealso:: :ref:`Array Attributes <arrays.ndarray.attributes>`
+.. autoattribute:: MaskedArray.data
-.. attribute:: MaskedArray.data
-
- Returns the underlying data, as a view of the masked array.
- If the underlying data is a subclass of :class:`numpy.ndarray`, it is
- returned as such.
-
- >>> x = ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
- >>> x.data
- matrix([[1, 2],
- [3, 4]])
-
- The type of the data can be accessed through the :attr:`baseclass`
- attribute.
-
-.. attribute:: MaskedArray.mask
-
- Returns the underlying mask, as an array with the same shape and structure
- as the data, but where all fields are atomically booleans.
- A value of ``True`` indicates an invalid entry.
-
-
-.. attribute:: MaskedArray.recordmask
-
- Returns the mask of the array if it has no named fields. For structured
- arrays, returns a ndarray of booleans where entries are ``True`` if **all**
- the fields are masked, ``False`` otherwise::
-
- >>> x = ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)],
- ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
- ... dtype=[('a', int), ('b', int)])
- >>> x.recordmask
- array([False, False, True, False, False])
-
-
-.. attribute:: MaskedArray.fill_value
-
- Returns the value used to fill the invalid entries of a masked array.
- The value is either a scalar (if the masked array has no named fields),
- or a 0-D ndarray with the same :attr:`dtype` as the masked array if it has
- named fields.
-
- The default filling value depends on the datatype of the array:
-
- ======== ========
- datatype default
- ======== ========
- bool True
- int 999999
- float 1.e20
- complex 1.e20+0j
- object '?'
- string 'N/A'
- ======== ========
-
-
-
-.. attribute:: MaskedArray.baseclass
-
- Returns the class of the underlying data.
-
- >>> x = ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 0], [1, 0]])
- >>> x.baseclass
- <class 'numpy.matrixlib.defmatrix.matrix'>
-
-
-.. attribute:: MaskedArray.sharedmask
+.. autoattribute:: MaskedArray.mask
- Returns whether the mask of the array is shared between several masked arrays.
- If this is the case, any modification to the mask of one array will be
- propagated to the others.
+.. autoattribute:: MaskedArray.recordmask
+.. autoattribute:: MaskedArray.fill_value
-.. attribute:: MaskedArray.hardmask
+.. autoattribute:: MaskedArray.baseclass
- Returns whether the mask is hard (``True``) or soft (``False``).
- When the mask is hard, masked entries cannot be unmasked.
+.. autoattribute:: MaskedArray.sharedmask
+.. autoattribute:: MaskedArray.hardmask
As :class:`MaskedArray` is a subclass of :class:`~numpy.ndarray`, a masked array also inherits all the attributes and properties of a :class:`~numpy.ndarray` instance.
@@ -184,10 +121,8 @@ Conversion
:toctree: generated/
MaskedArray.__float__
- MaskedArray.__hex__
MaskedArray.__int__
MaskedArray.__long__
- MaskedArray.__oct__
MaskedArray.view
MaskedArray.astype
@@ -311,7 +246,7 @@ Truth value of an array (:func:`bool()`):
.. autosummary::
:toctree: generated/
- MaskedArray.__nonzero__
+ MaskedArray.__bool__
Arithmetic:
@@ -328,7 +263,6 @@ Arithmetic:
MaskedArray.__mul__
MaskedArray.__rmul__
MaskedArray.__div__
- MaskedArray.__rdiv__
MaskedArray.__truediv__
MaskedArray.__rtruediv__
MaskedArray.__floordiv__
diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst
index 07ad6c292..7375d60fb 100644
--- a/doc/source/reference/maskedarray.generic.rst
+++ b/doc/source/reference/maskedarray.generic.rst
@@ -2,7 +2,7 @@
.. _maskedarray.generic:
-
+.. module:: numpy.ma
The :mod:`numpy.ma` module
==========================
diff --git a/doc/source/reference/random/bit_generators/bitgenerators.rst b/doc/source/reference/random/bit_generators/bitgenerators.rst
new file mode 100644
index 000000000..1474f7dac
--- /dev/null
+++ b/doc/source/reference/random/bit_generators/bitgenerators.rst
@@ -0,0 +1,11 @@
+:orphan:
+
+BitGenerator
+------------
+
+.. currentmodule:: numpy.random.bit_generator
+
+.. autosummary::
+ :toctree: generated/
+
+ BitGenerator
diff --git a/doc/source/reference/random/bit_generators/index.rst b/doc/source/reference/random/bit_generators/index.rst
new file mode 100644
index 000000000..35d9e5d09
--- /dev/null
+++ b/doc/source/reference/random/bit_generators/index.rst
@@ -0,0 +1,112 @@
+.. _bit_generator:
+
+.. currentmodule:: numpy.random
+
+Bit Generators
+--------------
+
+The random values produced by :class:`~Generator`
+originate in a BitGenerator. The BitGenerators do not directly provide
+random numbers and only contain methods used for seeding, getting or
+setting the state, jumping or advancing the state, and for accessing
+low-level wrappers for consumption by code that can efficiently
+access the functions provided, e.g., `numba <https://numba.pydata.org>`_.
+
+Supported BitGenerators
+=======================
+
+The included BitGenerators are:
+
+* PCG-64 - The default. A fast generator that supports many parallel streams
+ and can be advanced by an arbitrary amount. See the documentation for
+ :meth:`~.PCG64.advance`. PCG-64 has a period of :math:`2^{128}`. See the `PCG
+ author's page`_ for more details about this class of PRNG.
+* MT19937 - The standard Python BitGenerator. Adds a `~mt19937.MT19937.jumped`
+ function that returns a new generator with state as-if :math:`2^{128}` draws have
+ been made.
+* Philox - A counter-based generator capable of being advanced an
+ arbitrary number of steps or generating independent streams. See the
+ `Random123`_ page for more details about this class of bit generators.
+* SFC64 - A fast generator based on random invertible mappings. Usually the
+ fastest generator of the four. See the `SFC author's page`_ for (a little)
+ more detail.
+
+.. _`PCG author's page`: http://www.pcg-random.org/
+.. _`Random123`: https://www.deshawresearch.com/resources_random123.html
+.. _`SFC author's page`: http://pracrand.sourceforge.net/RNG_engines.txt
+
+.. toctree::
+ :maxdepth: 1
+
+ BitGenerator <bitgenerators>
+ MT19937 <mt19937>
+ PCG64 <pcg64>
+ Philox <philox>
+ SFC64 <sfc64>
+
+Seeding and Entropy
+-------------------
+
+A BitGenerator provides a stream of random values. In order to generate
+reproducible streams, BitGenerators support setting their initial state via a
+seed. All of the provided BitGenerators will take an arbitrary-sized
+non-negative integer, or a list of such integers, as a seed. BitGenerators
+need to take those inputs and process them into a high-quality internal state
+for the BitGenerator. All of the BitGenerators in numpy delegate that task to
+`~SeedSequence`, which uses hashing techniques to ensure that even low-quality
+seeds generate high-quality initial states.
+
+.. code-block:: python
+
+ from numpy.random import PCG64
+
+ bg = PCG64(12345678903141592653589793)
+
+.. end_block
+
+`~SeedSequence` is designed to be convenient for implementing best practices.
+We recommend that a stochastic program defaults to using entropy from the OS so
+that each run is different. The program should print out or log that entropy.
+In order to reproduce a past value, the program should allow the user to
+provide that value through some mechanism, a command-line argument is common,
+so that the user can then re-enter that entropy to reproduce the result.
+`~SeedSequence` can take care of everything except for communicating with the
+user, which is up to you.
+
+.. code-block:: python
+
+ from numpy.random import PCG64, SeedSequence
+
+ # Get the user's seed somehow, maybe through `argparse`.
+ # If the user did not provide a seed, it should return `None`.
+ seed = get_user_seed()
+ ss = SeedSequence(seed)
+ print('seed = {}'.format(ss.entropy))
+ bg = PCG64(ss)
+
+.. end_block
+
+We default to using a 128-bit integer using entropy gathered from the OS. This
+is a good amount of entropy to initialize all of the generators that we have in
+numpy. We do not recommend using small seeds below 32 bits for general use.
+Using just a small set of seeds to instantiate larger state spaces means that
+there are some initial states that are impossible to reach. This creates some
+biases if everyone uses such values.
+
+There will not be anything *wrong* with the results, per se; even a seed of
+0 is perfectly fine thanks to the processing that `~SeedSequence` does. If you
+just need *some* fixed value for unit tests or debugging, feel free to use
+whatever seed you like. But if you want to make inferences from the results or
+publish them, drawing from a larger set of seeds is good practice.
+
+If you need to generate a good seed "offline", then ``SeedSequence().entropy``
+or using ``secrets.randbits(128)`` from the standard library are both
+convenient ways.
+
+.. autosummary::
+ :toctree: generated/
+
+ SeedSequence
+ bit_generator.ISeedSequence
+ bit_generator.ISpawnableSeedSequence
+ bit_generator.SeedlessSeedSequence
diff --git a/doc/source/reference/random/bit_generators/mt19937.rst b/doc/source/reference/random/bit_generators/mt19937.rst
new file mode 100644
index 000000000..71875db4e
--- /dev/null
+++ b/doc/source/reference/random/bit_generators/mt19937.rst
@@ -0,0 +1,32 @@
+Mersenne Twister (MT19937)
+--------------------------
+
+.. currentmodule:: numpy.random
+
+.. autoclass:: MT19937
+ :exclude-members:
+
+State
+=====
+
+.. autosummary::
+ :toctree: generated/
+
+ ~MT19937.state
+
+Parallel generation
+===================
+.. autosummary::
+ :toctree: generated/
+
+ ~MT19937.jumped
+
+Extending
+=========
+.. autosummary::
+ :toctree: generated/
+
+ ~MT19937.cffi
+ ~MT19937.ctypes
+
+
diff --git a/doc/source/reference/random/bit_generators/pcg64.rst b/doc/source/reference/random/bit_generators/pcg64.rst
new file mode 100644
index 000000000..5881b7008
--- /dev/null
+++ b/doc/source/reference/random/bit_generators/pcg64.rst
@@ -0,0 +1,31 @@
+Parallel Congruent Generator (64-bit, PCG64)
+--------------------------------------------
+
+.. currentmodule:: numpy.random
+
+.. autoclass:: PCG64
+ :exclude-members:
+
+State
+=====
+
+.. autosummary::
+ :toctree: generated/
+
+ ~PCG64.state
+
+Parallel generation
+===================
+.. autosummary::
+ :toctree: generated/
+
+ ~PCG64.advance
+ ~PCG64.jumped
+
+Extending
+=========
+.. autosummary::
+ :toctree: generated/
+
+ ~PCG64.cffi
+ ~PCG64.ctypes
diff --git a/doc/source/reference/random/bit_generators/philox.rst b/doc/source/reference/random/bit_generators/philox.rst
new file mode 100644
index 000000000..8eba2d351
--- /dev/null
+++ b/doc/source/reference/random/bit_generators/philox.rst
@@ -0,0 +1,33 @@
+Philox Counter-based RNG
+------------------------
+
+.. currentmodule:: numpy.random
+
+.. autoclass:: Philox
+ :exclude-members:
+
+State
+=====
+
+.. autosummary::
+ :toctree: generated/
+
+ ~Philox.state
+
+Parallel generation
+===================
+.. autosummary::
+ :toctree: generated/
+
+ ~Philox.advance
+ ~Philox.jumped
+
+Extending
+=========
+.. autosummary::
+ :toctree: generated/
+
+ ~Philox.cffi
+ ~Philox.ctypes
+
+
diff --git a/doc/source/reference/random/bit_generators/sfc64.rst b/doc/source/reference/random/bit_generators/sfc64.rst
new file mode 100644
index 000000000..d34124a33
--- /dev/null
+++ b/doc/source/reference/random/bit_generators/sfc64.rst
@@ -0,0 +1,26 @@
+SFC64 Small Fast Chaotic PRNG
+-----------------------------
+
+.. currentmodule:: numpy.random
+
+.. autoclass:: SFC64
+ :exclude-members:
+
+State
+=====
+
+.. autosummary::
+ :toctree: generated/
+
+ ~SFC64.state
+
+Extending
+=========
+.. autosummary::
+ :toctree: generated/
+
+ ~SFC64.cffi
+ ~SFC64.ctypes
+
+
+
diff --git a/doc/source/reference/random/extending.rst b/doc/source/reference/random/extending.rst
new file mode 100644
index 000000000..22f9cb7e4
--- /dev/null
+++ b/doc/source/reference/random/extending.rst
@@ -0,0 +1,165 @@
+.. currentmodule:: numpy.random
+
+Extending
+---------
+The BitGenerators have been designed to be extendable using standard tools for
+high-performance Python -- numba and Cython. The `~Generator` object can also
+be used with user-provided BitGenerators as long as these export a small set of
+required functions.
+
+Numba
+=====
+Numba can be used with either CTypes or CFFI. The current iteration of the
+BitGenerators all export a small set of functions through both interfaces.
+
+This example shows how numba can be used to produce Box-Muller normals using
+a pure Python implementation which is then compiled. The random numbers are
+provided by ``ctypes.next_double``.
+
+.. code-block:: python
+
+ from numpy.random import PCG64
+ import numpy as np
+ import numba as nb
+
+ x = PCG64()
+ f = x.ctypes.next_double
+ s = x.ctypes.state
+ state_addr = x.ctypes.state_address
+
+ def normals(n, state):
+ out = np.empty(n)
+ for i in range((n+1)//2):
+ x1 = 2.0*f(state) - 1.0
+ x2 = 2.0*f(state) - 1.0
+ r2 = x1*x1 + x2*x2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0*f(state) - 1.0
+ x2 = 2.0*f(state) - 1.0
+ r2 = x1*x1 + x2*x2
+ g = np.sqrt(-2.0*np.log(r2)/r2)
+ out[2*i] = g*x1
+ if 2*i+1 < n:
+ out[2*i+1] = g*x2
+ return out
+
+ # Compile using Numba
+ print(normals(10, s).var())
+ # Warm up
+ normalsj = nb.jit(normals, nopython=True)
+ # Must use state address not state with numba
+ normalsj(1, state_addr)
+ %timeit normalsj(1000000, state_addr)
+ print('1,000,000 Box-Muller (numba/PCG64) randoms')
+ %timeit np.random.standard_normal(1000000)
+ print('1,000,000 Box-Muller (NumPy) randoms')
+
+
+Both CTypes and CFFI allow the more complicated distributions to be used
+directly in Numba after compiling the file distributions.c into a DLL or so.
+An example showing the use of a more complicated distribution is in the
+examples folder.
+
+.. _randomgen_cython:
+
+Cython
+======
+
+Cython can be used to unpack the ``PyCapsule`` provided by a BitGenerator.
+This example uses `~pcg64.PCG64` and
+``random_gauss_zig``, the Ziggurat-based generator for normals, to fill an
+array. The usual caveats for writing high-performance code using Cython --
+removing bounds checks and wrap around, providing array alignment information
+-- still apply.
+
+.. code-block:: cython
+
+ import numpy as np
+ cimport numpy as np
+ cimport cython
+ from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+ from numpy.random.common cimport *
+ from numpy.random.distributions cimport random_gauss_zig
+ from numpy.random import PCG64
+
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def normals_zig(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ # Best practice is to release GIL and acquire the lock
+ with x.lock, nogil:
+ for i in range(n):
+ random_values[i] = random_gauss_zig(rng)
+ randoms = np.asarray(random_values)
+ return randoms
+
+The BitGenerator can also be directly accessed using the members of the basic
+RNG structure.
+
+.. code-block:: cython
+
+ @cython.boundscheck(False)
+ @cython.wraparound(False)
+ def uniforms(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+
+ x = PCG64()
+ capsule = x.capsule
+        # Optional check that the capsule is from a BitGenerator
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ with x.lock, nogil:
+ for i in range(n):
+ # Call the function
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+ return randoms
+
+These functions along with a minimal setup file are included in the
+examples folder.
+
+New Basic RNGs
+==============
+`~Generator` can be used with other user-provided BitGenerators. The simplest
+way to write a new BitGenerator is to examine the pyx file of one of the
+existing BitGenerators. The key structure that must be provided is the
+``capsule`` which contains a ``PyCapsule`` to a struct pointer of type
+``bitgen_t``,
+
+.. code-block:: c
+
+ typedef struct bitgen {
+ void *state;
+ uint64_t (*next_uint64)(void *st);
+ uint32_t (*next_uint32)(void *st);
+ double (*next_double)(void *st);
+ uint64_t (*next_raw)(void *st);
+ } bitgen_t;
+
+which provides 5 pointers. The first is an opaque pointer to the data structure
+used by the BitGenerators. The next three are function pointers which return
+the next 64- and 32-bit unsigned integers, the next random double and the next
+raw value. This final function is used for testing and so can be set to
+the next 64-bit unsigned integer function if not needed. Functions inside
+``Generator`` use this structure as in
+
+.. code-block:: c
+
+ bitgen_state->next_uint64(bitgen_state->state)
diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst
new file mode 100644
index 000000000..068143270
--- /dev/null
+++ b/doc/source/reference/random/generator.rst
@@ -0,0 +1,84 @@
+.. currentmodule:: numpy.random
+
+Random Generator
+----------------
+The `~Generator` provides access to
+a wide range of distributions, and serves as a replacement for
+:class:`~numpy.random.RandomState`. The main difference between
+the two is that ``Generator`` relies on an additional BitGenerator to
+manage state and generate the random bits, which are then transformed into
+random values from useful distributions. The default BitGenerator used by
+``Generator`` is `~PCG64`. The BitGenerator
+can be changed by passing an instantiated BitGenerator to ``Generator``.
+
+
+.. autofunction:: default_rng
+
+.. autoclass:: Generator
+ :exclude-members:
+
+Accessing the BitGenerator
+==========================
+.. autosummary::
+ :toctree: generated/
+
+ ~numpy.random.Generator.bit_generator
+
+Simple random data
+==================
+.. autosummary::
+ :toctree: generated/
+
+ ~numpy.random.Generator.integers
+ ~numpy.random.Generator.random
+ ~numpy.random.Generator.choice
+ ~numpy.random.Generator.bytes
+
+Permutations
+============
+.. autosummary::
+ :toctree: generated/
+
+ ~numpy.random.Generator.shuffle
+ ~numpy.random.Generator.permutation
+
+Distributions
+=============
+.. autosummary::
+ :toctree: generated/
+
+ ~numpy.random.Generator.beta
+ ~numpy.random.Generator.binomial
+ ~numpy.random.Generator.chisquare
+ ~numpy.random.Generator.dirichlet
+ ~numpy.random.Generator.exponential
+ ~numpy.random.Generator.f
+ ~numpy.random.Generator.gamma
+ ~numpy.random.Generator.geometric
+ ~numpy.random.Generator.gumbel
+ ~numpy.random.Generator.hypergeometric
+ ~numpy.random.Generator.laplace
+ ~numpy.random.Generator.logistic
+ ~numpy.random.Generator.lognormal
+ ~numpy.random.Generator.logseries
+ ~numpy.random.Generator.multinomial
+ ~numpy.random.Generator.multivariate_normal
+ ~numpy.random.Generator.negative_binomial
+ ~numpy.random.Generator.noncentral_chisquare
+ ~numpy.random.Generator.noncentral_f
+ ~numpy.random.Generator.normal
+ ~numpy.random.Generator.pareto
+ ~numpy.random.Generator.poisson
+ ~numpy.random.Generator.power
+ ~numpy.random.Generator.rayleigh
+ ~numpy.random.Generator.standard_cauchy
+ ~numpy.random.Generator.standard_exponential
+ ~numpy.random.Generator.standard_gamma
+ ~numpy.random.Generator.standard_normal
+ ~numpy.random.Generator.standard_t
+ ~numpy.random.Generator.triangular
+ ~numpy.random.Generator.uniform
+ ~numpy.random.Generator.vonmises
+ ~numpy.random.Generator.wald
+ ~numpy.random.Generator.weibull
+ ~numpy.random.Generator.zipf
diff --git a/doc/source/reference/random/index.rst b/doc/source/reference/random/index.rst
new file mode 100644
index 000000000..b0283f3a7
--- /dev/null
+++ b/doc/source/reference/random/index.rst
@@ -0,0 +1,208 @@
+.. _numpyrandom:
+
+.. py:module:: numpy.random
+
+.. currentmodule:: numpy.random
+
+Random sampling (:mod:`numpy.random`)
+=====================================
+
+Numpy's random number routines produce pseudo random numbers using
+combinations of a `BitGenerator` to create sequences and a `Generator`
+to use those sequences to sample from different statistical distributions:
+
+* BitGenerators: Objects that generate random numbers. These are typically
+ unsigned integer words filled with sequences of either 32 or 64 random bits.
+* Generators: Objects that transform sequences of random bits from a
+ BitGenerator into sequences of numbers that follow a specific probability
+ distribution (such as uniform, Normal or Binomial) within a specified
+ interval.
+
+Since Numpy version 1.17.0 the Generator can be initialized with a
+number of different BitGenerators. It exposes many different probability
+distributions. See `NEP 19 <https://www.numpy.org/neps/
+nep-0019-rng-policy.html>`_ for context on the updated random Numpy number
+routines. The legacy `.RandomState` random number routines are still
+available, but limited to a single BitGenerator.
+
+For convenience and backward compatibility, a single `~.RandomState`
+instance's methods are imported into the numpy.random namespace, see
+:ref:`legacy` for the complete list.
+
+Quick Start
+-----------
+
+By default, `~Generator` uses bits provided by `~pcg64.PCG64` which
+has better statistical properties than the legacy mt19937 random
+number generator in `~.RandomState`.
+
+.. code-block:: python
+
+ # Uses the old numpy.random.RandomState
+ from numpy import random
+ random.standard_normal()
+
+`~Generator` can be used as a replacement for `~.RandomState`. Both class
+instances now hold an internal `BitGenerator` instance to provide the bit
+stream, it is accessible as ``gen.bit_generator``. Some long-overdue API
+cleanup means that legacy and compatibility methods have been removed from
+`~.Generator`
+
+=================== ============== ============
+`~.RandomState` `~.Generator` Notes
+------------------- -------------- ------------
+``random_sample``, ``random`` Compatible with `random.random`
+``rand``
+------------------- -------------- ------------
+``randint``, ``integers`` Add an ``endpoint`` kwarg
+``random_integers``
+------------------- -------------- ------------
+``tomaxint`` removed Use ``integers(0, np.iinfo(np.int).max,``
+ ``endpoint=False)``
+------------------- -------------- ------------
+``seed`` removed Use `~.SeedSequence.spawn`
+=================== ============== ============
+
+See `new-or-different` for more information
+
+.. code-block:: python
+
+ # As replacement for RandomState(); default_rng() instantiates Generator with
+ # the default PCG64 BitGenerator.
+ from numpy.random import default_rng
+ rg = default_rng()
+ rg.standard_normal()
+ rg.bit_generator
+
+Something like the following code can be used to support both ``RandomState``
+and ``Generator``, with the understanding that the interfaces are slightly
+different
+
+.. code-block:: python
+
+ try:
+ rg_integers = rg.integers
+ except AttributeError:
+ rg_integers = rg.randint
+ a = rg_integers(1000)
+
+Seeds can be passed to any of the BitGenerators. The provided value is mixed
+via `~.SeedSequence` to spread a possible sequence of seeds across a wider
+range of initialization states for the BitGenerator. Here `~.PCG64` is used and
+is wrapped with a `~.Generator`.
+
+.. code-block:: python
+
+ from numpy.random import Generator, PCG64
+ rg = Generator(PCG64(12345))
+ rg.standard_normal()
+
+Introduction
+------------
+The new infrastructure takes a different approach to producing random numbers
+from the `~.RandomState` object. Random number generation is separated into
+two components, a bit generator and a random generator.
+
+The `BitGenerator` has a limited set of responsibilities. It manages state
+and provides functions to produce random doubles and random unsigned 32- and
+64-bit values.
+
+The `random generator <Generator>` takes the
+bit generator-provided stream and transforms them into more useful
+distributions, e.g., simulated normal random values. This structure allows
+alternative bit generators to be used with little code duplication.
+
+The `Generator` is the user-facing object that is nearly identical to
+`.RandomState`. The canonical method to initialize a generator passes a
+`~.PCG64` bit generator as the sole argument.
+
+.. code-block:: python
+
+ from numpy.random import default_rng
+ rg = default_rng(12345)
+ rg.random()
+
+One can also instantiate `Generator` directly with a `BitGenerator` instance.
+To use the older `~mt19937.MT19937` algorithm, one can instantiate it directly
+and pass it to `Generator`.
+
+.. code-block:: python
+
+ from numpy.random import Generator, MT19937
+ rg = Generator(MT19937(12345))
+ rg.random()
+
+What's New or Different
+~~~~~~~~~~~~~~~~~~~~~~~
+.. warning::
+
+ The Box-Muller method used to produce NumPy's normals is no longer available
+ in `Generator`. It is not possible to reproduce the exact random
+ values using Generator for the normal distribution or any other
+ distribution that relies on the normal such as the `.RandomState.gamma` or
+ `.RandomState.standard_t`. If you require bitwise backward compatible
+ streams, use `.RandomState`.
+
+* The Generator's normal, exponential and gamma functions use 256-step Ziggurat
+ methods which are 2-10 times faster than NumPy's Box-Muller or inverse CDF
+ implementations.
+* Optional ``dtype`` argument that accepts ``np.float32`` or ``np.float64``
+  to produce either single or double precision uniform random variables for
+ select distributions
+* Optional ``out`` argument that allows existing arrays to be filled for
+ select distributions
+* All BitGenerators can produce doubles, uint64s and uint32s via CTypes
+ (`~.PCG64.ctypes`) and CFFI (`~.PCG64.cffi`). This allows the bit generators
+ to be used in numba.
+* The bit generators can be used in downstream projects via
+ :ref:`Cython <randomgen_cython>`.
+* `~.Generator.integers` is now the canonical way to generate integer
+ random numbers from a discrete uniform distribution. The ``rand`` and
+ ``randn`` methods are only available through the legacy `~.RandomState`.
+ The ``endpoint`` keyword can be used to specify open or closed intervals.
+ This replaces both ``randint`` and the deprecated ``random_integers``.
+* `~.Generator.random` is now the canonical way to generate floating-point
+ random numbers, which replaces `.RandomState.random_sample`,
+ `.RandomState.sample`, and `.RandomState.ranf`. This is consistent with
+ Python's `random.random`.
+* All BitGenerators in numpy use `~SeedSequence` to convert seeds into
+ initialized states.
+
+See :ref:`new-or-different` for a complete list of improvements and
+differences from the traditional ``RandomState``.
+
+Parallel Generation
+~~~~~~~~~~~~~~~~~~~
+
+The included generators can be used in parallel, distributed applications in
+one of three ways:
+
+* :ref:`seedsequence-spawn`
+* :ref:`independent-streams`
+* :ref:`parallel-jumped`
+
+Concepts
+--------
+.. toctree::
+ :maxdepth: 1
+
+ generator
+ Legacy Generator (RandomState) <legacy>
+ BitGenerators, SeedSequences <bit_generators/index>
+
+Features
+--------
+.. toctree::
+ :maxdepth: 2
+
+ Parallel Applications <parallel>
+ Multithreaded Generation <multithreading>
+ new-or-different
+ Comparing Performance <performance>
+ extending
+
+Original Source
+~~~~~~~~~~~~~~~
+
+This package was developed independently of NumPy and was integrated in version
+1.17.0. The original repo is at https://github.com/bashtage/randomgen.
diff --git a/doc/source/reference/random/legacy.rst b/doc/source/reference/random/legacy.rst
new file mode 100644
index 000000000..413a42727
--- /dev/null
+++ b/doc/source/reference/random/legacy.rst
@@ -0,0 +1,123 @@
+.. currentmodule:: numpy.random
+
+.. _legacy:
+
+Legacy Random Generation
+------------------------
+The `RandomState` provides access to
+legacy generators. This generator is considered frozen and will have
+no further improvements. It is guaranteed to produce the same values
+as the final point release of NumPy v1.16. These all depend on Box-Muller
+normals or inverse CDF exponentials or gammas. This class should only be used
+if it is essential to have randoms that are identical to what
+would have been produced by previous versions of NumPy.
+
+`RandomState` adds additional information
+to the state which is required when using Box-Muller normals since these
+are produced in pairs. It is important to use
+`RandomState.get_state`, and not the underlying bit generators
+`state`, when accessing the state so that these extra values are saved.
+
+Although we provide the `MT19937` BitGenerator for use independent of
+`RandomState`, note that its default seeding uses `SeedSequence`
+rather than the legacy seeding algorithm. `RandomState` will use the
+legacy seeding algorithm. The methods to use the legacy seeding algorithm are
+currently private as the main reason to use them is just to implement
+`RandomState`. However, one can reset the state of `MT19937`
+using the state of the `RandomState`:
+
+.. code-block:: python
+
+ from numpy.random import MT19937
+ from numpy.random import RandomState
+
+ rs = RandomState(12345)
+ mt19937 = MT19937()
+ mt19937.state = rs.get_state()
+ rs2 = RandomState(mt19937)
+
+ # Same output
+ rs.standard_normal()
+ rs2.standard_normal()
+
+ rs.random()
+ rs2.random()
+
+ rs.standard_exponential()
+ rs2.standard_exponential()
+
+
+.. autoclass:: RandomState
+ :exclude-members:
+
+Seeding and State
+=================
+
+.. autosummary::
+ :toctree: generated/
+
+ ~RandomState.get_state
+ ~RandomState.set_state
+ ~RandomState.seed
+
+Simple random data
+==================
+.. autosummary::
+ :toctree: generated/
+
+ ~RandomState.rand
+ ~RandomState.randn
+ ~RandomState.randint
+ ~RandomState.random_integers
+ ~RandomState.random_sample
+ ~RandomState.choice
+ ~RandomState.bytes
+
+Permutations
+============
+.. autosummary::
+ :toctree: generated/
+
+ ~RandomState.shuffle
+ ~RandomState.permutation
+
+Distributions
+=============
+.. autosummary::
+ :toctree: generated/
+
+ ~RandomState.beta
+ ~RandomState.binomial
+ ~RandomState.chisquare
+ ~RandomState.dirichlet
+ ~RandomState.exponential
+ ~RandomState.f
+ ~RandomState.gamma
+ ~RandomState.geometric
+ ~RandomState.gumbel
+ ~RandomState.hypergeometric
+ ~RandomState.laplace
+ ~RandomState.logistic
+ ~RandomState.lognormal
+ ~RandomState.logseries
+ ~RandomState.multinomial
+ ~RandomState.multivariate_normal
+ ~RandomState.negative_binomial
+ ~RandomState.noncentral_chisquare
+ ~RandomState.noncentral_f
+ ~RandomState.normal
+ ~RandomState.pareto
+ ~RandomState.poisson
+ ~RandomState.power
+ ~RandomState.rayleigh
+ ~RandomState.standard_cauchy
+ ~RandomState.standard_exponential
+ ~RandomState.standard_gamma
+ ~RandomState.standard_normal
+ ~RandomState.standard_t
+ ~RandomState.triangular
+ ~RandomState.uniform
+ ~RandomState.vonmises
+ ~RandomState.wald
+ ~RandomState.weibull
+ ~RandomState.zipf
diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst
new file mode 100644
index 000000000..6883d3672
--- /dev/null
+++ b/doc/source/reference/random/multithreading.rst
@@ -0,0 +1,108 @@
+Multithreaded Generation
+========================
+
+The four core distributions (:meth:`~.Generator.random`,
+:meth:`~.Generator.standard_normal`, :meth:`~.Generator.standard_exponential`,
+and :meth:`~.Generator.standard_gamma`) all allow existing arrays to be filled
+using the ``out`` keyword argument. Existing arrays need to be contiguous and
+well-behaved (writable and aligned). Under normal circumstances, arrays
+created using the common constructors such as :meth:`numpy.empty` will satisfy
+these requirements.
+
+This example makes use of Python 3 :mod:`concurrent.futures` to fill an array
+using multiple threads. Threads are long-lived so that repeated calls do not
+require any additional overheads from thread creation. The underlying
+BitGenerator is `PCG64` which is fast, has a long period and supports
+using `PCG64.jumped` to return a new generator while advancing the
+state. The random numbers generated are reproducible in the sense that the same
+seed will produce the same outputs.
+
+.. code-block:: ipython
+
+ from numpy.random import Generator, PCG64
+ import multiprocessing
+ import concurrent.futures
+ import numpy as np
+
+ class MultithreadedRNG(object):
+ def __init__(self, n, seed=None, threads=None):
+ rg = PCG64(seed)
+ if threads is None:
+ threads = multiprocessing.cpu_count()
+ self.threads = threads
+
+ self._random_generators = [rg]
+ last_rg = rg
+ for _ in range(0, threads-1):
+ new_rg = last_rg.jumped()
+ self._random_generators.append(new_rg)
+ last_rg = new_rg
+
+ self.n = n
+ self.executor = concurrent.futures.ThreadPoolExecutor(threads)
+ self.values = np.empty(n)
+ self.step = np.ceil(n / threads).astype(np.int)
+
+ def fill(self):
+ def _fill(random_state, out, first, last):
+ random_state.standard_normal(out=out[first:last])
+
+ futures = {}
+ for i in range(self.threads):
+ args = (_fill,
+ self._random_generators[i],
+ self.values,
+ i * self.step,
+ (i + 1) * self.step)
+ futures[self.executor.submit(*args)] = i
+ concurrent.futures.wait(futures)
+
+ def __del__(self):
+ self.executor.shutdown(False)
+
+
+The multithreaded random number generator can be used to fill an array.
+The ``values`` attributes shows the zero-value before the fill and the
+random value after.
+
+.. code-block:: ipython
+
+ In [2]: mrng = MultithreadedRNG(10000000, seed=0)
+ ...: print(mrng.values[-1])
+ 0.0
+
+ In [3]: mrng.fill()
+ ...: print(mrng.values[-1])
+ 3.296046120254392
+
+The time required to produce using multiple threads can be compared to
+the time required to generate using a single thread.
+
+.. code-block:: ipython
+
+ In [4]: print(mrng.threads)
+ ...: %timeit mrng.fill()
+
+ 4
+ 32.8 ms ± 2.71 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+
+The single threaded call directly uses the BitGenerator.
+
+.. code-block:: ipython
+
+ In [5]: values = np.empty(10000000)
+ ...: rg = Generator(PCG64())
+ ...: %timeit rg.standard_normal(out=values)
+
+ 99.6 ms ± 222 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+
+The gains are substantial and the scaling is reasonable even for arrays that
+are only moderately large. The gains are even larger when compared to a call
+that does not use an existing array due to array creation overhead.
+
+.. code-block:: ipython
+
+ In [6]: rg = Generator(PCG64())
+ ...: %timeit rg.standard_normal(10000000)
+
+ 125 ms ± 309 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
diff --git a/doc/source/reference/random/new-or-different.rst b/doc/source/reference/random/new-or-different.rst
new file mode 100644
index 000000000..c8815f98f
--- /dev/null
+++ b/doc/source/reference/random/new-or-different.rst
@@ -0,0 +1,115 @@
+.. _new-or-different:
+
+.. currentmodule:: numpy.random
+
+What's New or Different
+-----------------------
+
+.. warning::
+
+ The Box-Muller method used to produce NumPy's normals is no longer available
+ in `Generator`. It is not possible to reproduce the exact random
+ values using ``Generator`` for the normal distribution or any other
+ distribution that relies on the normal such as the `gamma` or
+ `standard_t`. If you require bitwise backward compatible
+ streams, use `RandomState`.
+
+Quick comparison of legacy `mtrand <legacy>`_ to the new `Generator`
+
+================== ==================== =============
+Feature Older Equivalent Notes
+------------------ -------------------- -------------
+`~.Generator` `~.RandomState` ``Generator`` requires a stream
+ source, called a `BitGenerator
+ <bit_generators>` A number of these
+ are provided. ``RandomState`` uses
+ the Mersenne Twister `~.MT19937` by
+ default, but can also be instantiated
+ with any BitGenerator.
+------------------ -------------------- -------------
+``random`` ``random_sample``, Access the values in a BitGenerator,
+ ``rand`` convert them to ``float64`` in the
+                                        interval ``[0.0, 1.0)``.
+ In addition to the ``size`` kwarg, now
+ supports ``dtype='d'`` or ``dtype='f'``,
+ and an ``out`` kwarg to fill a user-
+ supplied array.
+
+ Many other distributions are also
+ supported.
+------------------ -------------------- -------------
+``integers`` ``randint``, Use the ``endpoint`` kwarg to adjust
+                   ``random_integers``  the inclusion or exclusion of the
+ ``high`` interval endpoint
+================== ==================== =============
+
+And in more detail:
+
+* Simulate from the complex normal distribution
+ (`~.Generator.complex_normal`)
+* The normal, exponential and gamma generators use 256-step Ziggurat
+ methods which are 2-10 times faster than NumPy's default implementation in
+ `~.Generator.standard_normal`, `~.Generator.standard_exponential` or
+ `~.Generator.standard_gamma`.
+* `~.Generator.integers` is now the canonical way to generate integer
+ random numbers from a discrete uniform distribution. The ``rand`` and
+ ``randn`` methods are only available through the legacy `~.RandomState`.
+ This replaces both ``randint`` and the deprecated ``random_integers``.
+* The Box-Muller method used to produce NumPy's normals is no longer available.
+* All bit generators can produce doubles, uint64s and
+ uint32s via CTypes (`~PCG64.ctypes`) and CFFI (`~PCG64.cffi`).
+ This allows these bit generators to be used in numba.
+* The bit generators can be used in downstream projects via
+ Cython.
+
+
+.. ipython:: python
+
+ from numpy.random import Generator, PCG64
+ import numpy.random
+ rg = Generator(PCG64())
+ %timeit rg.standard_normal(100000)
+ %timeit numpy.random.standard_normal(100000)
+
+.. ipython:: python
+
+ %timeit rg.standard_exponential(100000)
+ %timeit numpy.random.standard_exponential(100000)
+
+.. ipython:: python
+
+ %timeit rg.standard_gamma(3.0, 100000)
+ %timeit numpy.random.standard_gamma(3.0, 100000)
+
+* Optional ``dtype`` argument that accepts ``np.float32`` or ``np.float64``
+  to produce either single or double precision uniform random variables for
+ select distributions
+
+ * Uniforms (`~.Generator.random` and `~.Generator.integers`)
+ * Normals (`~.Generator.standard_normal`)
+ * Standard Gammas (`~.Generator.standard_gamma`)
+ * Standard Exponentials (`~.Generator.standard_exponential`)
+
+.. ipython:: python
+
+ rg = Generator(PCG64(0))
+ rg.random(3, dtype='d')
+ rg.random(3, dtype='f')
+
+* Optional ``out`` argument that allows existing arrays to be filled for
+ select distributions
+
+ * Uniforms (`~.Generator.random`)
+ * Normals (`~.Generator.standard_normal`)
+ * Standard Gammas (`~.Generator.standard_gamma`)
+ * Standard Exponentials (`~.Generator.standard_exponential`)
+
+ This allows multithreading to fill large arrays in chunks using suitable
+ BitGenerators in parallel.
+
+.. ipython:: python
+
+ existing = np.zeros(4)
+ rg.random(out=existing[:2])
+ print(existing)
+
diff --git a/doc/source/reference/random/parallel.rst b/doc/source/reference/random/parallel.rst
new file mode 100644
index 000000000..2f79f22d8
--- /dev/null
+++ b/doc/source/reference/random/parallel.rst
@@ -0,0 +1,193 @@
+Parallel Random Number Generation
+=================================
+
+There are three strategies implemented that can be used to produce
+repeatable pseudo-random numbers across multiple processes (local
+or distributed).
+
+.. currentmodule:: numpy.random
+
+.. _seedsequence-spawn:
+
+`~SeedSequence` spawning
+------------------------
+
+`~SeedSequence` `implements an algorithm`_ to process a user-provided seed,
+typically as an integer of some size, and to convert it into an initial state for
+a `~BitGenerator`. It uses hashing techniques to ensure that low-quality seeds
+are turned into high quality initial states (at least, with very high
+probability).
+
+For example, `~mt19937.MT19937` has a state consisting of 624
+`uint32` integers. A naive way to take a 32-bit integer seed would be to just set
+the last element of the state to the 32-bit seed and leave the rest 0s. This is
+a valid state for `~mt19937.MT19937`, but not a good one. The Mersenne Twister
+algorithm `suffers if there are too many 0s`_. Similarly, two adjacent 32-bit
+integer seeds (i.e. ``12345`` and ``12346``) would produce very similar
+streams.
+
+`~SeedSequence` avoids these problems by using successions of integer hashes
+with good `avalanche properties`_ to ensure that flipping any bit in the input
+has about a 50% chance of flipping any bit in the output. Two input seeds
+that are very close to each other will produce initial states that are very far
+from each other (with very high probability). It is also constructed in such
+a way that you can provide arbitrary-sized integers or lists of integers.
+`~SeedSequence` will take all of the bits that you provide and mix them
+together to produce however many bits the consuming `~BitGenerator` needs to
+initialize itself.
+
+These properties together mean that we can safely mix together the usual
+user-provided seed with simple incrementing counters to get `~BitGenerator`
+states that are (to very high probability) independent of each other. We can
+wrap this together into an API that is easy to use and difficult to misuse.
+
+.. code-block:: python
+
+ from numpy.random import SeedSequence, default_rng
+
+ ss = SeedSequence(12345)
+
+ # Spawn off 10 child SeedSequences to pass to child processes.
+ child_seeds = ss.spawn(10)
+ streams = [default_rng(s) for s in child_seeds]
+
+.. end_block
+
+Child `~SeedSequence` objects can also spawn to make grandchildren, and so on.
+Each `~SeedSequence` has its position in the tree of spawned `~SeedSequence`
+objects mixed in with the user-provided seed to generate independent (with very
+high probability) streams.
+
+.. code-block:: python
+
+ grandchildren = child_seeds[0].spawn(4)
+ grand_streams = [default_rng(s) for s in grandchildren]
+
+.. end_block
+
+This feature lets you make local decisions about when and how to split up
+streams without coordination between processes. You do not have to preallocate
+space to avoid overlapping or request streams from a common global service. This
+general "tree-hashing" scheme is `not unique to numpy`_ but not yet widespread.
+Python has increasingly-flexible mechanisms for parallelization available, and
+this scheme fits in very well with that kind of use.
+
+Using this scheme, an upper bound on the probability of a collision can be
+estimated if one knows the number of streams that you derive. `~SeedSequence`
+hashes its inputs, both the seed and the spawn-tree-path, down to a 128-bit
+pool by default. The probability that there is a collision in
+that pool, pessimistically-estimated ([1]_), will be about :math:`n^2*2^{-128}` where
+`n` is the number of streams spawned. If a program uses an aggressive million
+streams, about :math:`2^{20}`, then the probability that at least one pair of
+them are identical is about :math:`2^{-88}`, which is in solidly-ignorable
+territory ([2]_).
+
+.. [1] The algorithm is carefully designed to eliminate a number of possible
+ ways to collide. For example, if one only does one level of spawning, it
+ is guaranteed that all states will be unique. But it's easier to
+ estimate the naive upper bound on a napkin and take comfort knowing
+ that the probability is actually lower.
+
+.. [2] In this calculation, we can ignore the amount of numbers drawn from each
+ stream. Each of the PRNGs we provide has some extra protection built in
+ that avoids overlaps if the `~SeedSequence` pools differ in the
+ slightest bit. `~pcg64.PCG64` has :math:`2^{127}` separate cycles
+ determined by the seed in addition to the position in the
+ :math:`2^{128}` long period for each cycle, so one has to both get on or
+ near the same cycle *and* seed a nearby position in the cycle.
+ `~philox.Philox` has completely independent cycles determined by the seed.
+ `~sfc64.SFC64` incorporates a 64-bit counter so every unique seed is at
+ least :math:`2^{64}` iterations away from any other seed. And
+ finally, `~mt19937.MT19937` has just an unimaginably huge period. Getting
+ a collision internal to `~SeedSequence` is the way a failure would be
+ observed.
+
+.. _`implements an algorithm`: http://www.pcg-random.org/posts/developing-a-seed_seq-alternative.html
+.. _`suffers if there are too many 0s`: http://www.math.sci.hiroshima-u.ac.jp/~m-mat/MT/MT2002/emt19937ar.html
+.. _`avalanche properties`: https://en.wikipedia.org/wiki/Avalanche_effect
+.. _`not unique to numpy`: https://www.iro.umontreal.ca/~lecuyer/myftp/papers/parallel-rng-imacs.pdf
+
+
+.. _independent-streams:
+
+Independent Streams
+-------------------
+
+:class:`~philox.Philox` is a counter-based RNG which generates values by
+encrypting an incrementing counter using weak cryptographic primitives. The
+seed determines the key that is used for the encryption. Unique keys create
+unique, independent streams. :class:`~philox.Philox` lets you bypass the
+seeding algorithm to directly set the 128-bit key. Similar, but different, keys
+will still create independent streams.
+
+.. code-block:: python
+
+ import secrets
+ from numpy.random import Philox
+
+ # 128-bit number as a seed
+ root_seed = secrets.getrandbits(128)
+ streams = [Philox(key=root_seed + stream_id) for stream_id in range(10)]
+
+.. end_block
+
+This scheme does require that you avoid reusing stream IDs. This may require
+coordination between the parallel processes.
+
+
+.. _parallel-jumped:
+
+Jumping the BitGenerator state
+------------------------------
+
+``jumped`` advances the state of the BitGenerator *as-if* a large number of
+random numbers have been drawn, and returns a new instance with this state.
+The specific number of draws varies by BitGenerator, and ranges from
+:math:`2^{64}` to :math:`2^{128}`. Additionally, the *as-if* draws also depend
+on the size of the default random number produced by the specific BitGenerator.
+The BitGenerators that support ``jumped``, along with the period of the
+BitGenerator, the size of the jump and the bits in the default unsigned random
+are listed below.
+
++-----------------+-------------------------+-------------------------+-------------------------+
+| BitGenerator | Period | Jump Size | Bits |
++=================+=========================+=========================+=========================+
+| MT19937 | :math:`2^{19937}` | :math:`2^{128}` | 32 |
++-----------------+-------------------------+-------------------------+-------------------------+
+| PCG64 | :math:`2^{128}` | :math:`~2^{127}` ([3]_) | 64 |
++-----------------+-------------------------+-------------------------+-------------------------+
+| Philox | :math:`2^{256}` | :math:`2^{128}` | 64 |
++-----------------+-------------------------+-------------------------+-------------------------+
+
+.. [3] The jump size is :math:`(\phi-1)*2^{128}` where :math:`\phi` is the
+ golden ratio. As the jumps wrap around the period, the actual distances
+ between neighboring streams will slowly grow smaller than the jump size,
+ but using the golden ratio this way is a classic method of constructing
+ a low-discrepancy sequence that spreads out the states around the period
+ optimally. You will not be able to jump enough to make those distances
+ small enough to overlap in your lifetime.
+
+``jumped`` can be used to produce long blocks which should be long enough to not
+overlap.
+
+.. code-block:: python
+
+ import secrets
+ from numpy.random import PCG64
+
+ seed = secrets.getrandbits(128)
+ blocked_rng = []
+ rng = PCG64(seed)
+ for i in range(10):
+ blocked_rng.append(rng.jumped(i))
+
+.. end_block
+
+When using ``jumped``, one does have to take care not to jump to a stream that
+was already used. In the above example, one could not later use
+``blocked_rng[0].jumped()`` as it would overlap with ``blocked_rng[1]``. Like
+with the independent streams, if the main process here wants to split off 10
+more streams by jumping, then it needs to start with ``range(10, 20)``,
+otherwise it would recreate the same streams. On the other hand, if you
+carefully construct the streams, then you are guaranteed to have streams that
+do not overlap.
diff --git a/doc/source/reference/random/performance.py b/doc/source/reference/random/performance.py
new file mode 100644
index 000000000..28a42eb0d
--- /dev/null
+++ b/doc/source/reference/random/performance.py
@@ -0,0 +1,87 @@
+from collections import OrderedDict
+from timeit import repeat
+
+import pandas as pd
+
+import numpy as np
+from numpy.random import MT19937, PCG64, Philox, SFC64
+
+PRNGS = [MT19937, PCG64, Philox, SFC64]
+
+funcs = OrderedDict()
+integers = 'integers(0, 2**{bits},size=1000000, dtype="uint{bits}")'
+funcs['32-bit Unsigned Ints'] = integers.format(bits=32)
+funcs['64-bit Unsigned Ints'] = integers.format(bits=64)
+funcs['Uniforms'] = 'random(size=1000000)'
+funcs['Normals'] = 'standard_normal(size=1000000)'
+funcs['Exponentials'] = 'standard_exponential(size=1000000)'
+funcs['Gammas'] = 'standard_gamma(3.0,size=1000000)'
+funcs['Binomials'] = 'binomial(9, .1, size=1000000)'
+funcs['Laplaces'] = 'laplace(size=1000000)'
+funcs['Poissons'] = 'poisson(3.0, size=1000000)'
+
+setup = """
+from numpy.random import {prng}, Generator
+rg = Generator({prng}())
+"""
+
+test = "rg.{func}"
+table = OrderedDict()
+for prng in PRNGS:
+ print(prng)
+ col = OrderedDict()
+ for key in funcs:
+ t = repeat(test.format(func=funcs[key]),
+ setup.format(prng=prng().__class__.__name__),
+ number=1, repeat=3)
+ col[key] = 1000 * min(t)
+ col = pd.Series(col)
+ table[prng().__class__.__name__] = col
+
+npfuncs = OrderedDict()
+npfuncs.update(funcs)
+npfuncs['32-bit Unsigned Ints'] = 'randint(2**32,dtype="uint32",size=1000000)'
+npfuncs['64-bit Unsigned Ints'] = 'randint(2**64,dtype="uint64",size=1000000)'
+setup = """
+from numpy.random import RandomState
+rg = RandomState()
+"""
+col = {}
+for key in npfuncs:
+ t = repeat(test.format(func=npfuncs[key]),
+ setup.format(prng=prng().__class__.__name__),
+ number=1, repeat=3)
+ col[key] = 1000 * min(t)
+table['RandomState'] = pd.Series(col)
+
+columns = ['MT19937','PCG64','Philox','SFC64', 'RandomState']
+table = pd.DataFrame(table)
+order = np.log(table).mean().sort_values().index
+table = table.T
+table = table.reindex(columns)
+table = table.T
+table = table.reindex([k for k in funcs], axis=0)
+print(table.to_csv(float_format='%0.1f'))
+
+
+rel = table.loc[:, ['RandomState']].values @ np.ones(
+ (1, table.shape[1])) / table
+rel.pop('RandomState')
+rel = rel.T
+rel['Overall'] = np.exp(np.log(rel).mean(1))
+rel *= 100
+rel = np.round(rel)
+rel = rel.T
+print(rel.to_csv(float_format='%0d'))
+
+# Cross-platform table
+rows = ['32-bit Unsigned Ints','64-bit Unsigned Ints','Uniforms','Normals','Exponentials']
+xplat = rel.reindex(rows, axis=0)
+xplat = 100 * (xplat / xplat.MT19937.values[:,None])
+overall = np.exp(np.log(xplat).mean(0))
+xplat = xplat.T.copy()
+xplat['Overall']=overall
+print(xplat.T.round(1))
+
+
+
diff --git a/doc/source/reference/random/performance.rst b/doc/source/reference/random/performance.rst
new file mode 100644
index 000000000..2d5fca496
--- /dev/null
+++ b/doc/source/reference/random/performance.rst
@@ -0,0 +1,153 @@
+Performance
+-----------
+
+.. currentmodule:: numpy.random
+
+Recommendation
+**************
+The recommended generator for general use is :class:`~pcg64.PCG64`. It is
+statistically high quality, full-featured, and fast on most platforms, but
+somewhat slow when compiled for 32-bit processes.
+
+:class:`~philox.Philox` is fairly slow, but its statistical properties have
+very high quality, and it is easy to get assuredly-independent streams by using
+unique keys. If that is the style you wish to use for parallel streams, or you
+are porting from another system that uses that style, then
+:class:`~philox.Philox` is your choice.
+
+:class:`~sfc64.SFC64` is statistically high quality and very fast. However, it
+lacks jumpability. If you are not using that capability and want lots of speed,
+even on 32-bit processes, this is your choice.
+
+:class:`~mt19937.MT19937` `fails some statistical tests`_ and is not especially
+fast compared to modern PRNGs. For these reasons, we mostly do not recommend
+using it on its own, only through the legacy `~.RandomState` for
+reproducing old results. That said, it has a very long history as a default in
+many systems.
+
+.. _`fails some statistical tests`: https://www.iro.umontreal.ca/~lecuyer/myftp/papers/testu01.pdf
+
+Timings
+*******
+
+The timings below are the time in ns to produce 1 random value from a
+specific distribution. The original :class:`~mt19937.MT19937` generator is
+much slower since it requires 2 32-bit values to equal the output of the
+faster generators.
+
+Integer performance has a similar ordering.
+
+The pattern is similar for other, more complex generators. The normal
+performance of the legacy :class:`~.RandomState` generator is much
+lower than the other since it uses the Box-Muller transformation rather
+than the Ziggurat generator. The performance gap for Exponentials is also
+large due to the cost of computing the log function to invert the CDF.
+The column labeled MT19937 uses the same 32-bit generator as
+:class:`~.RandomState` but produces random values using
+:class:`~Generator`.
+
+.. csv-table::
+ :header: ,MT19937,PCG64,Philox,SFC64,RandomState
+ :widths: 14,14,14,14,14,14
+
+ 32-bit Unsigned Ints,3.2,2.7,4.9,2.7,3.2
+ 64-bit Unsigned Ints,5.6,3.7,6.3,2.9,5.7
+ Uniforms,7.3,4.1,8.1,3.1,7.3
+ Normals,13.1,10.2,13.5,7.8,34.6
+ Exponentials,7.9,5.4,8.5,4.1,40.3
+ Gammas,34.8,28.0,34.7,25.1,58.1
+ Binomials,25.0,21.4,26.1,19.5,25.2
+ Laplaces,45.1,40.7,45.5,38.1,45.6
+ Poissons,67.6,52.4,69.2,46.4,78.1
+
+The next table presents the performance in percentage relative to values
+generated by the legacy generator, `RandomState(MT19937())`. The overall
+performance was computed using a geometric mean.
+
+.. csv-table::
+ :header: ,MT19937,PCG64,Philox,SFC64
+ :widths: 14,14,14,14,14
+
+ 32-bit Unsigned Ints,101,121,67,121
+ 64-bit Unsigned Ints,102,156,91,199
+ Uniforms,100,179,90,235
+ Normals,263,338,257,443
+ Exponentials,507,752,474,985
+ Gammas,167,207,167,231
+ Binomials,101,118,96,129
+ Laplaces,101,112,100,120
+ Poissons,116,149,113,168
+ Overall,144,192,132,225
+
+.. note::
+
+ All timings were taken using Linux on a i5-3570 processor.
+
+Performance on different Operating Systems
+******************************************
+Performance differs across platforms due to compiler and hardware availability
+(e.g., register width) differences. The default bit generator has been chosen
+to perform well on 64-bit platforms. Performance on 32-bit operating systems
+is very different.
+
+The values reported are normalized relative to the speed of MT19937 in
+each table. A value of 100 indicates that the performance matches the MT19937.
+Higher values indicate improved performance. These values cannot be compared
+across tables.
+
+64-bit Linux
+~~~~~~~~~~~~
+
+=================== ========= ======= ======== =======
+Distribution MT19937 PCG64 Philox SFC64
+=================== ========= ======= ======== =======
+32-bit Unsigned Int 100 119.8 67.7 120.2
+64-bit Unsigned Int 100 152.9 90.8 213.3
+Uniforms 100 179.0 87.0 232.0
+Normals 100 128.5 99.2 167.8
+Exponentials 100 148.3 93.0 189.3
+**Overall** 100 144.3 86.8 180.0
+=================== ========= ======= ======== =======
+
+
+64-bit Windows
+~~~~~~~~~~~~~~
+The relative performance on 64-bit Linux and 64-bit Windows is broadly similar.
+
+
+=================== ========= ======= ======== =======
+Distribution MT19937 PCG64 Philox SFC64
+=================== ========= ======= ======== =======
+32-bit Unsigned Int 100 129.1 35.0 135.0
+64-bit Unsigned Int 100 146.9 35.7 176.5
+Uniforms 100 165.0 37.0 192.0
+Normals 100 128.5 48.5 158.0
+Exponentials 100 151.6 39.0 172.8
+**Overall** 100 143.6 38.7 165.7
+=================== ========= ======= ======== =======
+
+
+32-bit Windows
+~~~~~~~~~~~~~~
+
+The performance of 64-bit generators on 32-bit Windows is much lower than on 64-bit
+operating systems due to register width. MT19937, the generator that has been
+in NumPy since 2005, operates on 32-bit integers.
+
+=================== ========= ======= ======== =======
+Distribution MT19937 PCG64 Philox SFC64
+=================== ========= ======= ======== =======
+32-bit Unsigned Int 100 30.5 21.1 77.9
+64-bit Unsigned Int 100 26.3 19.2 97.0
+Uniforms 100 28.0 23.0 106.0
+Normals 100 40.1 31.3 112.6
+Exponentials 100 33.7 26.3 109.8
+**Overall** 100 31.4 23.8 99.8
+=================== ========= ======= ======== =======
+
+
+.. note::
+
+ Linux timings used Ubuntu 18.04 and GCC 7.4. Windows timings were made on
+ Windows 10 using Microsoft C/C++ Optimizing Compiler Version 19 (Visual
+ Studio 2015). All timings were produced on a i5-3570 processor.
diff --git a/doc/source/reference/routines.char.rst b/doc/source/reference/routines.char.rst
index 7413e3615..ed8393855 100644
--- a/doc/source/reference/routines.char.rst
+++ b/doc/source/reference/routines.char.rst
@@ -1,11 +1,13 @@
String operations
*****************
-.. currentmodule:: numpy.core.defchararray
+.. currentmodule:: numpy.char
-This module provides a set of vectorized string operations for arrays
-of type `numpy.string_` or `numpy.unicode_`. All of them are based on
-the string methods in the Python standard library.
+.. module:: numpy.char
+
+The `numpy.char` module provides a set of vectorized string
+operations for arrays of type `numpy.string_` or `numpy.unicode_`.
+All of them are based on the string methods in the Python standard library.
String operations
-----------------
@@ -20,6 +22,7 @@ String operations
center
decode
encode
+ expandtabs
join
ljust
lower
@@ -55,6 +58,7 @@ comparison.
less_equal
greater
less
+ compare_chararrays
String information
------------------
@@ -63,9 +67,11 @@ String information
:toctree: generated/
count
+ endswith
find
index
isalpha
+ isalnum
isdecimal
isdigit
islower
@@ -76,6 +82,7 @@ String information
rfind
rindex
startswith
+ str_len
Convenience class
-----------------
@@ -83,4 +90,6 @@ Convenience class
.. autosummary::
:toctree: generated/
+ array
+ asarray
chararray
diff --git a/doc/source/reference/routines.ctypeslib.rst b/doc/source/reference/routines.ctypeslib.rst
index b04713b61..562638e9c 100644
--- a/doc/source/reference/routines.ctypeslib.rst
+++ b/doc/source/reference/routines.ctypeslib.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.ctypeslib
+
***********************************************************
C-Types Foreign Function Interface (:mod:`numpy.ctypeslib`)
***********************************************************
@@ -6,6 +8,7 @@ C-Types Foreign Function Interface (:mod:`numpy.ctypeslib`)
.. autofunction:: as_array
.. autofunction:: as_ctypes
+.. autofunction:: as_ctypes_type
.. autofunction:: ctypes_load_library
.. autofunction:: load_library
.. autofunction:: ndpointer
diff --git a/doc/source/reference/routines.dtype.rst b/doc/source/reference/routines.dtype.rst
index ec8d2981d..e9189ca07 100644
--- a/doc/source/reference/routines.dtype.rst
+++ b/doc/source/reference/routines.dtype.rst
@@ -17,11 +17,9 @@ Data type routines
Creating data types
-------------------
-
.. autosummary::
:toctree: generated/
-
dtype
format_parser
@@ -53,3 +51,4 @@ Miscellaneous
typename
sctype2char
mintypecode
+ maximum_sctype
diff --git a/doc/source/reference/routines.linalg.rst b/doc/source/reference/routines.linalg.rst
index 0520df413..d42e77ad8 100644
--- a/doc/source/reference/routines.linalg.rst
+++ b/doc/source/reference/routines.linalg.rst
@@ -1,8 +1,23 @@
.. _routines.linalg:
+.. module:: numpy.linalg
+
Linear algebra (:mod:`numpy.linalg`)
************************************
+The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
+low level implementations of standard linear algebra algorithms. Those
+libraries may be provided by NumPy itself using C versions of a subset of their
+reference implementations but, when possible, highly optimized libraries that
+take advantage of specialized processor functionality are preferred. Examples
+of such libraries are OpenBLAS_, MKL (TM), and ATLAS. Because those libraries
+are multithreaded and processor dependent, environmental variables and external
+packages such as threadpoolctl_ may be needed to control the number of threads
+or specify the processor architecture.
+
+.. _OpenBLAS: https://www.openblas.net/
+.. _threadpoolctl: https://github.com/joblib/threadpoolctl
+
.. currentmodule:: numpy
Matrix and vector products
diff --git a/doc/source/reference/routines.ma.rst b/doc/source/reference/routines.ma.rst
index 15f2ba0a4..5b2098c7a 100644
--- a/doc/source/reference/routines.ma.rst
+++ b/doc/source/reference/routines.ma.rst
@@ -68,9 +68,6 @@ Inspecting the array
ma.is_masked
ma.is_mask
- ma.MaskedArray.data
- ma.MaskedArray.mask
- ma.MaskedArray.recordmask
ma.MaskedArray.all
ma.MaskedArray.any
@@ -80,6 +77,12 @@ Inspecting the array
ma.size
+.. autosummary::
+
+ ma.MaskedArray.data
+ ma.MaskedArray.mask
+ ma.MaskedArray.recordmask
+
_____
Manipulating a MaskedArray
@@ -261,17 +264,6 @@ Conversion operations
ma.MaskedArray.tobytes
-Pickling and unpickling
-~~~~~~~~~~~~~~~~~~~~~~~
-.. autosummary::
- :toctree: generated/
-
- ma.dump
- ma.dumps
- ma.load
- ma.loads
-
-
Filling a masked array
~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
@@ -285,8 +277,10 @@ Filling a masked array
ma.MaskedArray.get_fill_value
ma.MaskedArray.set_fill_value
- ma.MaskedArray.fill_value
+.. autosummary::
+
+ ma.MaskedArray.fill_value
_____
diff --git a/doc/source/reference/routines.math.rst b/doc/source/reference/routines.math.rst
index 821363987..3c2f96830 100644
--- a/doc/source/reference/routines.math.rst
+++ b/doc/source/reference/routines.math.rst
@@ -141,6 +141,7 @@ Handling complex numbers
real
imag
conj
+ conjugate
Miscellaneous
diff --git a/doc/source/reference/routines.matlib.rst b/doc/source/reference/routines.matlib.rst
index a35eaec78..c7f675425 100644
--- a/doc/source/reference/routines.matlib.rst
+++ b/doc/source/reference/routines.matlib.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.matlib
+
Matrix library (:mod:`numpy.matlib`)
************************************
diff --git a/doc/source/reference/routines.other.rst b/doc/source/reference/routines.other.rst
index 45b9ac3d9..28c9a1ad1 100644
--- a/doc/source/reference/routines.other.rst
+++ b/doc/source/reference/routines.other.rst
@@ -5,14 +5,6 @@ Miscellaneous routines
.. currentmodule:: numpy
-Buffer objects
---------------
-.. autosummary::
- :toctree: generated/
-
- getbuffer
- newbuffer
-
Performance tuning
------------------
.. autosummary::
@@ -29,6 +21,7 @@ Memory ranges
shares_memory
may_share_memory
+ byte_bounds
Array mixins
------------
@@ -43,3 +36,21 @@ NumPy version comparison
:toctree: generated/
lib.NumpyVersion
+
+Utility
+-------
+
+.. autosummary::
+ :toctree: generated/
+
+ get_include
+ deprecate
+ deprecate_with_doc
+
+Matlab-like Functions
+---------------------
+.. autosummary::
+ :toctree: generated/
+
+ who
+ disp \ No newline at end of file
diff --git a/doc/source/reference/routines.polynomials.package.rst b/doc/source/reference/routines.polynomials.package.rst
index 61cb57fbb..7e40d9f00 100644
--- a/doc/source/reference/routines.polynomials.package.rst
+++ b/doc/source/reference/routines.polynomials.package.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.polynomial
+
Polynomial Package
==================
diff --git a/doc/source/reference/routines.polynomials.polynomial.rst b/doc/source/reference/routines.polynomials.polynomial.rst
index 8194ca867..365c8da98 100644
--- a/doc/source/reference/routines.polynomials.polynomial.rst
+++ b/doc/source/reference/routines.polynomials.polynomial.rst
@@ -1,3 +1,5 @@
+.. module:: numpy.polynomial.polynomial
+
Polynomial Module (:mod:`numpy.polynomial.polynomial`)
======================================================
diff --git a/doc/source/reference/routines.random.rst b/doc/source/reference/routines.random.rst
deleted file mode 100644
index c8b097d7d..000000000
--- a/doc/source/reference/routines.random.rst
+++ /dev/null
@@ -1,81 +0,0 @@
-.. _routines.random:
-
-Random sampling (:mod:`numpy.random`)
-*************************************
-
-.. currentmodule:: numpy.random
-
-Simple random data
-==================
-.. autosummary::
- :toctree: generated/
-
- rand
- randn
- randint
- random_integers
- random_sample
- random
- ranf
- sample
- choice
- bytes
-
-Permutations
-============
-.. autosummary::
- :toctree: generated/
-
- shuffle
- permutation
-
-Distributions
-=============
-.. autosummary::
- :toctree: generated/
-
- beta
- binomial
- chisquare
- dirichlet
- exponential
- f
- gamma
- geometric
- gumbel
- hypergeometric
- laplace
- logistic
- lognormal
- logseries
- multinomial
- multivariate_normal
- negative_binomial
- noncentral_chisquare
- noncentral_f
- normal
- pareto
- poisson
- power
- rayleigh
- standard_cauchy
- standard_exponential
- standard_gamma
- standard_normal
- standard_t
- triangular
- uniform
- vonmises
- wald
- weibull
- zipf
-
-Random generator
-================
-.. autosummary::
- :toctree: generated/
-
- RandomState
- seed
- get_state
- set_state
diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst
index a9e80480b..7a9b97d77 100644
--- a/doc/source/reference/routines.rst
+++ b/doc/source/reference/routines.rst
@@ -41,7 +41,7 @@ indentation.
routines.other
routines.padding
routines.polynomials
- routines.random
+ random/index
routines.set
routines.sort
routines.statistics
diff --git a/doc/source/reference/routines.testing.rst b/doc/source/reference/routines.testing.rst
index 5a52a40d6..98ce3f377 100644
--- a/doc/source/reference/routines.testing.rst
+++ b/doc/source/reference/routines.testing.rst
@@ -1,4 +1,4 @@
-.. _numpy-testing:
+.. module:: numpy.testing
Test Support (:mod:`numpy.testing`)
===================================
@@ -37,11 +37,11 @@ Decorators
.. autosummary::
:toctree: generated/
- decorators.deprecated
- decorators.knownfailureif
- decorators.setastest
- decorators.skipif
- decorators.slow
+ dec.deprecated
+ dec.knownfailureif
+ dec.setastest
+ dec.skipif
+ dec.slow
decorate_methods
Test Running
diff --git a/doc/source/reference/ufuncs.rst b/doc/source/reference/ufuncs.rst
index 3cc956887..3a3b67632 100644
--- a/doc/source/reference/ufuncs.rst
+++ b/doc/source/reference/ufuncs.rst
@@ -16,7 +16,7 @@ A universal function (or :term:`ufunc` for short) is a function that
operates on :class:`ndarrays <ndarray>` in an element-by-element fashion,
supporting :ref:`array broadcasting <ufuncs.broadcasting>`, :ref:`type
casting <ufuncs.casting>`, and several other standard features. That
-is, a ufunc is a ":term:`vectorized`" wrapper for a function that
+is, a ufunc is a ":term:`vectorized <vectorization>`" wrapper for a function that
takes a fixed number of specific inputs and produces a fixed number of
specific outputs.
@@ -59,7 +59,7 @@ understood by four rules:
entry in that dimension will be used for all calculations along
that dimension. In other words, the stepping machinery of the
:term:`ufunc` will simply not step along that dimension (the
- :term:`stride` will be 0 for that dimension).
+ :ref:`stride <memory-layout>` will be 0 for that dimension).
Broadcasting is used throughout NumPy to decide how to handle
disparately shaped arrays; for example, all arithmetic operations (``+``,
@@ -70,7 +70,7 @@ arrays before operation.
.. index:: broadcastable
-A set of arrays is called ":term:`broadcastable`" to the same shape if
+A set of arrays is called "broadcastable" to the same shape if
the above rules produce a valid result, *i.e.*, one of the following
is true:
@@ -118,7 +118,7 @@ all output arrays will be passed to the :obj:`~class.__array_prepare__` and
the highest :obj:`~class.__array_priority__` of any other input to the
universal function. The default :obj:`~class.__array_priority__` of the
ndarray is 0.0, and the default :obj:`~class.__array_priority__` of a subtype
-is 1.0. Matrices have :obj:`~class.__array_priority__` equal to 10.0.
+is 0.0. Matrices have :obj:`~class.__array_priority__` equal to 10.0.
All ufuncs can also take output arguments. If necessary, output will
be cast to the data-type(s) of the provided output array(s). If a class
@@ -228,46 +228,47 @@ can generate this table for your system with the code given in the Figure.
.. admonition:: Figure
- Code segment showing the "can cast safely" table for a 32-bit system.
+ Code segment showing the "can cast safely" table for a 64-bit system.
+ Generally the output depends on the system; your system might result in
+ a different table.
+ >>> mark = {False: ' -', True: ' Y'}
>>> def print_table(ntypes):
- ... print 'X',
- ... for char in ntypes: print char,
- ... print
+ ... print('X ' + ' '.join(ntypes))
... for row in ntypes:
- ... print row,
+ ... print(row, end='')
... for col in ntypes:
- ... print int(np.can_cast(row, col)),
- ... print
+ ... print(mark[np.can_cast(row, col)], end='')
+ ... print()
+ ...
>>> print_table(np.typecodes['All'])
X ? b h i l q p B H I L Q P e f d g F D G S U V O M m
- ? 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1
- b 0 1 1 1 1 1 1 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
- h 0 0 1 1 1 1 1 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0
- i 0 0 0 1 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- l 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- q 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- p 0 0 0 0 1 1 1 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- B 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 0 0
- H 0 0 0 1 1 1 1 0 1 1 1 1 1 0 1 1 1 1 1 1 1 1 1 1 0 0
- I 0 0 0 0 1 1 1 0 0 1 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- L 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- Q 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- P 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0 1 1 0 1 1 1 1 1 1 0 0
- e 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 0 0
- f 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 0 0
- d 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 1 1 1 1 1 1 0 0
- g 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0 0 1 1 1 1 1 0 0
- F 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 0 0
- D 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 0 0
- G 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 0 0
- S 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 0 0
- U 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 0 0
- V 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0
- O 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0
- M 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 0
- m 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1
-
+ ? Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+ b - Y Y Y Y Y Y - - - - - - Y Y Y Y Y Y Y Y Y Y Y - Y
+ h - - Y Y Y Y Y - - - - - - - Y Y Y Y Y Y Y Y Y Y - Y
+ i - - - Y Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ l - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ q - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ p - - - - Y Y Y - - - - - - - - Y Y - Y Y Y Y Y Y - Y
+ B - - Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y Y - Y
+ H - - - Y Y Y Y - Y Y Y Y Y - Y Y Y Y Y Y Y Y Y Y - Y
+ I - - - - Y Y Y - - Y Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ L - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ Q - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ P - - - - - - - - - - Y Y Y - - Y Y - Y Y Y Y Y Y - Y
+ e - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y Y - -
+ f - - - - - - - - - - - - - - Y Y Y Y Y Y Y Y Y Y - -
+ d - - - - - - - - - - - - - - - Y Y - Y Y Y Y Y Y - -
+ g - - - - - - - - - - - - - - - - Y - - Y Y Y Y Y - -
+ F - - - - - - - - - - - - - - - - - Y Y Y Y Y Y Y - -
+ D - - - - - - - - - - - - - - - - - - Y Y Y Y Y Y - -
+ G - - - - - - - - - - - - - - - - - - - Y Y Y Y Y - -
+ S - - - - - - - - - - - - - - - - - - - - Y Y Y Y - -
+ U - - - - - - - - - - - - - - - - - - - - - Y Y Y - -
+ V - - - - - - - - - - - - - - - - - - - - - - Y Y - -
+ O - - - - - - - - - - - - - - - - - - - - - - Y Y - -
+ M - - - - - - - - - - - - - - - - - - - - - - Y Y Y -
+ m - - - - - - - - - - - - - - - - - - - - - - Y Y - Y
You should note that, while included in the table for completeness,
the 'S', 'U', and 'V' types cannot be operated on by ufuncs. Also,
@@ -586,6 +587,7 @@ Math operations
sign
heaviside
conj
+ conjugate
exp
exp2
log
diff --git a/doc/source/release.rst b/doc/source/release.rst
index 7f0197f3f..fb4e2b14d 100644
--- a/doc/source/release.rst
+++ b/doc/source/release.rst
@@ -2,44 +2,58 @@
Release Notes
*************
-.. include:: ../release/1.16.0-notes.rst
-.. include:: ../release/1.15.2-notes.rst
-.. include:: ../release/1.15.1-notes.rst
-.. include:: ../release/1.15.0-notes.rst
-.. include:: ../release/1.14.6-notes.rst
-.. include:: ../release/1.14.5-notes.rst
-.. include:: ../release/1.14.4-notes.rst
-.. include:: ../release/1.14.3-notes.rst
-.. include:: ../release/1.14.2-notes.rst
-.. include:: ../release/1.14.1-notes.rst
-.. include:: ../release/1.14.0-notes.rst
-.. include:: ../release/1.13.3-notes.rst
-.. include:: ../release/1.13.2-notes.rst
-.. include:: ../release/1.13.1-notes.rst
-.. include:: ../release/1.13.0-notes.rst
-.. include:: ../release/1.12.1-notes.rst
-.. include:: ../release/1.12.0-notes.rst
-.. include:: ../release/1.11.3-notes.rst
-.. include:: ../release/1.11.2-notes.rst
-.. include:: ../release/1.11.1-notes.rst
-.. include:: ../release/1.11.0-notes.rst
-.. include:: ../release/1.10.4-notes.rst
-.. include:: ../release/1.10.3-notes.rst
-.. include:: ../release/1.10.2-notes.rst
-.. include:: ../release/1.10.1-notes.rst
-.. include:: ../release/1.10.0-notes.rst
-.. include:: ../release/1.9.2-notes.rst
-.. include:: ../release/1.9.1-notes.rst
-.. include:: ../release/1.9.0-notes.rst
-.. include:: ../release/1.8.2-notes.rst
-.. include:: ../release/1.8.1-notes.rst
-.. include:: ../release/1.8.0-notes.rst
-.. include:: ../release/1.7.2-notes.rst
-.. include:: ../release/1.7.1-notes.rst
-.. include:: ../release/1.7.0-notes.rst
-.. include:: ../release/1.6.2-notes.rst
-.. include:: ../release/1.6.1-notes.rst
-.. include:: ../release/1.6.0-notes.rst
-.. include:: ../release/1.5.0-notes.rst
-.. include:: ../release/1.4.0-notes.rst
-.. include:: ../release/1.3.0-notes.rst
+.. toctree::
+ :maxdepth: 3
+
+ 1.18.0 <release/1.18.0-notes>
+ 1.17.2 <release/1.17.2-notes>
+ 1.17.1 <release/1.17.1-notes>
+ 1.17.0 <release/1.17.0-notes>
+ 1.16.5 <release/1.16.5-notes>
+ 1.16.4 <release/1.16.4-notes>
+ 1.16.3 <release/1.16.3-notes>
+ 1.16.2 <release/1.16.2-notes>
+ 1.16.1 <release/1.16.1-notes>
+ 1.16.0 <release/1.16.0-notes>
+ 1.15.4 <release/1.15.4-notes>
+ 1.15.3 <release/1.15.3-notes>
+ 1.15.2 <release/1.15.2-notes>
+ 1.15.1 <release/1.15.1-notes>
+ 1.15.0 <release/1.15.0-notes>
+ 1.14.6 <release/1.14.6-notes>
+ 1.14.5 <release/1.14.5-notes>
+ 1.14.4 <release/1.14.4-notes>
+ 1.14.3 <release/1.14.3-notes>
+ 1.14.2 <release/1.14.2-notes>
+ 1.14.1 <release/1.14.1-notes>
+ 1.14.0 <release/1.14.0-notes>
+ 1.13.3 <release/1.13.3-notes>
+ 1.13.2 <release/1.13.2-notes>
+ 1.13.1 <release/1.13.1-notes>
+ 1.13.0 <release/1.13.0-notes>
+ 1.12.1 <release/1.12.1-notes>
+ 1.12.0 <release/1.12.0-notes>
+ 1.11.3 <release/1.11.3-notes>
+ 1.11.2 <release/1.11.2-notes>
+ 1.11.1 <release/1.11.1-notes>
+ 1.11.0 <release/1.11.0-notes>
+ 1.10.4 <release/1.10.4-notes>
+ 1.10.3 <release/1.10.3-notes>
+ 1.10.2 <release/1.10.2-notes>
+ 1.10.1 <release/1.10.1-notes>
+ 1.10.0 <release/1.10.0-notes>
+ 1.9.2 <release/1.9.2-notes>
+ 1.9.1 <release/1.9.1-notes>
+ 1.9.0 <release/1.9.0-notes>
+ 1.8.2 <release/1.8.2-notes>
+ 1.8.1 <release/1.8.1-notes>
+ 1.8.0 <release/1.8.0-notes>
+ 1.7.2 <release/1.7.2-notes>
+ 1.7.1 <release/1.7.1-notes>
+ 1.7.0 <release/1.7.0-notes>
+ 1.6.2 <release/1.6.2-notes>
+ 1.6.1 <release/1.6.1-notes>
+ 1.6.0 <release/1.6.0-notes>
+ 1.5.0 <release/1.5.0-notes>
+ 1.4.0 <release/1.4.0-notes>
+ 1.3.0 <release/1.3.0-notes>
diff --git a/doc/release/1.10.0-notes.rst b/doc/source/release/1.10.0-notes.rst
index 88062e463..88062e463 100644
--- a/doc/release/1.10.0-notes.rst
+++ b/doc/source/release/1.10.0-notes.rst
diff --git a/doc/release/1.10.1-notes.rst b/doc/source/release/1.10.1-notes.rst
index 4e541d279..4e541d279 100644
--- a/doc/release/1.10.1-notes.rst
+++ b/doc/source/release/1.10.1-notes.rst
diff --git a/doc/release/1.10.2-notes.rst b/doc/source/release/1.10.2-notes.rst
index 8c26b463c..8c26b463c 100644
--- a/doc/release/1.10.2-notes.rst
+++ b/doc/source/release/1.10.2-notes.rst
diff --git a/doc/release/1.10.3-notes.rst b/doc/source/release/1.10.3-notes.rst
index 0d4df4ce6..0d4df4ce6 100644
--- a/doc/release/1.10.3-notes.rst
+++ b/doc/source/release/1.10.3-notes.rst
diff --git a/doc/release/1.10.4-notes.rst b/doc/source/release/1.10.4-notes.rst
index 481928ca7..481928ca7 100644
--- a/doc/release/1.10.4-notes.rst
+++ b/doc/source/release/1.10.4-notes.rst
diff --git a/doc/release/1.11.0-notes.rst b/doc/source/release/1.11.0-notes.rst
index 166502ac5..166502ac5 100644
--- a/doc/release/1.11.0-notes.rst
+++ b/doc/source/release/1.11.0-notes.rst
diff --git a/doc/release/1.11.1-notes.rst b/doc/source/release/1.11.1-notes.rst
index 6303c32f0..6303c32f0 100644
--- a/doc/release/1.11.1-notes.rst
+++ b/doc/source/release/1.11.1-notes.rst
diff --git a/doc/release/1.11.2-notes.rst b/doc/source/release/1.11.2-notes.rst
index c954089d5..c954089d5 100644
--- a/doc/release/1.11.2-notes.rst
+++ b/doc/source/release/1.11.2-notes.rst
diff --git a/doc/release/1.11.3-notes.rst b/doc/source/release/1.11.3-notes.rst
index 8381a97f7..8381a97f7 100644
--- a/doc/release/1.11.3-notes.rst
+++ b/doc/source/release/1.11.3-notes.rst
diff --git a/doc/release/1.12.0-notes.rst b/doc/source/release/1.12.0-notes.rst
index 711055d16..711055d16 100644
--- a/doc/release/1.12.0-notes.rst
+++ b/doc/source/release/1.12.0-notes.rst
diff --git a/doc/release/1.12.1-notes.rst b/doc/source/release/1.12.1-notes.rst
index f67dab108..f67dab108 100644
--- a/doc/release/1.12.1-notes.rst
+++ b/doc/source/release/1.12.1-notes.rst
diff --git a/doc/release/1.13.0-notes.rst b/doc/source/release/1.13.0-notes.rst
index 3b719db09..3b719db09 100644
--- a/doc/release/1.13.0-notes.rst
+++ b/doc/source/release/1.13.0-notes.rst
diff --git a/doc/release/1.13.1-notes.rst b/doc/source/release/1.13.1-notes.rst
index 88a4bc3dd..88a4bc3dd 100644
--- a/doc/release/1.13.1-notes.rst
+++ b/doc/source/release/1.13.1-notes.rst
diff --git a/doc/release/1.13.2-notes.rst b/doc/source/release/1.13.2-notes.rst
index f2f9120f5..f2f9120f5 100644
--- a/doc/release/1.13.2-notes.rst
+++ b/doc/source/release/1.13.2-notes.rst
diff --git a/doc/release/1.13.3-notes.rst b/doc/source/release/1.13.3-notes.rst
index 7f7170bcc..7f7170bcc 100644
--- a/doc/release/1.13.3-notes.rst
+++ b/doc/source/release/1.13.3-notes.rst
diff --git a/doc/release/1.14.0-notes.rst b/doc/source/release/1.14.0-notes.rst
index 462631de6..462631de6 100644
--- a/doc/release/1.14.0-notes.rst
+++ b/doc/source/release/1.14.0-notes.rst
diff --git a/doc/release/1.14.1-notes.rst b/doc/source/release/1.14.1-notes.rst
index 7b95c2e28..7b95c2e28 100644
--- a/doc/release/1.14.1-notes.rst
+++ b/doc/source/release/1.14.1-notes.rst
diff --git a/doc/release/1.14.2-notes.rst b/doc/source/release/1.14.2-notes.rst
index 3f47cb5f5..3f47cb5f5 100644
--- a/doc/release/1.14.2-notes.rst
+++ b/doc/source/release/1.14.2-notes.rst
diff --git a/doc/release/1.14.3-notes.rst b/doc/source/release/1.14.3-notes.rst
index 60b631168..60b631168 100644
--- a/doc/release/1.14.3-notes.rst
+++ b/doc/source/release/1.14.3-notes.rst
diff --git a/doc/release/1.14.4-notes.rst b/doc/source/release/1.14.4-notes.rst
index 174094c1c..3fb94383b 100644
--- a/doc/release/1.14.4-notes.rst
+++ b/doc/source/release/1.14.4-notes.rst
@@ -19,7 +19,7 @@ values are now correct.
Note that NumPy will error on import if it detects incorrect float32 `dot`
results. This problem has been seen on the Mac when working in the Anaconda
-enviroment and is due to a subtle interaction between MKL and PyQt5. It is not
+environment and is due to a subtle interaction between MKL and PyQt5. It is not
strictly a NumPy problem, but it is best that users be aware of it. See the
gh-8577 NumPy issue for more information.
diff --git a/doc/release/1.14.5-notes.rst b/doc/source/release/1.14.5-notes.rst
index 9a97cc033..9a97cc033 100644
--- a/doc/release/1.14.5-notes.rst
+++ b/doc/source/release/1.14.5-notes.rst
diff --git a/doc/release/1.14.6-notes.rst b/doc/source/release/1.14.6-notes.rst
index ac6a78272..ac6a78272 100644
--- a/doc/release/1.14.6-notes.rst
+++ b/doc/source/release/1.14.6-notes.rst
diff --git a/doc/release/1.15.0-notes.rst b/doc/source/release/1.15.0-notes.rst
index 7235ca915..7235ca915 100644
--- a/doc/release/1.15.0-notes.rst
+++ b/doc/source/release/1.15.0-notes.rst
diff --git a/doc/release/1.15.1-notes.rst b/doc/source/release/1.15.1-notes.rst
index ddb83303c..ddb83303c 100644
--- a/doc/release/1.15.1-notes.rst
+++ b/doc/source/release/1.15.1-notes.rst
diff --git a/doc/release/1.15.2-notes.rst b/doc/source/release/1.15.2-notes.rst
index a3e61fccd..a3e61fccd 100644
--- a/doc/release/1.15.2-notes.rst
+++ b/doc/source/release/1.15.2-notes.rst
diff --git a/doc/source/release/1.15.3-notes.rst b/doc/source/release/1.15.3-notes.rst
new file mode 100644
index 000000000..753eecec9
--- /dev/null
+++ b/doc/source/release/1.15.3-notes.rst
@@ -0,0 +1,49 @@
+==========================
+NumPy 1.15.3 Release Notes
+==========================
+
+This is a bugfix release for bugs and regressions reported following the 1.15.2
+release. The Python versions supported by this release are 2.7, 3.4-3.7. The
+wheels are linked with OpenBLAS v0.3.0, which should fix some of the linalg
+problems reported for NumPy 1.14.
+
+Compatibility Note
+==================
+
+The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
+binaries. That will also be the case in future releases. See
+`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
+discussion. Those needing 32-bit support should look elsewhere or build
+from source.
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Allan Haldane
+* Charles Harris
+* Jeroen Demeyer
+* Kevin Sheppard
+* Matthew Bowden +
+* Matti Picus
+* Tyler Reddy
+
+Pull requests merged
+====================
+
+A total of 12 pull requests were merged for this release.
+
+* `#12080 <https://github.com/numpy/numpy/pull/12080>`__: MAINT: Blacklist some MSVC complex functions.
+* `#12083 <https://github.com/numpy/numpy/pull/12083>`__: TST: Add azure CI testing to 1.15.x branch.
+* `#12084 <https://github.com/numpy/numpy/pull/12084>`__: BUG: test_path() now uses Path.resolve()
+* `#12085 <https://github.com/numpy/numpy/pull/12085>`__: TST, MAINT: Fix some failing tests on azure-pipelines mac and...
+* `#12187 <https://github.com/numpy/numpy/pull/12187>`__: BUG: Fix memory leak in mapping.c
+* `#12188 <https://github.com/numpy/numpy/pull/12188>`__: BUG: Allow boolean subtract in histogram
+* `#12189 <https://github.com/numpy/numpy/pull/12189>`__: BUG: Fix in-place permutation
+* `#12190 <https://github.com/numpy/numpy/pull/12190>`__: BUG: limit default for get_num_build_jobs() to 8
+* `#12191 <https://github.com/numpy/numpy/pull/12191>`__: BUG: OBJECT_to_* should check for errors
+* `#12192 <https://github.com/numpy/numpy/pull/12192>`__: DOC: Prepare for NumPy 1.15.3 release.
+* `#12237 <https://github.com/numpy/numpy/pull/12237>`__: BUG: Fix MaskedArray fill_value type conversion.
+* `#12238 <https://github.com/numpy/numpy/pull/12238>`__: TST: Backport azure-pipeline testing fixes for Mac
diff --git a/doc/source/release/1.15.4-notes.rst b/doc/source/release/1.15.4-notes.rst
new file mode 100644
index 000000000..033bd5828
--- /dev/null
+++ b/doc/source/release/1.15.4-notes.rst
@@ -0,0 +1,38 @@
+==========================
+NumPy 1.15.4 Release Notes
+==========================
+
+This is a bugfix release for bugs and regressions reported following the 1.15.3
+release. The Python versions supported by this release are 2.7, 3.4-3.7. The
+wheels are linked with OpenBLAS v0.3.0, which should fix some of the linalg
+problems reported for NumPy 1.14.
+
+Compatibility Note
+==================
+
+The NumPy 1.15.x OS X wheels released on PyPI no longer contain 32-bit
+binaries. That will also be the case in future releases. See
+`#11625 <https://github.com/numpy/numpy/issues/11625>`__ for the related
+discussion. Those needing 32-bit support should look elsewhere or build
+from source.
+
+Contributors
+============
+
+A total of 4 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Matti Picus
+* Sebastian Berg
+* bbbbbbbbba +
+
+Pull requests merged
+====================
+
+A total of 4 pull requests were merged for this release.
+
+* `#12296 <https://github.com/numpy/numpy/pull/12296>`__: BUG: Dealloc cached buffer info
+* `#12297 <https://github.com/numpy/numpy/pull/12297>`__: BUG: Fix fill value in masked array '==' and '!=' ops.
+* `#12307 <https://github.com/numpy/numpy/pull/12307>`__: DOC: Correct the default value of `optimize` in `numpy.einsum`
+* `#12320 <https://github.com/numpy/numpy/pull/12320>`__: REL: Prepare for the NumPy 1.15.4 release
diff --git a/doc/source/release/1.16.0-notes.rst b/doc/source/release/1.16.0-notes.rst
new file mode 100644
index 000000000..1034d6e6c
--- /dev/null
+++ b/doc/source/release/1.16.0-notes.rst
@@ -0,0 +1,536 @@
+==========================
+NumPy 1.16.0 Release Notes
+==========================
+
+This NumPy release is the last one to support Python 2.7 and will be maintained
+as a long term release with bug fixes until 2020. Support for Python 3.4 has been
+dropped, the supported Python versions are 2.7 and 3.5-3.7. The wheels on PyPI
+are linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+This release has seen a lot of refactoring and features many bug fixes, improved
+code organization, and better cross platform compatibility. Not all of these
+improvements will be visible to users, but they should help make maintenance
+easier going forward.
+
+
+Highlights
+==========
+
+* Experimental (opt-in only) support for overriding numpy functions,
+ see ``__array_function__`` below.
+
+* The ``matmul`` function is now a ufunc. This provides better
+ performance and allows overriding with ``__array_ufunc__``.
+
+* Improved support for the ARM and POWER architectures.
+
+* Improved support for AIX and PyPy.
+
+* Improved interop with ctypes.
+
+* Improved support for PEP 3118.
+
+
+
+New functions
+=============
+
+* New functions added to the `numpy.lib.recfunctions` module to ease the
+ structured assignment changes:
+
+ * ``assign_fields_by_name``
+ * ``structured_to_unstructured``
+ * ``unstructured_to_structured``
+ * ``apply_along_fields``
+ * ``require_fields``
+
+ See the user guide at <https://docs.scipy.org/doc/numpy/user/basics.rec.html>
+ for more info.
+
+
+New deprecations
+================
+
+* The type dictionaries `numpy.core.typeNA` and `numpy.core.sctypeNA` are
+ deprecated. They were buggy and not documented and will be removed in the
+ 1.18 release. Use `numpy.sctypeDict` instead.
+
+* The `numpy.asscalar` function is deprecated. It is an alias to the more
+ powerful `numpy.ndarray.item`, not tested, and fails for scalars.
+
+* The `numpy.set_array_ops` and `numpy.get_array_ops` functions are deprecated.
+ As part of `NEP 15`, they have been deprecated along with the C-API functions
+ :c:func:`PyArray_SetNumericOps` and :c:func:`PyArray_GetNumericOps`. Users
+ who wish to override the inner loop functions in built-in ufuncs should use
+ :c:func:`PyUFunc_ReplaceLoopBySignature`.
+
+* The `numpy.unravel_index` keyword argument ``dims`` is deprecated, use
+ ``shape`` instead.
+
+* The `numpy.histogram` ``normed`` argument is deprecated. It was deprecated
+ previously, but no warning was issued.
+
+* The ``positive`` operator (``+``) applied to non-numerical arrays is
+ deprecated. See below for details.
+
+* Passing an iterator to the stack functions is deprecated
+
+
+Expired deprecations
+====================
+
+* NaT comparisons now return ``False`` without a warning, finishing a
+ deprecation cycle begun in NumPy 1.11.
+
+* ``np.lib.function_base.unique`` was removed, finishing a deprecation cycle
+ begun in NumPy 1.4. Use `numpy.unique` instead.
+
+* multi-field indexing now returns views instead of copies, finishing a
+ deprecation cycle begun in NumPy 1.7. The change was previously attempted in
+ NumPy 1.14 but reverted until now.
+
+* ``np.PackageLoader`` and ``np.pkgload`` have been removed. These were
+ deprecated in 1.10, had no tests, and seem to no longer work in 1.15.
+
+
+Future changes
+==============
+
+* NumPy 1.17 will drop support for Python 2.7.
+
+
+Compatibility notes
+===================
+
+f2py script on Windows
+----------------------
+On Windows, the installed script for running f2py is now an ``.exe`` file
+rather than a ``*.py`` file and should be run from the command line as ``f2py``
+whenever the ``Scripts`` directory is in the path. Running ``f2py`` as a module
+``python -m numpy.f2py [...]`` will work without path modification in any
+version of NumPy.
+
+NaT comparisons
+---------------
+Consistent with the behavior of NaN, all comparisons other than inequality
+checks with datetime64 or timedelta64 NaT ("not-a-time") values now always
+return ``False``, and inequality checks with NaT now always return ``True``.
+This includes comparisons between NaT values. For compatibility with the
+old behavior, use ``np.isnat`` to explicitly check for NaT or convert
+datetime64/timedelta64 arrays with ``.astype(np.int64)`` before making
+comparisons.
+
+complex64/128 alignment has changed
+-----------------------------------
+The memory alignment of complex types is now the same as a C-struct composed of
+two floating point values, while before it was equal to the size of the type.
+For many users (for instance on x64/unix/gcc) this means that complex64 is now
+4-byte aligned instead of 8-byte aligned. An important consequence is that
+aligned structured dtypes may now have a different size. For instance,
+``np.dtype('c8,u1', align=True)`` used to have an itemsize of 16 (on x64/gcc)
+but now it is 12.
+
+More in detail, the complex64 type now has the same alignment as a C-struct
+``struct {float r, i;}``, according to the compiler used to compile numpy, and
+similarly for the complex128 and complex256 types.
+
+nd_grid __len__ removal
+-----------------------
+``len(np.mgrid)`` and ``len(np.ogrid)`` are now considered nonsensical
+and raise a ``TypeError``.
+
+``np.unravel_index`` now accepts ``shape`` keyword argument
+-----------------------------------------------------------
+Previously, only the ``dims`` keyword argument was accepted
+for specification of the shape of the array to be used
+for unraveling. ``dims`` remains supported, but is now deprecated.
+
+multi-field views return a view instead of a copy
+-------------------------------------------------
+Indexing a structured array with multiple fields, e.g., ``arr[['f1', 'f3']]``,
+returns a view into the original array instead of a copy. The returned view
+will often have extra padding bytes corresponding to intervening fields in the
+original array, unlike before, which will affect code such as
+``arr[['f1', 'f3']].view('float64')``. This change has been planned since numpy
+1.7. Operations hitting this path have emitted ``FutureWarnings`` since then.
+Additional ``FutureWarnings`` about this change were added in 1.12.
+
+To help users update their code to account for these changes, a number of
+functions have been added to the ``numpy.lib.recfunctions`` module which
+safely allow such operations. For instance, the code above can be replaced
+with ``structured_to_unstructured(arr[['f1', 'f3']], dtype='float64')``.
+See the "accessing multiple fields" section of the
+`user guide <https://docs.scipy.org/doc/numpy/user/basics.rec.html#accessing-multiple-fields>`__.
+
+
+C API changes
+=============
+
+The :c:data:`NPY_API_VERSION` was incremented to 0x0000D, due to the addition
+of:
+
+* :c:member:`PyUFuncObject.core_dim_flags`
+* :c:member:`PyUFuncObject.core_dim_sizes`
+* :c:member:`PyUFuncObject.identity_value`
+* :c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`
+
+
+New Features
+============
+
+Integrated squared error (ISE) estimator added to ``histogram``
+---------------------------------------------------------------
+This method (``bins='stone'``) for optimizing the bin number is a
+generalization of the Scott's rule. The Scott's rule assumes the distribution
+is approximately Normal, while the ISE_ is a non-parametric method based on
+cross-validation.
+
+.. _ISE: https://en.wikipedia.org/wiki/Histogram#Minimizing_cross-validation_estimated_squared_error
+
+``max_rows`` keyword added for ``np.loadtxt``
+---------------------------------------------
+New keyword ``max_rows`` in `numpy.loadtxt` sets the maximum rows of the
+content to be read after ``skiprows``, as in `numpy.genfromtxt`.
+
+modulus operator support added for ``np.timedelta64`` operands
+--------------------------------------------------------------
+The modulus (remainder) operator is now supported for two operands
+of type ``np.timedelta64``. The operands may have different units
+and the return value will match the type of the operands.
+
+
+Improvements
+============
+
+no-copy pickling of numpy arrays
+--------------------------------
+Up to protocol 4, numpy array pickling created 2 spurious copies of the data
+being serialized. With pickle protocol 5, and the ``PickleBuffer`` API, a
+large variety of numpy arrays can now be serialized without any copy using
+out-of-band buffers, and with one less copy using in-band buffers. This
+results, for large arrays, in an up to 66% drop in peak memory usage.
+
+build shell independence
+------------------------
+NumPy builds should no longer interact with the host machine
+shell directly. ``exec_command`` has been replaced with
+``subprocess.check_output`` where appropriate.
+
+`np.polynomial.Polynomial` classes render in LaTeX in Jupyter notebooks
+-----------------------------------------------------------------------
+When used in a front-end that supports it, `Polynomial` instances are now
+rendered through LaTeX. The current format is experimental, and is subject to
+change.
+
+``randint`` and ``choice`` now work on empty distributions
+----------------------------------------------------------
+Even when no elements needed to be drawn, ``np.random.randint`` and
+``np.random.choice`` raised an error when the arguments described an empty
+distribution. This has been fixed so that e.g.
+``np.random.choice([], 0) == np.array([], dtype=float64)``.
+
+``linalg.lstsq``, ``linalg.qr``, and ``linalg.svd`` now work with empty arrays
+------------------------------------------------------------------------------
+Previously, a ``LinAlgError`` would be raised when an empty matrix/empty
+matrices (with zero rows and/or columns) is/are passed in. Now outputs of
+appropriate shapes are returned.
+
+Chain exceptions to give better error messages for invalid PEP3118 format strings
+---------------------------------------------------------------------------------
+This should help track down problems.
+
+Einsum optimization path updates and efficiency improvements
+------------------------------------------------------------
+Einsum was synchronized with the current upstream work.
+
+`numpy.angle` and `numpy.expand_dims` now work on ``ndarray`` subclasses
+------------------------------------------------------------------------
+In particular, they now work for masked arrays.
+
+``NPY_NO_DEPRECATED_API`` compiler warning suppression
+------------------------------------------------------
+Setting ``NPY_NO_DEPRECATED_API`` to a value of 0 will suppress the current compiler
+warnings when the deprecated numpy API is used.
+
+``np.diff`` Added kwargs prepend and append
+-------------------------------------------
+New kwargs ``prepend`` and ``append``, allow for values to be inserted on
+either end of the differences. Similar to options for `ediff1d`. Now the
+inverse of `cumsum` can be obtained easily via ``prepend=0``.
+
+ARM support updated
+-------------------
+Support for ARM CPUs has been updated to accommodate 32 and 64 bit targets,
+and also big and little endian byte ordering. AARCH32 memory alignment issues
+have been addressed. CI testing has been expanded to include AARCH64 targets
+via the services of shippable.com.
+
+Appending to build flags
+------------------------
+`numpy.distutils` has always overridden rather than appended to `LDFLAGS` and
+other similar such environment variables for compiling Fortran extensions.
+Now, if the `NPY_DISTUTILS_APPEND_FLAGS` environment variable is set to 1, the
+behavior will be appending. This applied to: `LDFLAGS`, `F77FLAGS`,
+`F90FLAGS`, `FREEFLAGS`, `FOPT`, `FDEBUG`, and `FFLAGS`. See gh-11525 for more
+details.
+
+Generalized ufunc signatures now allow fixed-size dimensions
+------------------------------------------------------------
+By using a numerical value in the signature of a generalized ufunc, one can
+indicate that the given function requires input or output to have dimensions
+with the given size. E.g., the signature of a function that converts a polar
+angle to a two-dimensional cartesian unit vector would be ``()->(2)``; that
+for one that converts two spherical angles to a three-dimensional unit vector
+would be ``(),()->(3)``; and that for the cross product of two
+three-dimensional vectors would be ``(3),(3)->(3)``.
+
+Note that to the elementary function these dimensions are not treated any
+differently from variable ones indicated with a name starting with a letter;
+the loop still is passed the corresponding size, but it can now count on that
+size being equal to the fixed one given in the signature.
+
+Generalized ufunc signatures now allow flexible dimensions
+----------------------------------------------------------
+Some functions, in particular numpy's implementation of ``@`` as ``matmul``,
+are very similar to generalized ufuncs in that they operate over core
+dimensions, but one could not present them as such because they were able to
+deal with inputs in which a dimension is missing. To support this, it is now
+allowed to postfix a dimension name with a question mark to indicate that the
+dimension does not necessarily have to be present.
+
+With this addition, the signature for ``matmul`` can be expressed as
+``(m?,n),(n,p?)->(m?,p?)``. This indicates that if, e.g., the second operand
+has only one dimension, for the purposes of the elementary function it will be
+treated as if that input has core shape ``(n, 1)``, and the output has the
+corresponding core shape of ``(m, 1)``. The actual output array, however, has
+the flexible dimension removed, i.e., it will have shape ``(..., m)``.
+Similarly, if both arguments have only a single dimension, the inputs will be
+presented as having shapes ``(1, n)`` and ``(n, 1)`` to the elementary
+function, and the output as ``(1, 1)``, while the actual output array returned
+will have shape ``()``. In this way, the signature allows one to use a
+single elementary function for four related but different signatures,
+``(m,n),(n,p)->(m,p)``, ``(n),(n,p)->(p)``, ``(m,n),(n)->(m)`` and
+``(n),(n)->()``.
+
+``np.clip`` and the ``clip`` method check for memory overlap
+------------------------------------------------------------
+The ``out`` argument to these functions is now always tested for memory overlap
+to avoid corrupted results when memory overlap occurs.
+
+New value ``unscaled`` for option ``cov`` in ``np.polyfit``
+-----------------------------------------------------------
+A further possible value has been added to the ``cov`` parameter of the
+``np.polyfit`` function. With ``cov='unscaled'`` the scaling of the covariance
+matrix is disabled completely (similar to setting ``absolute_sigma=True`` in
+``scipy.optimize.curve_fit``). This would be useful in occasions, where the
+weights are given by 1/sigma with sigma being the (known) standard errors of
+(Gaussian distributed) data points, in which case the unscaled matrix is
+already a correct estimate for the covariance matrix.
+
+Detailed docstrings for scalar numeric types
+--------------------------------------------
+The ``help`` function, when applied to numeric types such as `numpy.intc`,
+`numpy.int_`, and `numpy.longlong`, now lists all of the aliased names for that
+type, distinguishing between platform-dependent and -independent aliases.
+
+``__module__`` attribute now points to public modules
+-----------------------------------------------------
+The ``__module__`` attribute on most NumPy functions has been updated to refer
+to the preferred public module from which to access a function, rather than
+the module in which the function happens to be defined. This produces more
+informative displays for functions in tools such as IPython, e.g., instead of
+``<function 'numpy.core.fromnumeric.sum'>`` you now see
+``<function 'numpy.sum'>``.
+
+Large allocations marked as suitable for transparent hugepages
+--------------------------------------------------------------
+On systems that support transparent hugepages over the madvise system call
+numpy now marks that large memory allocations can be backed by hugepages which
+reduces page fault overhead and can in some fault heavy cases improve
+performance significantly. On Linux the setting for huge pages to be used,
+`/sys/kernel/mm/transparent_hugepage/enabled`, must be at least `madvise`.
+Systems which already have it set to `always` will not see much difference as
+the kernel will automatically use huge pages where appropriate.
+
+Users of very old Linux kernels (~3.x and older) should make sure that
+`/sys/kernel/mm/transparent_hugepage/defrag` is not set to `always` to avoid
+performance problems due to concurrency issues in the memory defragmentation.
+
+Alpine Linux (and other musl c library distros) support
+-------------------------------------------------------
+We now default to use `fenv.h` for floating point status error reporting.
+Previously we had a broken default that sometimes would not report underflow,
+overflow, and invalid floating point operations. Now we can support non-glibc
+distributions like Alpine Linux as long as they ship `fenv.h`.
+
+Speedup ``np.block`` for large arrays
+-------------------------------------
+Large arrays (greater than ``512 * 512``) now use a blocking algorithm based on
+copying the data directly into the appropriate slice of the resulting array.
+This results in significant speedups for these large arrays, particularly for
+arrays being blocked along more than 2 dimensions.
+
+``arr.ctypes.data_as(...)`` holds a reference to arr
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Previously the caller was responsible for keeping the array alive for the
+lifetime of the pointer.
+
+Speedup ``np.take`` for read-only arrays
+----------------------------------------
+The implementation of ``np.take`` no longer makes an unnecessary copy of the
+source array when its ``writeable`` flag is set to ``False``.
+
+Support path-like objects for more functions
+--------------------------------------------
+The ``np.core.records.fromfile`` function now supports ``pathlib.Path``
+and other path-like objects in addition to a file object. Furthermore, the
+``np.load`` function now also supports path-like objects when using memory
+mapping (``mmap_mode`` keyword argument).
+
+Better behaviour of ufunc identities during reductions
+------------------------------------------------------
+Universal functions have an ``.identity`` which is used when ``.reduce`` is
+called on an empty axis.
+
+As of this release, the logical binary ufuncs, `logical_and`, `logical_or`,
+and `logical_xor`, now have ``identity`` s of type `bool`, where previously they
+were of type `int`. This restores the 1.14 behavior of getting ``bool`` s when
+reducing empty object arrays with these ufuncs, while also keeping the 1.15
+behavior of getting ``int`` s when reducing empty object arrays with arithmetic
+ufuncs like ``add`` and ``multiply``.
+
+Additionally, `logaddexp` now has an identity of ``-inf``, allowing it to be
+called on empty sequences, where previously it could not be.
+
+This is possible thanks to the new
+:c:func:`PyUFunc_FromFuncAndDataAndSignatureAndIdentity`, which allows
+arbitrary values to be used as identities now.
+
+Improved conversion from ctypes objects
+---------------------------------------
+Numpy has always supported taking a value or type from ``ctypes`` and
+converting it into an array or dtype, but only behaved correctly for simpler
+types. As of this release, this caveat is lifted - now:
+
+* The ``_pack_`` attribute of ``ctypes.Structure``, used to emulate C's
+ ``__attribute__((packed))``, is respected.
+* Endianness of all ctypes objects is preserved
+* ``ctypes.Union`` is supported
+* Non-representable constructs raise exceptions, rather than producing
+ dangerously incorrect results:
+
+ * Bitfields are no longer interpreted as sub-arrays
+ * Pointers are no longer replaced with the type that they point to
+
+A new ``ndpointer.contents`` member
+-----------------------------------
+This matches the ``.contents`` member of normal ctypes arrays, and can be used
+to construct an ``np.array`` around the pointers contents. This replaces
+``np.array(some_nd_pointer)``, which stopped working in 1.15. As a side effect
+of this change, ``ndpointer`` now supports dtypes with overlapping fields and
+padding.
+
+``matmul`` is now a ``ufunc``
+-----------------------------
+`numpy.matmul` is now a ufunc which means that both the function and the
+``__matmul__`` operator can now be overridden by ``__array_ufunc__``. Its
+implementation has also changed. It uses the same BLAS routines as
+`numpy.dot`, ensuring its performance is similar for large matrices.
+
+Start and stop arrays for ``linspace``, ``logspace`` and ``geomspace``
+----------------------------------------------------------------------
+These functions used to be limited to scalar stop and start values, but can
+now take arrays, which will be properly broadcast and result in an output
+which has one axis prepended. This can be used, e.g., to obtain linearly
+interpolated points between sets of points.
+
+CI extended with additional services
+------------------------------------
+We now use additional free CI services, thanks to the companies that provide:
+
+* Codecoverage testing via codecov.io
+* Arm testing via shippable.com
+* Additional test runs on azure pipelines
+
+These are in addition to our continued use of travis, appveyor (for wheels) and
+LGTM
+
+
+Changes
+=======
+
+Comparison ufuncs will now error rather than return NotImplemented
+------------------------------------------------------------------
+Previously, comparison ufuncs such as ``np.equal`` would return
+`NotImplemented` if their arguments had structured dtypes, to help comparison
+operators such as ``__eq__`` deal with those. This is no longer needed, as the
+relevant logic has moved to the comparison operators proper (which thus do
+continue to return `NotImplemented` as needed). Hence, like all other ufuncs,
+the comparison ufuncs will now error on structured dtypes.
+
+Positive will now raise a deprecation warning for non-numerical arrays
+----------------------------------------------------------------------
+Previously, ``+array`` unconditionally returned a copy. Now, it will
+raise a ``DeprecationWarning`` if the array is not numerical (i.e.,
+if ``np.positive(array)`` raises a ``TypeError``). For ``ndarray``
+subclasses that override the default ``__array_ufunc__`` implementation,
+the ``TypeError`` is passed on.
+
+``NDArrayOperatorsMixin`` now implements matrix multiplication
+--------------------------------------------------------------
+Previously, ``np.lib.mixins.NDArrayOperatorsMixin`` did not implement the
+special methods for Python's matrix multiplication operator (``@``). This has
+changed now that ``matmul`` is a ufunc and can be overridden using
+``__array_ufunc__``.
+
+The scaling of the covariance matrix in ``np.polyfit`` is different
+-------------------------------------------------------------------
+So far, ``np.polyfit`` used a non-standard factor in the scaling of the
+covariance matrix. Namely, rather than using the standard ``chisq/(M-N)``, it
+scaled it with ``chisq/(M-N-2)`` where M is the number of data points and N is the
+number of parameters. This scaling is inconsistent with other fitting programs
+such as e.g. ``scipy.optimize.curve_fit`` and was changed to ``chisq/(M-N)``.
+
+``maximum`` and ``minimum`` no longer emit warnings
+---------------------------------------------------
+As part of code introduced in 1.10, ``float32`` and ``float64`` set invalid
+float status when a Nan is encountered in `numpy.maximum` and `numpy.minimum`,
+when using SSE2 semantics. This caused a `RuntimeWarning` to sometimes be
+emitted. In 1.15 we fixed the inconsistencies which caused the warnings to
+become more conspicuous. Now no warnings will be emitted.
+
+Umath and multiarray c-extension modules merged into a single module
+--------------------------------------------------------------------
+The two modules were merged, according to `NEP 15`_. Previously `np.core.umath`
+and `np.core.multiarray` were separate c-extension modules. They are now python
+wrappers to the single `np.core/_multiarray_umath` c-extension module.
+
+.. _`NEP 15` : http://www.numpy.org/neps/nep-0015-merge-multiarray-umath.html
+
+``getfield`` validity checks extended
+-------------------------------------
+`numpy.ndarray.getfield` now checks the dtype and offset arguments to prevent
+accessing invalid memory locations.
+
+NumPy functions now support overrides with ``__array_function__``
+-----------------------------------------------------------------
+NumPy has a new experimental mechanism for overriding the implementation of
+almost all NumPy functions on non-NumPy arrays by defining an
+``__array_function__`` method, as described in `NEP 18`_.
+
+This feature has not yet been enabled by default, but has been released to
+facilitate experimentation by potential users. See the NEP for details on
+setting the appropriate environment variable. We expect the NumPy 1.17 release
+will enable overrides by default, which will also be more performant due to a
+new implementation written in C.
+
+.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
+
+Arrays based off readonly buffers cannot be set ``writeable``
+-------------------------------------------------------------
+We now disallow setting the ``writeable`` flag True on arrays created
+from ``fromstring(readonly-buffer)``.
diff --git a/doc/source/release/1.16.1-notes.rst b/doc/source/release/1.16.1-notes.rst
new file mode 100644
index 000000000..2a190ef91
--- /dev/null
+++ b/doc/source/release/1.16.1-notes.rst
@@ -0,0 +1,107 @@
+==========================
+NumPy 1.16.1 Release Notes
+==========================
+
+The NumPy 1.16.1 release fixes bugs reported against the 1.16.0 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+If you are installing using pip, you may encounter a problem with older
+installed versions of NumPy that pip did not delete becoming mixed with the
+current version, resulting in an ``ImportError``. That problem is particularly
+common on Debian derived distributions due to a modified pip. The fix is to
+make sure all previous NumPy versions installed by pip have been removed. See
+`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the
+issue. Note that previously this problem resulted in an ``AttributeError``.
+
+
+Contributors
+============
+
+A total of 16 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Antoine Pitrou
+* Arcesio Castaneda Medina +
+* Charles Harris
+* Chris Markiewicz +
+* Christoph Gohlke
+* Christopher J. Markiewicz +
+* Daniel Hrisca +
+* EelcoPeacs +
+* Eric Wieser
+* Kevin Sheppard
+* Matti Picus
+* OBATA Akio +
+* Ralf Gommers
+* Sebastian Berg
+* Stephan Hoyer
+* Tyler Reddy
+
+
+Enhancements
+============
+
+* `#12767 <https://github.com/numpy/numpy/pull/12767>`__: ENH: add mm->q floordiv
+* `#12768 <https://github.com/numpy/numpy/pull/12768>`__: ENH: port np.core.overrides to C for speed
+* `#12769 <https://github.com/numpy/numpy/pull/12769>`__: ENH: Add np.ctypeslib.as_ctypes_type(dtype), improve `np.ctypeslib.as_ctypes`
+* `#12773 <https://github.com/numpy/numpy/pull/12773>`__: ENH: add "max difference" messages to np.testing.assert_array_equal...
+* `#12820 <https://github.com/numpy/numpy/pull/12820>`__: ENH: Add mm->qm divmod
+* `#12890 <https://github.com/numpy/numpy/pull/12890>`__: ENH: add _dtype_ctype to namespace for freeze analysis
+
+
+Compatibility notes
+===================
+
+* The changed error message emitted by array comparison testing functions may
+ affect doctests. See below for detail.
+
+* Casting from double and single denormals to float16 has been corrected. In
+ some rare cases, this may result in results being rounded up instead of down,
+ changing the last bit (ULP) of the result.
+
+
+New Features
+============
+
+divmod operation is now supported for two ``timedelta64`` operands
+------------------------------------------------------------------
+The divmod operator now handles two ``np.timedelta64`` operands, with
+type signature ``mm->qm``.
+
+
+Improvements
+============
+
+Further improvements to ``ctypes`` support in ``np.ctypeslib``
+--------------------------------------------------------------
+A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
+used to convert a `dtype` into a best-guess `ctypes` type. Thanks to this
+new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
+array types, including structures, booleans, and integers of non-native
+endianness.
+
+Array comparison assertions include maximum differences
+-------------------------------------------------------
+Error messages from array comparison tests such as
+`np.testing.assert_allclose` now include "max absolute difference" and
+"max relative difference," in addition to the previous "mismatch" percentage.
+This information makes it easier to update absolute and relative error
+tolerances.
+
+
+Changes
+=======
+
+``timedelta64 % 0`` behavior adjusted to return ``NaT``
+-------------------------------------------------------
+The modulus operation with two ``np.timedelta64`` operands now returns
+``NaT`` in the case of division by zero, rather than returning zero.
+
+
+
diff --git a/doc/source/release/1.16.2-notes.rst b/doc/source/release/1.16.2-notes.rst
new file mode 100644
index 000000000..62b90dc40
--- /dev/null
+++ b/doc/source/release/1.16.2-notes.rst
@@ -0,0 +1,70 @@
+==========================
+NumPy 1.16.2 Release Notes
+==========================
+
+NumPy 1.16.2 is a quick release fixing several problems encountered on Windows.
+The Python versions supported are 2.7 and 3.5-3.7. The Windows problems
+addressed are:
+
+- DLL load problems for NumPy wheels on Windows,
+- distutils command line parsing on Windows.
+
+There is also a regression fix correcting signed zeros produced by divmod, see
+below for details.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS > v0.3.4.
+
+If you are installing using pip, you may encounter a problem with older
+installed versions of NumPy that pip did not delete becoming mixed with the
+current version, resulting in an ``ImportError``. That problem is particularly
+common on Debian derived distributions due to a modified pip. The fix is to
+make sure all previous NumPy versions installed by pip have been removed. See
+`#12736 <https://github.com/numpy/numpy/issues/12736>`__ for discussion of the
+issue.
+
+
+Compatibility notes
+===================
+
+Signed zero when using divmod
+-----------------------------
+Starting in version 1.12.0, numpy incorrectly returned a negatively signed zero
+when using the ``divmod`` and ``floor_divide`` functions when the result was
+zero. For example::
+
+ >>> np.zeros(10)//1
+ array([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0.])
+
+With this release, the result is correctly returned as a positively signed
+zero::
+
+ >>> np.zeros(10)//1
+ array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
+
+
+Contributors
+============
+
+A total of 5 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Matti Picus
+* Tyler Reddy
+* Tony LaTorre +
+
+
+Pull requests merged
+====================
+
+A total of 7 pull requests were merged for this release.
+
+* `#12909 <https://github.com/numpy/numpy/pull/12909>`__: TST: fix vmImage dispatch in Azure
+* `#12923 <https://github.com/numpy/numpy/pull/12923>`__: MAINT: remove complicated test of multiarray import failure mode
+* `#13020 <https://github.com/numpy/numpy/pull/13020>`__: BUG: fix signed zero behavior in npy_divmod
+* `#13026 <https://github.com/numpy/numpy/pull/13026>`__: MAINT: Add functions to parse shell-strings in the platform-native...
+* `#13028 <https://github.com/numpy/numpy/pull/13028>`__: BUG: Fix regression in parsing of F90 and F77 environment variables
+* `#13038 <https://github.com/numpy/numpy/pull/13038>`__: BUG: parse shell escaping in extra_compile_args and extra_link_args
+* `#13041 <https://github.com/numpy/numpy/pull/13041>`__: BLD: Windows absolute path DLL loading
diff --git a/doc/source/release/1.16.3-notes.rst b/doc/source/release/1.16.3-notes.rst
new file mode 100644
index 000000000..181a7264d
--- /dev/null
+++ b/doc/source/release/1.16.3-notes.rst
@@ -0,0 +1,46 @@
+==========================
+NumPy 1.16.3 Release Notes
+==========================
+
+The NumPy 1.16.3 release fixes bugs reported against the 1.16.2 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.4+, which should fix the known threading issues
+found in previous OpenBLAS versions.
+
+Downstream developers building this release should use Cython >= 0.29.2 and,
+if using OpenBLAS, OpenBLAS > v0.3.4.
+
+The most noticeable change in this release is that unpickling object arrays
+when loading ``*.npy`` or ``*.npz`` files now requires an explicit opt-in.
+This backwards incompatible change was made in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+
+Compatibility notes
+===================
+
+Unpickling while loading requires explicit opt-in
+-------------------------------------------------
+The functions ``np.load``, and ``np.lib.format.read_array`` take an
+`allow_pickle` keyword which now defaults to ``False`` in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+
+Improvements
+============
+
+Covariance in `random.mvnormal` cast to double
+----------------------------------------------
+This should make the tolerance used when checking the singular values of the
+covariance matrix more meaningful.
+
+
+Changes
+=======
+
+``__array_interface__`` offset now works as documented
+------------------------------------------------------
+The interface may use an ``offset`` value that was previously mistakenly
+ignored.
+
diff --git a/doc/source/release/1.16.4-notes.rst b/doc/source/release/1.16.4-notes.rst
new file mode 100644
index 000000000..a236b05c8
--- /dev/null
+++ b/doc/source/release/1.16.4-notes.rst
@@ -0,0 +1,94 @@
+==========================
+NumPy 1.16.4 Release Notes
+==========================
+
+The NumPy 1.16.4 release fixes bugs reported against the 1.16.3 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7-dev, which should fix issues on Skylake series
+cpus.
+
+Downstream developers building this release should use Cython >= 0.29.2 and,
+if using OpenBLAS, OpenBLAS > v0.3.7. The supported Python versions are 2.7 and
+3.5-3.7.
+
+
+New deprecations
+================
+Writeable flag of C-API wrapped arrays
+--------------------------------------
+When an array is created from the C-API to wrap a pointer to data, the only
+indication we have of the read-write nature of the data is the ``writeable``
+flag set during creation. It is dangerous to force the flag to writeable. In
+the future it will not be possible to switch the writeable flag to ``True``
+from python. This deprecation should not affect many users since arrays
+created in such a manner are very rare in practice and only available through
+the NumPy C-API.
+
+
+Compatibility notes
+===================
+
+Potential changes to the random stream
+--------------------------------------
+Due to bugs in the application of log to random floating point numbers,
+the stream may change when sampling from ``np.random.beta``, ``np.random.binomial``,
+``np.random.laplace``, ``np.random.logistic``, ``np.random.logseries`` or
+``np.random.multinomial`` if a 0 is generated in the underlying MT19937 random stream.
+There is a 1 in :math:`10^{53}` chance of this occurring, and so the probability that
+the stream changes for any given seed is extremely small. If a 0 is encountered in the
+underlying generator, then the incorrect value produced (either ``np.inf``
+or ``np.nan``) is now dropped.
+
+
+Changes
+=======
+
+`numpy.lib.recfunctions.structured_to_unstructured` does not squeeze single-field views
+---------------------------------------------------------------------------------------
+Previously ``structured_to_unstructured(arr[['a']])`` would produce a squeezed
+result inconsistent with ``structured_to_unstructured(arr[['a', 'b']])``. This
+was accidental. The old behavior can be retained with
+``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
+``arr['a']``.
+
+
+Contributors
+============
+
+A total of 10 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Charles Harris
+* Eric Wieser
+* Dennis Zollo +
+* Hunter Damron +
+* Jingbei Li +
+* Kevin Sheppard
+* Matti Picus
+* Nicola Soranzo +
+* Sebastian Berg
+* Tyler Reddy
+
+
+Pull requests merged
+====================
+
+A total of 16 pull requests were merged for this release.
+
+* `#13392 <https://github.com/numpy/numpy/pull/13392>`__: BUG: Some PyPy versions lack PyStructSequence_InitType2.
+* `#13394 <https://github.com/numpy/numpy/pull/13394>`__: MAINT, DEP: Fix deprecated ``assertEquals()``
+* `#13396 <https://github.com/numpy/numpy/pull/13396>`__: BUG: Fix structured_to_unstructured on single-field types (backport)
+* `#13549 <https://github.com/numpy/numpy/pull/13549>`__: BLD: Make CI pass again with pytest 4.5
+* `#13552 <https://github.com/numpy/numpy/pull/13552>`__: TST: Register markers in conftest.py.
+* `#13559 <https://github.com/numpy/numpy/pull/13559>`__: BUG: Removes ValueError for empty kwargs in arraymultiter_new
+* `#13560 <https://github.com/numpy/numpy/pull/13560>`__: BUG: Add TypeError to accepted exceptions in crackfortran.
+* `#13561 <https://github.com/numpy/numpy/pull/13561>`__: BUG: Handle subarrays in descr_to_dtype
+* `#13562 <https://github.com/numpy/numpy/pull/13562>`__: BUG: Protect generators from log(0.0)
+* `#13563 <https://github.com/numpy/numpy/pull/13563>`__: BUG: Always return views from structured_to_unstructured when...
+* `#13564 <https://github.com/numpy/numpy/pull/13564>`__: BUG: Catch stderr when checking compiler version
+* `#13565 <https://github.com/numpy/numpy/pull/13565>`__: BUG: longdouble(int) does not work
+* `#13587 <https://github.com/numpy/numpy/pull/13587>`__: BUG: distutils/system_info.py fix missing subprocess import (#13523)
+* `#13620 <https://github.com/numpy/numpy/pull/13620>`__: BUG,DEP: Fix writeable flag setting for arrays without base
+* `#13641 <https://github.com/numpy/numpy/pull/13641>`__: MAINT: Prepare for the 1.16.4 release.
+* `#13644 <https://github.com/numpy/numpy/pull/13644>`__: BUG: special case object arrays when printing rel-, abs-error
diff --git a/doc/source/release/1.16.5-notes.rst b/doc/source/release/1.16.5-notes.rst
new file mode 100644
index 000000000..5b6eb585b
--- /dev/null
+++ b/doc/source/release/1.16.5-notes.rst
@@ -0,0 +1,68 @@
+==========================
+NumPy 1.16.5 Release Notes
+==========================
+
+The NumPy 1.16.5 release fixes bugs reported against the 1.16.4 release, and
+also backports several enhancements from master that seem appropriate for a
+release series that is the last to support Python 2.7. The wheels on PyPI are
+linked with OpenBLAS v0.3.7-dev, which should fix errors on Skylake series
+cpus.
+
+Downstream developers building this release should use Cython >= 0.29.2 and, if
+using OpenBLAS, OpenBLAS >= v0.3.7. The supported Python versions are 2.7 and
+3.5-3.7.
+
+
+Contributors
+============
+
+A total of 18 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Shadchin
+* Allan Haldane
+* Bruce Merry +
+* Charles Harris
+* Colin Snyder +
+* Dan Allan +
+* Emile +
+* Eric Wieser
+* Grey Baker +
+* Maksim Shabunin +
+* Marten van Kerkwijk
+* Matti Picus
+* Peter Andreas Entschev +
+* Ralf Gommers
+* Richard Harris +
+* Sebastian Berg
+* Sergei Lebedev +
+* Stephan Hoyer
+
+Pull requests merged
+====================
+
+A total of 23 pull requests were merged for this release.
+
+* `#13742 <https://github.com/numpy/numpy/pull/13742>`__: ENH: Add project URLs to setup.py
+* `#13823 <https://github.com/numpy/numpy/pull/13823>`__: TEST, ENH: fix tests and ctypes code for PyPy
+* `#13845 <https://github.com/numpy/numpy/pull/13845>`__: BUG: use npy_intp instead of int for indexing array
+* `#13867 <https://github.com/numpy/numpy/pull/13867>`__: TST: Ignore DeprecationWarning during nose imports
+* `#13905 <https://github.com/numpy/numpy/pull/13905>`__: BUG: Fix use-after-free in boolean indexing
+* `#13933 <https://github.com/numpy/numpy/pull/13933>`__: MAINT/BUG/DOC: Fix errors in _add_newdocs
+* `#13984 <https://github.com/numpy/numpy/pull/13984>`__: BUG: fix byte order reversal for datetime64[ns]
+* `#13994 <https://github.com/numpy/numpy/pull/13994>`__: MAINT,BUG: Use nbytes to also catch empty descr during allocation
+* `#14042 <https://github.com/numpy/numpy/pull/14042>`__: BUG: np.array cleared errors occured in PyMemoryView_FromObject
+* `#14043 <https://github.com/numpy/numpy/pull/14043>`__: BUG: Fixes for Undefined Behavior Sanitizer (UBSan) errors.
+* `#14044 <https://github.com/numpy/numpy/pull/14044>`__: BUG: ensure that casting to/from structured is properly checked.
+* `#14045 <https://github.com/numpy/numpy/pull/14045>`__: MAINT: fix histogram*d dispatchers
+* `#14046 <https://github.com/numpy/numpy/pull/14046>`__: BUG: further fixup to histogram2d dispatcher.
+* `#14052 <https://github.com/numpy/numpy/pull/14052>`__: BUG: Replace contextlib.suppress for Python 2.7
+* `#14056 <https://github.com/numpy/numpy/pull/14056>`__: BUG: fix compilation of 3rd party modules with Py_LIMITED_API...
+* `#14057 <https://github.com/numpy/numpy/pull/14057>`__: BUG: Fix memory leak in dtype from dict contructor
+* `#14058 <https://github.com/numpy/numpy/pull/14058>`__: DOC: Document array_function at a higher level.
+* `#14084 <https://github.com/numpy/numpy/pull/14084>`__: BUG, DOC: add new recfunctions to `__all__`
+* `#14162 <https://github.com/numpy/numpy/pull/14162>`__: BUG: Remove stray print that causes a SystemError on python 3.7
+* `#14297 <https://github.com/numpy/numpy/pull/14297>`__: TST: Pin pytest version to 5.0.1.
+* `#14322 <https://github.com/numpy/numpy/pull/14322>`__: ENH: Enable huge pages in all Linux builds
+* `#14346 <https://github.com/numpy/numpy/pull/14346>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14382 <https://github.com/numpy/numpy/pull/14382>`__: REL: Prepare for the NumPy 1.16.5 release.
diff --git a/doc/source/release/1.17.0-notes.rst b/doc/source/release/1.17.0-notes.rst
new file mode 100644
index 000000000..8d69e36d9
--- /dev/null
+++ b/doc/source/release/1.17.0-notes.rst
@@ -0,0 +1,562 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.0 Release Notes
+==========================
+
+This NumPy release contains a number of new features that should substantially
+improve its performance and usefulness, see Highlights below for a summary. The
+Python versions supported are 3.5-3.7, note that Python 2.7 has been dropped.
+Python 3.8b2 should work with the released source packages, but there are no
+future guarantees.
+
+Downstream developers should use Cython >= 0.29.11 for Python 3.8 support and
+OpenBLAS >= 3.7 (not currently out) to avoid problems on the Skylake
+architecture. The NumPy wheels on PyPI are built from the OpenBLAS development
+branch in order to avoid those problems.
+
+
+Highlights
+==========
+
+* A new extensible `random` module along with four selectable `random number
+ generators <random.BitGenerators>` and improved seeding designed for use in parallel
+ processes has been added. The currently available bit generators are `MT19937
+ <random.mt19937.MT19937>`, `PCG64 <random.pcg64.PCG64>`, `Philox
+ <random.philox.Philox>`, and `SFC64 <random.sfc64.SFC64>`. See below under
+ New Features.
+
+* NumPy's `FFT <fft>` implementation was changed from fftpack to pocketfft,
+ resulting in faster, more accurate transforms and better handling of datasets
+ of prime length. See below under Improvements.
+
+* New radix sort and timsort sorting methods. It is currently not possible to
+ choose which will be used. They are hardwired to the datatype and used
+ when either ``stable`` or ``mergesort`` is passed as the method. See below
+ under Improvements.
+
+* Overriding numpy functions is now possible by default,
+ see ``__array_function__`` below.
+
+
+New functions
+=============
+
+* `numpy.errstate` is now also a function decorator
+
+
+Deprecations
+============
+
+`numpy.polynomial` functions warn when passed ``float`` in place of ``int``
+---------------------------------------------------------------------------
+Previously functions in this module would accept ``float`` values provided they
+were integral (``1.0``, ``2.0``, etc). For consistency with the rest of numpy,
+doing so is now deprecated, and in future will raise a ``TypeError``.
+
+Similarly, passing a float like ``0.5`` in place of an integer will now raise a
+``TypeError`` instead of the previous ``ValueError``.
+
+Deprecate `numpy.distutils.exec_command` and ``temp_file_name``
+---------------------------------------------------------------
+The internal use of these functions has been refactored and there are better
+alternatives. Replace ``exec_command`` with `subprocess.Popen` and
+`temp_file_name <numpy.distutils.exec_command>` with `tempfile.mkstemp`.
+
+Writeable flag of C-API wrapped arrays
+--------------------------------------
+When an array is created from the C-API to wrap a pointer to data, the only
+indication we have of the read-write nature of the data is the ``writeable``
+flag set during creation. It is dangerous to force the flag to writeable.
+In the future it will not be possible to switch the writeable flag to ``True``
+from python.
+This deprecation should not affect many users since arrays created in such
+a manner are very rare in practice and only available through the NumPy C-API.
+
+`numpy.nonzero` should no longer be called on 0d arrays
+-------------------------------------------------------
+The behavior of `numpy.nonzero` on 0d arrays was surprising, making uses of it
+almost always incorrect. If the old behavior was intended, it can be preserved
+without a warning by using ``nonzero(atleast_1d(arr))`` instead of
+``nonzero(arr)``. In a future release, it is most likely this will raise a
+``ValueError``.
+
+Writing to the result of `numpy.broadcast_arrays` will warn
+-----------------------------------------------------------
+
+Commonly `numpy.broadcast_arrays` returns a writeable array with internal
+overlap, making it unsafe to write to. A future version will set the
+``writeable`` flag to ``False``, and require users to manually set it to
+``True`` if they are sure that is what they want to do. Now writing to it will
+emit a deprecation warning with instructions to set the ``writeable`` flag
+``True``. Note that if one were to inspect the flag before setting it, one
+would find it would already be ``True``. Explicitly setting it, though, as one
+will need to do in future versions, clears an internal flag that is used to
+produce the deprecation warning. To help alleviate confusion, an additional
+`FutureWarning` will be emitted when accessing the ``writeable`` flag state to
+clarify the contradiction.
+
+Note that for the C-side buffer protocol such an array will return a
+readonly buffer immediately unless a writable buffer is requested. If
+a writeable buffer is requested a warning will be given. When using
+cython, the ``const`` qualifier should be used with such arrays to avoid
+the warning (e.g. ``cdef const double[::1] view``).
+
+
+Future Changes
+==============
+
+Shape-1 fields in dtypes won't be collapsed to scalars in a future version
+--------------------------------------------------------------------------
+
+Currently, a field specified as ``[(name, dtype, 1)]`` or ``"1type"`` is
+interpreted as a scalar field (i.e., the same as ``[(name, dtype)]`` or
+``[(name, dtype, ())]``). This now raises a FutureWarning; in a future version,
+it will be interpreted as a shape-(1,) field, i.e. the same as ``[(name,
+dtype, (1,))]`` or ``"(1,)type"`` (consistently with ``[(name, dtype, n)]``
+/ ``"ntype"`` with ``n>1``, which is already equivalent to ``[(name, dtype,
+(n,))]`` / ``"(n,)type"``).
+
+
+Compatibility notes
+===================
+
+``float16`` subnormal rounding
+------------------------------
+Casting from a different floating point precision to ``float16`` used incorrect
+rounding in some edge cases. This means in rare cases, subnormal results will
+now be rounded up instead of down, changing the last bit (ULP) of the result.
+
+Signed zero when using divmod
+-----------------------------
+Starting in version `1.12.0`, numpy incorrectly returned a negatively signed zero
+when using the ``divmod`` and ``floor_divide`` functions when the result was
+zero. For example::
+
+ >>> np.zeros(10)//1
+ array([-0., -0., -0., -0., -0., -0., -0., -0., -0., -0.])
+
+With this release, the result is correctly returned as a positively signed
+zero::
+
+ >>> np.zeros(10)//1
+ array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0.])
+
+``MaskedArray.mask`` now returns a view of the mask, not the mask itself
+------------------------------------------------------------------------
+Returning the mask itself was unsafe, as it could be reshaped in place which
+would violate expectations of the masked array code. The behavior of `mask
+<ma.MaskedArray.mask>` is now consistent with `data <ma.MaskedArray.data>`,
+which also returns a view.
+
+The underlying mask can still be accessed with ``._mask`` if it is needed.
+Tests that contain ``assert x.mask is not y.mask`` or similar will need to be
+updated.
+
+Do not lookup ``__buffer__`` attribute in `numpy.frombuffer`
+------------------------------------------------------------
+Looking up ``__buffer__`` attribute in `numpy.frombuffer` was undocumented and
+non-functional. This code was removed. If needed, use
+``frombuffer(memoryview(obj), ...)`` instead.
+
+``out`` is buffered for memory overlaps in `take`, `choose`, `put`
+------------------------------------------------------------------
+If the out argument to these functions is provided and has memory overlap with
+the other arguments, it is now buffered to avoid order-dependent behavior.
+
+Unpickling while loading requires explicit opt-in
+-------------------------------------------------
+The functions `load` and ``lib.format.read_array`` take an
+``allow_pickle`` keyword which now defaults to ``False`` in response to
+`CVE-2019-6446 <https://nvd.nist.gov/vuln/detail/CVE-2019-6446>`_.
+
+
+.. currentmodule:: numpy.random.mtrand
+
+Potential changes to the random stream in old random module
+-----------------------------------------------------------
+Due to bugs in the application of ``log`` to random floating point numbers,
+the stream may change when sampling from `~RandomState.beta`, `~RandomState.binomial`,
+`~RandomState.laplace`, `~RandomState.logistic`, `~RandomState.logseries` or
+`~RandomState.multinomial` if a ``0`` is generated in the underlying `MT19937
+<~numpy.random.mt19937.MT19937>` random stream. There is a ``1`` in
+:math:`10^{53}` chance of this occurring, so the probability that the stream
+changes for any given seed is extremely small. If a ``0`` is encountered in the
+underlying generator, then the incorrect value produced (either `numpy.inf` or
+`numpy.nan`) is now dropped.
+
+.. currentmodule:: numpy
+
+`i0` now always returns a result with the same shape as the input
+-----------------------------------------------------------------
+Previously, the output was squeezed, such that, e.g., input with just a single
+element would lead to an array scalar being returned, and inputs with shapes
+such as ``(10, 1)`` would yield results that would not broadcast against the
+input.
+
+Note that we generally recommend the SciPy implementation over the numpy one:
+it is a proper ufunc written in C, and more than an order of magnitude faster.
+
+`can_cast` no longer assumes all unsafe casting is allowed
+----------------------------------------------------------
+Previously, `can_cast` returned `True` for almost all inputs for
+``casting='unsafe'``, even for cases where casting was not possible, such as
+from a structured dtype to a regular one. This has been fixed, making it
+more consistent with actual casting using, e.g., the `.astype <ndarray.astype>`
+method.
+
+``ndarray.flags.writeable`` can be switched to true slightly more often
+-----------------------------------------------------------------------
+
+In rare cases, it was not possible to switch an array from not writeable
+to writeable, although a base array is writeable. This can happen if an
+intermediate `ndarray.base` object is writeable. Previously, only the deepest
+base object was considered for this decision. However, in rare cases this
+object does not have the necessary information. In that case switching to
+writeable was never allowed. This has now been fixed.
+
+
+C API changes
+=============
+
+dimension or stride input arguments are now passed by ``npy_intp const*``
+-------------------------------------------------------------------------
+Previously these function arguments were declared as the more strict
+``npy_intp*``, which prevented the caller passing constant data.
+This change is backwards compatible, but now allows code like::
+
+ npy_intp const fixed_dims[] = {1, 2, 3};
+ // no longer complains that the const-qualifier is discarded
+ npy_intp size = PyArray_MultiplyList(fixed_dims, 3);
+
+
+New Features
+============
+
+.. currentmodule:: numpy.random
+
+New extensible `numpy.random` module with selectable random number generators
+-----------------------------------------------------------------------------
+A new extensible `numpy.random` module along with four selectable random number
+generators and improved seeding designed for use in parallel processes has been
+added. The currently available :ref:`Bit Generators <bit_generator>` are
+`~mt19937.MT19937`, `~pcg64.PCG64`, `~philox.Philox`, and `~sfc64.SFC64`.
+``PCG64`` is the new default while ``MT19937`` is retained for backwards
+compatibility. Note that the legacy random module is unchanged and is now
+frozen, your current results will not change. More information is available in
+the :ref:`API change description <new-or-different>` and in the `top-level view
+<numpy.random>` documentation.
+
+.. currentmodule:: numpy
+
+libFLAME
+--------
+Support for building NumPy with the libFLAME linear algebra package as the LAPACK
+implementation; see
+`libFLAME <https://www.cs.utexas.edu/~flame/web/libFLAME.html>`_ for details.
+
+User-defined BLAS detection order
+---------------------------------
+`distutils` now uses an environment variable, comma-separated and case
+insensitive, to determine the detection order for BLAS libraries.
+By default ``NPY_BLAS_ORDER=mkl,blis,openblas,atlas,accelerate,blas``.
+However, to force the use of OpenBLAS simply do::
+
+ NPY_BLAS_ORDER=openblas python setup.py build
+
+which forces the use of OpenBLAS.
+This may be helpful for users who have an MKL installation but wish to try
+out different implementations.
+
+User-defined LAPACK detection order
+-----------------------------------
+``numpy.distutils`` now uses an environment variable, comma-separated and case
+insensitive, to determine the detection order for LAPACK libraries.
+By default ``NPY_LAPACK_ORDER=mkl,openblas,flame,atlas,accelerate,lapack``.
+However, to force the use of OpenBLAS simply do::
+
+ NPY_LAPACK_ORDER=openblas python setup.py build
+
+which forces the use of OpenBLAS.
+This may be helpful for users who have an MKL installation but wish to try
+out different implementations.
+
+`ufunc.reduce` and related functions now accept a ``where`` mask
+----------------------------------------------------------------
+`ufunc.reduce`, `sum`, `prod`, `min`, `max` all
+now accept a ``where`` keyword argument, which can be used to tell which
+elements to include in the reduction. For reductions that do not have an
+identity, it is necessary to also pass in an initial value (e.g.,
+``initial=np.inf`` for `min`). For instance, the equivalent of
+`nansum` would be ``np.sum(a, where=~np.isnan(a))``.
+
+Timsort and radix sort have replaced mergesort for stable sorting
+-----------------------------------------------------------------
+Both radix sort and timsort have been implemented and are now used in place of
+mergesort. Due to the need to maintain backward compatibility, the sorting
+``kind`` options ``"stable"`` and ``"mergesort"`` have been made aliases of
+each other with the actual sort implementation depending on the array type.
+Radix sort is used for small integer types of 16 bits or less and timsort for
+the remaining types. Timsort features improved performance on data containing
+already or nearly sorted data and performs like mergesort on random data and
+requires :math:`O(n/2)` working space. Details of the timsort algorithm can be
+found at `CPython listsort.txt
+<https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+
+`packbits` and `unpackbits` accept an ``order`` keyword
+-------------------------------------------------------
+The ``order`` keyword defaults to ``big``, and will order the **bits**
+accordingly. For ``order=big``, 3 will become ``[0, 0, 0, 0, 0, 0, 1, 1]``,
+and ``[1, 1, 0, 0, 0, 0, 0, 0]`` for ``order=little``.
+
+`unpackbits` now accepts a ``count`` parameter
+----------------------------------------------
+``count`` allows subsetting the number of bits that will be unpacked up-front,
+rather than reshaping and subsetting later, making the `packbits` operation
+invertible, and the unpacking less wasteful. Counts larger than the number of
+available bits add zero padding. Negative counts trim bits off the end instead
+of counting from the beginning. None counts implement the existing behavior of
+unpacking everything.
+
+`linalg.svd` and `linalg.pinv` can be faster on hermitian inputs
+----------------------------------------------------------------
+These functions now accept a ``hermitian`` argument, matching the one added
+to `linalg.matrix_rank` in 1.14.0.
+
+divmod operation is now supported for two ``timedelta64`` operands
+------------------------------------------------------------------
+The divmod operator now handles two ``timedelta64`` operands, with
+type signature ``mm->qm``.
+
+`fromfile` now takes an ``offset`` argument
+-------------------------------------------
+This function now takes an ``offset`` keyword argument for binary files,
+which specifies the offset (in bytes) from the file's current position.
+Defaults to ``0``.
+
+New mode "empty" for `pad`
+--------------------------
+This mode pads an array to a desired shape without initializing the new
+entries.
+
+`empty_like` and related functions now accept a ``shape`` argument
+------------------------------------------------------------------
+`empty_like`, `full_like`, `ones_like` and `zeros_like` now accept a ``shape``
+keyword argument, which can be used to create a new array
+as the prototype, overriding its shape as well. This is particularly useful
+when combined with the ``__array_function__`` protocol, allowing the creation
+of new arbitrary-shape arrays from NumPy-like libraries when such an array
+is used as the prototype.
+
+Floating point scalars implement ``as_integer_ratio`` to match the builtin float
+--------------------------------------------------------------------------------
+This returns a (numerator, denominator) pair, which can be used to construct a
+`fractions.Fraction`.
+
+Structured ``dtype`` objects can be indexed with multiple fields names
+----------------------------------------------------------------------
+``arr.dtype[['a', 'b']]`` now returns a dtype that is equivalent to
+``arr[['a', 'b']].dtype``, for consistency with
+``arr.dtype['a'] == arr['a'].dtype``.
+
+Like the dtype of structured arrays indexed with a list of fields, this dtype
+has the same ``itemsize`` as the original, but only keeps a subset of the fields.
+
+This means that ``arr[['a', 'b']]`` and ``arr.view(arr.dtype[['a', 'b']])`` are
+equivalent.
+
+``.npy`` files support unicode field names
+------------------------------------------
+A new format version of 3.0 has been introduced, which enables structured types
+with non-latin1 field names. This is used automatically when needed.
+
+
+Improvements
+============
+
+Array comparison assertions include maximum differences
+-------------------------------------------------------
+Error messages from array comparison tests such as
+`testing.assert_allclose` now include "max absolute difference" and
+"max relative difference," in addition to the previous "mismatch" percentage.
+This information makes it easier to update absolute and relative error
+tolerances.
+
+Replacement of the fftpack based `fft` module by the pocketfft library
+----------------------------------------------------------------------
+Both implementations have the same ancestor (Fortran77 FFTPACK by Paul N.
+Swarztrauber), but pocketfft contains additional modifications which improve
+both accuracy and performance in some circumstances. For FFT lengths containing
+large prime factors, pocketfft uses Bluestein's algorithm, which maintains
+:math:`O(N \log N)` run time complexity instead of deteriorating towards
+:math:`O(N*N)` for prime lengths. Also, accuracy for real valued FFTs with near
+prime lengths has improved and is on par with complex valued FFTs.
+
+Further improvements to ``ctypes`` support in `numpy.ctypeslib`
+---------------------------------------------------------------
+A new `numpy.ctypeslib.as_ctypes_type` function has been added, which can be
+used to converts a `dtype` into a best-guess `ctypes` type. Thanks to this
+new function, `numpy.ctypeslib.as_ctypes` now supports a much wider range of
+array types, including structures, booleans, and integers of non-native
+endianness.
+
+`numpy.errstate` is now also a function decorator
+-------------------------------------------------
+Currently, if you have a function like::
+
+ def foo():
+ pass
+
+and you want to wrap the whole thing in `errstate`, you have to rewrite it
+like so::
+
+ def foo():
+ with np.errstate(...):
+ pass
+
+but with this change, you can do::
+
+ @np.errstate(...)
+ def foo():
+ pass
+
+thereby saving a level of indentation.
+
+`numpy.exp` and `numpy.log` speed up for float32 implementation
+---------------------------------------------------------------
+float32 implementation of `exp` and `log` now benefit from AVX2/AVX512
+instruction sets, which are detected at runtime. `exp` has a max ulp
+error of 2.52 and `log` has a max ulp error of 3.83.
+
+Improve performance of `numpy.pad`
+----------------------------------
+The performance of the function has been improved for most cases by filling in
+a preallocated array with the desired padded shape instead of using
+concatenation.
+
+`numpy.interp` handles infinities more robustly
+-----------------------------------------------
+In some cases where `interp` would previously return `nan`, it now
+returns an appropriate infinity.
+
+Pathlib support for `fromfile`, `tofile` and `ndarray.dump`
+-----------------------------------------------------------
+`fromfile`, `ndarray.tofile` and `ndarray.dump` now support
+the `pathlib.Path` type for the ``file``/``fid`` parameter.
+
+Specialized `isnan`, `isinf`, and `isfinite` ufuncs for bool and int types
+--------------------------------------------------------------------------
+The boolean and integer types are incapable of storing `nan` and `inf` values,
+which allows us to provide specialized ufuncs that are up to 250x faster than
+the previous approach.
+
+`isfinite` supports ``datetime64`` and ``timedelta64`` types
+-----------------------------------------------------------------
+Previously, `isfinite` used to raise a `TypeError` on being used on these
+two types.
+
+New keywords added to `nan_to_num`
+----------------------------------
+`nan_to_num` now accepts keywords ``nan``, ``posinf`` and ``neginf``
+allowing the user to define the value to replace the ``nan``, positive and
+negative ``np.inf`` values respectively.
+
+MemoryErrors caused by allocated overly large arrays are more descriptive
+-------------------------------------------------------------------------
+Often the cause of a MemoryError is incorrect broadcasting, which results in a
+very large and incorrect shape. The message of the error now includes this
+shape to help diagnose the cause of failure.
+
+`floor`, `ceil`, and `trunc` now respect builtin magic methods
+--------------------------------------------------------------
+These ufuncs now call the ``__floor__``, ``__ceil__``, and ``__trunc__``
+methods when called on object arrays, making them compatible with
+`decimal.Decimal` and `fractions.Fraction` objects.
+
+`quantile` now works on `fraction.Fraction` and `decimal.Decimal` objects
+-------------------------------------------------------------------------
+In general, this handles object arrays more gracefully, and avoids
+floating-point operations if exact arithmetic types are used.
+
+Support of object arrays in `matmul`
+------------------------------------
+It is now possible to use `matmul` (or the ``@`` operator) with object arrays.
+For instance, it is now possible to do::
+
+ from fractions import Fraction
+ a = np.array([[Fraction(1, 2), Fraction(1, 3)], [Fraction(1, 3), Fraction(1, 2)]])
+ b = a @ a
+
+
+Changes
+=======
+
+`median` and `percentile` family of functions no longer warn about ``nan``
+--------------------------------------------------------------------------
+`numpy.median`, `numpy.percentile`, and `numpy.quantile` used to emit a
+``RuntimeWarning`` when encountering an `nan`. Since they return the
+``nan`` value, the warning is redundant and has been removed.
+
+``timedelta64 % 0`` behavior adjusted to return ``NaT``
+-------------------------------------------------------
+The modulus operation with two ``np.timedelta64`` operands now returns
+``NaT`` in the case of division by zero, rather than returning zero.
+
+NumPy functions now always support overrides with ``__array_function__``
+------------------------------------------------------------------------
+NumPy now always checks the ``__array_function__`` method to implement overrides
+of NumPy functions on non-NumPy arrays, as described in `NEP 18`_. The feature
+was available for testing with NumPy 1.16 if appropriate environment variables
+were set, but is now always enabled.
+
+.. _`NEP 18` : http://www.numpy.org/neps/nep-0018-array-function-protocol.html
+
+``lib.recfunctions.structured_to_unstructured`` does not squeeze single-field views
+-----------------------------------------------------------------------------------
+Previously ``structured_to_unstructured(arr[['a']])`` would produce a squeezed
+result inconsistent with ``structured_to_unstructured(arr[['a', 'b']])``. This
+was accidental. The old behavior can be retained with
+``structured_to_unstructured(arr[['a']]).squeeze(axis=-1)`` or far more simply,
+``arr['a']``.
+
+`clip` now uses a ufunc under the hood
+--------------------------------------
+This means that registering clip functions for custom dtypes in C via
+``descr->f->fastclip`` is deprecated - they should use the ufunc registration
+mechanism instead, attaching to the ``np.core.umath.clip`` ufunc.
+
+It also means that ``clip`` accepts ``where`` and ``casting`` arguments,
+and can be overridden with ``__array_ufunc__``.
+
+A consequence of this change is that some behaviors of the old ``clip`` have
+been deprecated:
+
+* Passing ``nan`` to mean "do not clip" as one or both bounds. This didn't work
+ in all cases anyway, and can be better handled by passing infinities of the
+ appropriate sign.
+* Using "unsafe" casting by default when an ``out`` argument is passed. Using
+ ``casting="unsafe"`` explicitly will silence this warning.
+
+Additionally, there are some corner cases with behavior changes:
+
+* Passing ``max < min`` has changed to be more consistent across dtypes, but
+ should not be relied upon.
+* Scalar ``min`` and ``max`` take part in promotion rules like they do in all
+ other ufuncs.
+
+``__array_interface__`` offset now works as documented
+------------------------------------------------------
+The interface may use an ``offset`` value that was previously mistakenly ignored.
+
+Pickle protocol in `savez` set to 3 for ``force zip64`` flag
+-----------------------------------------------------------------
+`savez` was not using the ``force_zip64`` flag, which limited the size of
+the archive to 2GB. But using the flag requires us to use pickle protocol 3 to
+write ``object`` arrays. The protocol used was bumped to 3, meaning the archive
+will be unreadable by Python 2.
+
+Structured arrays indexed with non-existent fields raise ``KeyError`` not ``ValueError``
+----------------------------------------------------------------------------------------
+``arr['bad_field']`` on a structured type raises ``KeyError``, for consistency
+with ``dict['bad_field']``.
+
diff --git a/doc/source/release/1.17.1-notes.rst b/doc/source/release/1.17.1-notes.rst
new file mode 100644
index 000000000..bd837ee5b
--- /dev/null
+++ b/doc/source/release/1.17.1-notes.rst
@@ -0,0 +1,73 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.1 Release Notes
+==========================
+
+This release contains a number of fixes for bugs reported against NumPy 1.17.0
+along with a few documentation and build improvements. The Python versions
+supported are 3.5-3.7, note that Python 2.7 has been dropped. Python 3.8b3
+should work with the released source packages, but there are no future
+guarantees.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid problems on the Skylake architecture. The NumPy wheels
+on PyPI are built from the OpenBLAS development branch in order to avoid those
+problems.
+
+
+Contributors
+============
+
+A total of 17 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* Alexander Jung +
+* Allan Haldane
+* Charles Harris
+* Eric Wieser
+* Giuseppe Cuccu +
+* Hiroyuki V. Yamazaki
+* Jérémie du Boisberranger
+* Kmol Yuan +
+* Matti Picus
+* Max Bolingbroke +
+* Maxwell Aladago +
+* Oleksandr Pavlyk
+* Peter Andreas Entschev
+* Sergei Lebedev
+* Seth Troisi +
+* Vladimir Pershin +
+* Warren Weckesser
+
+
+Pull requests merged
+====================
+
+A total of 24 pull requests were merged for this release.
+
+* `#14156 <https://github.com/numpy/numpy/pull/14156>`__: TST: Allow fuss in testing strided/non-strided exp/log loops
+* `#14157 <https://github.com/numpy/numpy/pull/14157>`__: BUG: avx2_scalef_ps must be static
+* `#14158 <https://github.com/numpy/numpy/pull/14158>`__: BUG: Remove stray print that causes a SystemError on python 3.7.
+* `#14159 <https://github.com/numpy/numpy/pull/14159>`__: BUG: Fix DeprecationWarning in python 3.8.
+* `#14160 <https://github.com/numpy/numpy/pull/14160>`__: BLD: Add missing gcd/lcm definitions to npy_math.h
+* `#14161 <https://github.com/numpy/numpy/pull/14161>`__: DOC, BUILD: cleanups and fix (again) 'build dist'
+* `#14166 <https://github.com/numpy/numpy/pull/14166>`__: TST: Add 3.8-dev to travisCI testing.
+* `#14194 <https://github.com/numpy/numpy/pull/14194>`__: BUG: Remove the broken clip wrapper (Backport)
+* `#14198 <https://github.com/numpy/numpy/pull/14198>`__: DOC: Fix hermitian argument docs in svd.
+* `#14199 <https://github.com/numpy/numpy/pull/14199>`__: MAINT: Workaround for Intel compiler bug leading to failing test
+* `#14200 <https://github.com/numpy/numpy/pull/14200>`__: TST: Clean up of test_pocketfft.py
+* `#14201 <https://github.com/numpy/numpy/pull/14201>`__: BUG: Make advanced indexing result on read-only subclass writeable...
+* `#14236 <https://github.com/numpy/numpy/pull/14236>`__: BUG: Fixed default BitGenerator name
+* `#14237 <https://github.com/numpy/numpy/pull/14237>`__: ENH: add c-imported modules for freeze analysis in np.random
+* `#14296 <https://github.com/numpy/numpy/pull/14296>`__: TST: Pin pytest version to 5.0.1
+* `#14301 <https://github.com/numpy/numpy/pull/14301>`__: BUG: Fix leak in the f2py-generated module init and `PyMem_Del`...
+* `#14302 <https://github.com/numpy/numpy/pull/14302>`__: BUG: Fix formatting error in exception message
+* `#14307 <https://github.com/numpy/numpy/pull/14307>`__: MAINT: random: Match type of SeedSequence.pool_size to DEFAULT_POOL_SIZE.
+* `#14308 <https://github.com/numpy/numpy/pull/14308>`__: BUG: Fix numpy.random bug in platform detection
+* `#14309 <https://github.com/numpy/numpy/pull/14309>`__: ENH: Enable huge pages in all Linux builds
+* `#14330 <https://github.com/numpy/numpy/pull/14330>`__: BUG: Fix segfault in `random.permutation(x)` when x is a string.
+* `#14338 <https://github.com/numpy/numpy/pull/14338>`__: BUG: don't fail when lexsorting some empty arrays (#14228)
+* `#14339 <https://github.com/numpy/numpy/pull/14339>`__: BUG: Fix misuse of .names and .fields in various places (backport...
+* `#14345 <https://github.com/numpy/numpy/pull/14345>`__: BUG: fix behavior of structured_to_unstructured on non-trivial...
+* `#14350 <https://github.com/numpy/numpy/pull/14350>`__: REL: Prepare 1.17.1 release
diff --git a/doc/source/release/1.17.2-notes.rst b/doc/source/release/1.17.2-notes.rst
new file mode 100644
index 000000000..65cdaf903
--- /dev/null
+++ b/doc/source/release/1.17.2-notes.rst
@@ -0,0 +1,49 @@
+.. currentmodule:: numpy
+
+==========================
+NumPy 1.17.2 Release Notes
+==========================
+
+This release contains fixes for bugs reported against NumPy 1.17.1 along with
+some documentation improvements. The most important fix is for lexsort when the
+keys are of type (u)int8 or (u)int16. If you are currently using 1.17 you
+should upgrade.
+
+The Python versions supported in this release are 3.5-3.7, Python 2.7 has been
+dropped. Python 3.8b4 should work with the released source packages, but there
+are no future guarantees.
+
+Downstream developers should use Cython >= 0.29.13 for Python 3.8 support and
+OpenBLAS >= 3.7 to avoid errors on the Skylake architecture. The NumPy wheels
+on PyPI are built from the OpenBLAS development branch in order to avoid those
+errors.
+
+
+Contributors
+============
+
+A total of 7 people contributed to this release. People with a "+" by their
+names contributed a patch for the first time.
+
+* CakeWithSteak +
+* Charles Harris
+* Dan Allan
+* Hameer Abbasi
+* Lars Grueter
+* Matti Picus
+* Sebastian Berg
+
+
+Pull requests merged
+====================
+
+A total of 8 pull requests were merged for this release.
+
+* `#14418 <https://github.com/numpy/numpy/pull/14418>`__: BUG: Fix aradixsort indirect indexing.
+* `#14420 <https://github.com/numpy/numpy/pull/14420>`__: DOC: Fix a minor typo in dispatch documentation.
+* `#14421 <https://github.com/numpy/numpy/pull/14421>`__: BUG: test, fix regression in converting to ctypes
+* `#14430 <https://github.com/numpy/numpy/pull/14430>`__: BUG: Do not show Override module in private error classes.
+* `#14432 <https://github.com/numpy/numpy/pull/14432>`__: BUG: Fixed maximum relative error reporting in assert_allclose.
+* `#14433 <https://github.com/numpy/numpy/pull/14433>`__: BUG: Fix uint-overflow if padding with linear_ramp and negative...
+* `#14436 <https://github.com/numpy/numpy/pull/14436>`__: BUG: Update 1.17.x with 1.18.0-dev pocketfft.py.
+* `#14446 <https://github.com/numpy/numpy/pull/14446>`__: REL: Prepare for NumPy 1.17.2 release.
diff --git a/doc/source/release/1.18.0-notes.rst b/doc/source/release/1.18.0-notes.rst
new file mode 100644
index 000000000..e66540410
--- /dev/null
+++ b/doc/source/release/1.18.0-notes.rst
@@ -0,0 +1,8 @@
+The NumPy 1.18 release is currently in development. Please check
+the ``numpy/doc/release/upcoming_changes/`` folder for upcoming
+release notes.
+The ``numpy/doc/release/upcoming_changes/README.txt`` details how
+to add new release notes.
+
+For the work in progress release notes for the current development
+version, see the `devdocs <https://numpy.org/devdocs/release.html>`__.
diff --git a/doc/release/1.3.0-notes.rst b/doc/source/release/1.3.0-notes.rst
index 239714246..239714246 100644
--- a/doc/release/1.3.0-notes.rst
+++ b/doc/source/release/1.3.0-notes.rst
diff --git a/doc/release/1.4.0-notes.rst b/doc/source/release/1.4.0-notes.rst
index 9480a054e..9480a054e 100644
--- a/doc/release/1.4.0-notes.rst
+++ b/doc/source/release/1.4.0-notes.rst
diff --git a/doc/release/1.5.0-notes.rst b/doc/source/release/1.5.0-notes.rst
index a2184ab13..a2184ab13 100644
--- a/doc/release/1.5.0-notes.rst
+++ b/doc/source/release/1.5.0-notes.rst
diff --git a/doc/release/1.6.0-notes.rst b/doc/source/release/1.6.0-notes.rst
index c5f53a0eb..c5f53a0eb 100644
--- a/doc/release/1.6.0-notes.rst
+++ b/doc/source/release/1.6.0-notes.rst
diff --git a/doc/release/1.6.1-notes.rst b/doc/source/release/1.6.1-notes.rst
index 05fcb4ab9..05fcb4ab9 100644
--- a/doc/release/1.6.1-notes.rst
+++ b/doc/source/release/1.6.1-notes.rst
diff --git a/doc/release/1.6.2-notes.rst b/doc/source/release/1.6.2-notes.rst
index 8f0b06f98..8f0b06f98 100644
--- a/doc/release/1.6.2-notes.rst
+++ b/doc/source/release/1.6.2-notes.rst
diff --git a/doc/release/1.7.0-notes.rst b/doc/source/release/1.7.0-notes.rst
index f111f80dc..f111f80dc 100644
--- a/doc/release/1.7.0-notes.rst
+++ b/doc/source/release/1.7.0-notes.rst
diff --git a/doc/release/1.7.1-notes.rst b/doc/source/release/1.7.1-notes.rst
index 04216b0df..04216b0df 100644
--- a/doc/release/1.7.1-notes.rst
+++ b/doc/source/release/1.7.1-notes.rst
diff --git a/doc/release/1.7.2-notes.rst b/doc/source/release/1.7.2-notes.rst
index b0951bd72..b0951bd72 100644
--- a/doc/release/1.7.2-notes.rst
+++ b/doc/source/release/1.7.2-notes.rst
diff --git a/doc/release/1.8.0-notes.rst b/doc/source/release/1.8.0-notes.rst
index 80c39f8bc..80c39f8bc 100644
--- a/doc/release/1.8.0-notes.rst
+++ b/doc/source/release/1.8.0-notes.rst
diff --git a/doc/release/1.8.1-notes.rst b/doc/source/release/1.8.1-notes.rst
index ea34e75ac..ea34e75ac 100644
--- a/doc/release/1.8.1-notes.rst
+++ b/doc/source/release/1.8.1-notes.rst
diff --git a/doc/release/1.8.2-notes.rst b/doc/source/release/1.8.2-notes.rst
index 71e549526..71e549526 100644
--- a/doc/release/1.8.2-notes.rst
+++ b/doc/source/release/1.8.2-notes.rst
diff --git a/doc/release/1.9.0-notes.rst b/doc/source/release/1.9.0-notes.rst
index 7ea29e354..7ea29e354 100644
--- a/doc/release/1.9.0-notes.rst
+++ b/doc/source/release/1.9.0-notes.rst
diff --git a/doc/release/1.9.1-notes.rst b/doc/source/release/1.9.1-notes.rst
index 4558237f4..4558237f4 100644
--- a/doc/release/1.9.1-notes.rst
+++ b/doc/source/release/1.9.1-notes.rst
diff --git a/doc/release/1.9.2-notes.rst b/doc/source/release/1.9.2-notes.rst
index 268f3aa64..268f3aa64 100644
--- a/doc/release/1.9.2-notes.rst
+++ b/doc/source/release/1.9.2-notes.rst
diff --git a/doc/release/template.rst b/doc/source/release/template.rst
index fdfec2be9..cde7646df 100644
--- a/doc/release/template.rst
+++ b/doc/source/release/template.rst
@@ -1,3 +1,5 @@
+:orphan:
+
==========================
NumPy 1.xx.x Release Notes
==========================
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index 65584b1fd..4e9016ee0 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -2,6 +2,10 @@
Broadcasting
************
-.. seealso:: :class:`numpy.broadcast`
+.. seealso::
+ :class:`numpy.broadcast`
+
+ :ref:`array-broadcasting-in-numpy`
+ An introduction to the concepts discussed here
.. automodule:: numpy.doc.broadcasting
diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst
new file mode 100644
index 000000000..f7b8da262
--- /dev/null
+++ b/doc/source/user/basics.dispatch.rst
@@ -0,0 +1,8 @@
+.. _basics.dispatch:
+
+*******************************
+Writing custom array containers
+*******************************
+
+.. automodule:: numpy.doc.dispatch
+
diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst
index 8844adcae..0dca4b884 100644
--- a/doc/source/user/basics.indexing.rst
+++ b/doc/source/user/basics.indexing.rst
@@ -4,6 +4,10 @@
Indexing
********
-.. seealso:: :ref:`Indexing routines <routines.indexing>`
+.. seealso::
+
+ :ref:`Indexing <arrays.indexing>`
+
+ :ref:`Indexing routines <routines.indexing>`
.. automodule:: numpy.doc.indexing
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 21832e5aa..19e37eabc 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -27,13 +27,13 @@ Defining the input
==================
The only mandatory argument of :func:`~numpy.genfromtxt` is the source of
-the data. It can be a string, a list of strings, or a generator. If a
-single string is provided, it is assumed to be the name of a local or
-remote file, or an open file-like object with a :meth:`read` method, for
-example, a file or :class:`io.StringIO` object. If a list of strings
-or a generator returning strings is provided, each string is treated as one
-line in a file. When the URL of a remote file is passed, the file is
-automatically downloaded to the current directory and opened.
+the data. It can be a string, a list of strings, a generator or an open
+file-like object with a :meth:`read` method, for example, a file or
+:class:`io.StringIO` object. If a single string is provided, it is assumed
+to be the name of a local or remote file. If a list of strings or a generator
+returning strings is provided, each string is treated as one line in a file.
+When the URL of a remote file is passed, the file is automatically downloaded
+to the current directory and opened.
Recognized file types are text files and archives. Currently, the function
recognizes :class:`gzip` and :class:`bz2` (`bzip2`) archives. The type of
@@ -521,12 +521,6 @@ provides several convenience functions derived from
:func:`~numpy.genfromtxt`. These functions work the same way as the
original, but they have different default values.
-:func:`~numpy.ndfromtxt`
- Always set ``usemask=False``.
- The output is always a standard :class:`numpy.ndarray`.
-:func:`~numpy.mafromtxt`
- Always set ``usemask=True``.
- The output is always a :class:`~numpy.ma.MaskedArray`
:func:`~numpy.recfromtxt`
Returns a standard :class:`numpy.recarray` (if ``usemask=False``) or a
 :class:`~numpy.ma.MaskedRecords` array (if ``usemask=True``). The
diff --git a/doc/source/user/basics.rst b/doc/source/user/basics.rst
index 7875aff6e..e0fc0ece3 100644
--- a/doc/source/user/basics.rst
+++ b/doc/source/user/basics.rst
@@ -12,4 +12,5 @@ NumPy basics
basics.broadcasting
basics.byteswapping
basics.rec
+ basics.dispatch
basics.subclassing
diff --git a/doc/source/user/building.rst b/doc/source/user/building.rst
index d224951dd..b4b4371e5 100644
--- a/doc/source/user/building.rst
+++ b/doc/source/user/building.rst
@@ -56,7 +56,7 @@ Basic Installation
To install NumPy run::
- python setup.py install
+ pip install .
To perform an in-place build that can be run from the source folder run::
@@ -69,6 +69,15 @@ Using ``virtualenv`` should work as expected.
*Note: for build instructions to do development work on NumPy itself, see*
:ref:`development-environment`.
+Testing
+-------
+
+Make sure to test your builds. To ensure everything stays in shape, see if all tests pass::
+
+ $ python runtests.py -v -m full
+
+For detailed info on testing, see :ref:`testing-builds`.
+
.. _parallel-builds:
Parallel builds
@@ -118,12 +127,71 @@ means that g77 has been used. If libgfortran.so is a dependency, gfortran
has been used. If both are dependencies, this means both have been used, which
is almost always a very bad idea.
+Accelerated BLAS/LAPACK libraries
+---------------------------------
+
+NumPy searches for optimized linear algebra libraries such as BLAS and LAPACK.
+There are specific orders for searching these libraries, as described below.
+
+BLAS
+~~~~
+
+The default order for the libraries is:
+
+1. MKL
+2. BLIS
+3. OpenBLAS
+4. ATLAS
+5. Accelerate (MacOS)
+6. BLAS (NetLIB)
+
+
+If you wish to build against OpenBLAS but you also have BLIS available one
+may predefine the order of searching via the environment variable
+``NPY_BLAS_ORDER`` which is a comma-separated list of the above names which
+is used to determine what to search for, for instance::
+
+ NPY_BLAS_ORDER=ATLAS,blis,openblas,MKL python setup.py build
+
+will prefer to use ATLAS, then BLIS, then OpenBLAS and as a last resort MKL.
+If none of these exists the build will fail (names are compared
+lower case).
+
+LAPACK
+~~~~~~
+
+The default order for the libraries is:
+
+1. MKL
+2. OpenBLAS
+3. libFLAME
+4. ATLAS
+5. Accelerate (MacOS)
+6. LAPACK (NetLIB)
+
+
+If you wish to build against OpenBLAS but you also have MKL available one
+may predefine the order of searching via the environment variable
+``NPY_LAPACK_ORDER`` which is a comma-separated list of the above names,
+for instance::
+
+ NPY_LAPACK_ORDER=ATLAS,openblas,MKL python setup.py build
+
+will prefer to use ATLAS, then OpenBLAS and as a last resort MKL.
+If none of these exists the build will fail (names are compared
+lower case).
+
+
Disabling ATLAS and other accelerated libraries
------------------------------------------------
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Usage of ATLAS and other accelerated libraries in NumPy can be disabled
via::
+ NPY_BLAS_ORDER= NPY_LAPACK_ORDER= python setup.py build
+
+or::
+
BLAS=None LAPACK=None ATLAS=None python setup.py build
diff --git a/doc/source/user/c-info.beyond-basics.rst b/doc/source/user/c-info.beyond-basics.rst
index d4d941a5e..dd25861b4 100644
--- a/doc/source/user/c-info.beyond-basics.rst
+++ b/doc/source/user/c-info.beyond-basics.rst
@@ -300,9 +300,10 @@ An example castfunc is:
static void
double_to_float(double *from, float* to, npy_intp n,
- void* ig1, void* ig2);
- while (n--) {
- (*to++) = (double) *(from++);
+ void* ignore1, void* ignore2) {
+ while (n--) {
+ (*to++) = (double) *(from++);
+ }
}
This could then be registered to convert doubles to floats using the
diff --git a/doc/source/user/c-info.how-to-extend.rst b/doc/source/user/c-info.how-to-extend.rst
index 9738168d2..00ef8ab74 100644
--- a/doc/source/user/c-info.how-to-extend.rst
+++ b/doc/source/user/c-info.how-to-extend.rst
@@ -342,7 +342,7 @@ The method is to
4. If you are writing the algorithm, then I recommend that you use the
stride information contained in the array to access the elements of
- the array (the :c:func:`PyArray_GETPTR` macros make this painless). Then,
+ the array (the :c:func:`PyArray_GetPtr` macros make this painless). Then,
you can relax your requirements so as not to force a single-segment
array and the data-copying that might result.
@@ -362,8 +362,7 @@ specific builtin data-type ( *e.g.* float), while specifying a
particular set of requirements ( *e.g.* contiguous, aligned, and
writeable). The syntax is
-.. c:function:: PyObject *PyArray_FROM_OTF( \
- PyObject* obj, int typenum, int requirements)
+:c:func:`PyArray_FROM_OTF`
Return an ndarray from any Python object, *obj*, that can be
converted to an array. The number of dimensions in the returned
@@ -446,33 +445,25 @@ writeable). The syntax is
flags most commonly needed are :c:data:`NPY_ARRAY_IN_ARRAY`,
:c:data:`NPY_OUT_ARRAY`, and :c:data:`NPY_ARRAY_INOUT_ARRAY`:
- .. c:var:: NPY_ARRAY_IN_ARRAY
+ :c:data:`NPY_ARRAY_IN_ARRAY`
- Equivalent to :c:data:`NPY_ARRAY_C_CONTIGUOUS` \|
- :c:data:`NPY_ARRAY_ALIGNED`. This combination of flags is useful
- for arrays that must be in C-contiguous order and aligned.
- These kinds of arrays are usually input arrays for some
- algorithm.
+ This flag is useful for arrays that must be in C-contiguous
+ order and aligned. These kinds of arrays are usually input
+ arrays for some algorithm.
- .. c:var:: NPY_ARRAY_OUT_ARRAY
+ :c:data:`NPY_ARRAY_OUT_ARRAY`
- Equivalent to :c:data:`NPY_ARRAY_C_CONTIGUOUS` \|
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE`. This
- combination of flags is useful to specify an array that is
+ This flag is useful to specify an array that is
in C-contiguous order, is aligned, and can be written to
as well. Such an array is usually returned as output
(although normally such output arrays are created from
scratch).
- .. c:var:: NPY_ARRAY_INOUT_ARRAY
+ :c:data:`NPY_ARRAY_INOUT_ARRAY`
- Equivalent to :c:data:`NPY_ARRAY_C_CONTIGUOUS` \|
- :c:data:`NPY_ARRAY_ALIGNED` \| :c:data:`NPY_ARRAY_WRITEABLE` \|
- :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` \|
- :c:data:`NPY_ARRAY_UPDATEIFCOPY`. This combination of flags is
- useful to specify an array that will be used for both
+ This flag is useful to specify an array that will be used for both
input and output. :c:func:`PyArray_ResolveWritebackIfCopy`
- must be called before :func:`Py_DECREF` at
+ must be called before :c:func:`Py_DECREF` at
the end of the interface routine to write back the temporary data
into the original array passed in. Use
of the :c:data:`NPY_ARRAY_WRITEBACKIFCOPY` or
@@ -487,16 +478,16 @@ writeable). The syntax is
Other useful flags that can be OR'd as additional requirements are:
- .. c:var:: NPY_ARRAY_FORCECAST
+ :c:data:`NPY_ARRAY_FORCECAST`
Cast to the desired type, even if it can't be done without losing
information.
- .. c:var:: NPY_ARRAY_ENSURECOPY
+ :c:data:`NPY_ARRAY_ENSURECOPY`
Make sure the resulting array is a copy of the original.
- .. c:var:: NPY_ARRAY_ENSUREARRAY
+ :c:data:`NPY_ARRAY_ENSUREARRAY`
Make sure the resulting object is an actual ndarray and not a sub-
class.
@@ -513,7 +504,7 @@ writeable). The syntax is
Creating a brand-new ndarray
----------------------------
-Quite often new arrays must be created from within extension-module
+Quite often, new arrays must be created from within extension-module
code. Perhaps an output array is needed and you don't want the caller
to have to supply it. Perhaps only a temporary array is needed to hold
an intermediate calculation. Whatever the need there are simple ways
@@ -521,43 +512,9 @@ to get an ndarray object of whatever data-type is needed. The most
general function for doing this is :c:func:`PyArray_NewFromDescr`. All array
creation functions go through this heavily re-used code. Because of
its flexibility, it can be somewhat confusing to use. As a result,
-simpler forms exist that are easier to use.
-
-.. c:function:: PyObject *PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
-
- This function allocates new memory and places it in an ndarray
- with *nd* dimensions whose shape is determined by the array of
- at least *nd* items pointed to by *dims*. The memory for the
- array is uninitialized (unless typenum is :c:data:`NPY_OBJECT` in
- which case each element in the array is set to NULL). The
- *typenum* argument allows specification of any of the builtin
- data-types such as :c:data:`NPY_FLOAT` or :c:data:`NPY_LONG`. The
- memory for the array can be set to zero if desired using
- :c:func:`PyArray_FILLWBYTE` (return_object, 0).
-
-.. c:function:: PyObject *PyArray_SimpleNewFromData( \
- int nd, npy_intp* dims, int typenum, void* data)
-
- Sometimes, you want to wrap memory allocated elsewhere into an
- ndarray object for downstream use. This routine makes it
- straightforward to do that. The first three arguments are the same
- as in :c:func:`PyArray_SimpleNew`, the final argument is a pointer to a
- block of contiguous memory that the ndarray should use as it's
- data-buffer which will be interpreted in C-style contiguous
- fashion. A new reference to an ndarray is returned, but the
- ndarray will not own its data. When this ndarray is deallocated,
- the pointer will not be freed.
-
- You should ensure that the provided memory is not freed while the
- returned array is in existence. The easiest way to handle this is
- if data comes from another reference-counted Python object. The
- reference count on this object should be increased after the
- pointer is passed in, and the base member of the returned ndarray
- should point to the Python object that owns the data. Then, when
- the ndarray is deallocated, the base-member will be DECREF'd
- appropriately. If you want the memory to be freed as soon as the
- ndarray is deallocated then simply set the OWNDATA flag on the
- returned ndarray.
+simpler forms exist that are easier to use. These forms are part of the
+:c:func:`PyArray_SimpleNew` family of functions, which simplify the interface
+by providing default values for common use cases.
Getting at ndarray memory and accessing elements of the ndarray
@@ -573,7 +530,7 @@ specific element of the array is determined only by the array of
npy_intp variables, :c:func:`PyArray_STRIDES` (obj). In particular, this
c-array of integers shows how many **bytes** must be added to the
current element pointer to get to the next element in each dimension.
-For arrays less than 4-dimensions there are :c:func:`PyArray_GETPTR{k}`
+For arrays less than 4-dimensions there are ``PyArray_GETPTR{k}``
(obj, ...) macros where {k} is the integer 1, 2, 3, or 4 that make
using the array strides easier. The arguments .... represent {k} non-
negative integer indices into the array. For example, suppose ``E`` is
@@ -586,7 +543,7 @@ contiguous arrays have particular striding patterns. Two array flags
whether or not the striding pattern of a particular array matches the
C-style contiguous or Fortran-style contiguous or neither. Whether or
not the striding pattern matches a standard C or Fortran one can be
-tested Using :c:func:`PyArray_ISCONTIGUOUS` (obj) and
+tested Using :c:func:`PyArray_IS_C_CONTIGUOUS` (obj) and
:c:func:`PyArray_ISFORTRAN` (obj) respectively. Most third-party
libraries expect contiguous arrays. But, often it is not difficult to
support general-purpose striding. I encourage you to use the striding
diff --git a/doc/source/user/c-info.python-as-glue.rst b/doc/source/user/c-info.python-as-glue.rst
index 01d2a64d1..7b9b096af 100644
--- a/doc/source/user/c-info.python-as-glue.rst
+++ b/doc/source/user/c-info.python-as-glue.rst
@@ -387,7 +387,7 @@ distribution of the ``add.f`` module (as part of the package
Installation of the new package is easy using::
- python setup.py install
+ pip install .
assuming you have the proper permissions to write to the main site-
packages directory for the version of Python you are using. For the
@@ -744,14 +744,14 @@ around this restriction that allow ctypes to integrate with other
objects.
1. Don't set the argtypes attribute of the function object and define an
- :obj:`_as_parameter_` method for the object you want to pass in. The
- :obj:`_as_parameter_` method must return a Python int which will be passed
+ ``_as_parameter_`` method for the object you want to pass in. The
+ ``_as_parameter_`` method must return a Python int which will be passed
directly to the function.
2. Set the argtypes attribute to a list whose entries contain objects
with a classmethod named from_param that knows how to convert your
object to an object that ctypes can understand (an int/long, string,
- unicode, or object with the :obj:`_as_parameter_` attribute).
+ unicode, or object with the ``_as_parameter_`` attribute).
NumPy uses both methods with a preference for the second method
because it can be safer. The ctypes attribute of the ndarray returns
@@ -764,7 +764,7 @@ correct type, shape, and has the correct flags set or risk nasty
crashes if the data-pointer to inappropriate arrays are passed in.
To implement the second method, NumPy provides the class-factory
-function :func:`ndpointer` in the :mod:`ctypeslib` module. This
+function :func:`ndpointer` in the :mod:`numpy.ctypeslib` module. This
class-factory function produces an appropriate class that can be
placed in an argtypes attribute entry of a ctypes function. The class
will contain a from_param method which ctypes will use to convert any
diff --git a/doc/source/user/numpy-for-matlab-users.rst b/doc/source/user/numpy-for-matlab-users.rst
index 399237c21..e53d1ca45 100644
--- a/doc/source/user/numpy-for-matlab-users.rst
+++ b/doc/source/user/numpy-for-matlab-users.rst
@@ -436,7 +436,7 @@ Linear Algebra Equivalents
``a``
* - ``rand(3,4)``
- - ``random.rand(3,4)``
+ - ``random.rand(3,4)`` or ``random.random_sample((3, 4))``
- random 3x4 matrix
* - ``linspace(1,3,4)``
@@ -547,7 +547,7 @@ Linear Algebra Equivalents
- eigenvalues and eigenvectors of ``a``
* - ``[V,D]=eig(a,b)``
- - ``V,D = np.linalg.eig(a,b)``
+ - ``D,V = scipy.linalg.eig(a,b)``
- eigenvalues and eigenvectors of ``a``, ``b``
* - ``[V,D]=eigs(a,k)``
@@ -693,19 +693,19 @@ this is just an example, not a statement of "best practices"):
::
- # Make all numpy available via shorter 'num' prefix
- import numpy as num
+ # Make all numpy available via shorter 'np' prefix
+ import numpy as np
# Make all matlib functions accessible at the top level via M.func()
import numpy.matlib as M
# Make some matlib functions accessible directly at the top level via, e.g. rand(3,3)
from numpy.matlib import rand,zeros,ones,empty,eye
# Define a Hermitian function
def hermitian(A, **kwargs):
- return num.transpose(A,**kwargs).conj()
+ return np.transpose(A,**kwargs).conj()
# Make some shortcuts for transpose,hermitian:
- # num.transpose(A) --> T(A)
+ # np.transpose(A) --> T(A)
# hermitian(A) --> H(A)
- T = num.transpose
+ T = np.transpose
H = hermitian
Links
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index 5ef8b145f..a23a7b2c7 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -25,7 +25,7 @@ The Basics
NumPy's main object is the homogeneous multidimensional array. It is a
table of elements (usually numbers), all of the same type, indexed by a
-tuple of positive integers. In NumPy dimensions are called *axes*.
+tuple of non-negative integers. In NumPy dimensions are called *axes*.
For example, the coordinates of a point in 3D space ``[1, 2, 1]`` has
one axis. That axis has 3 elements in it, so we say it has a length
@@ -206,8 +206,8 @@ of elements that we want, instead of the step::
`empty_like`,
`arange`,
`linspace`,
- `numpy.random.rand`,
- `numpy.random.randn`,
+ `numpy.random.mtrand.RandomState.rand`,
+ `numpy.random.mtrand.RandomState.randn`,
`fromfunction`,
`fromfile`
@@ -270,7 +270,7 @@ can change the printing options using ``set_printoptions``.
::
- >>> np.set_printoptions(threshold=np.nan)
+ >>> np.set_printoptions(threshold=sys.maxsize) # sys module should be imported
Basic Operations
@@ -732,9 +732,9 @@ stacks 1D arrays as columns into a 2D array. It is equivalent to
array([[ 4., 3.],
[ 2., 8.]])
-On the other hand, the function `row_stack` is equivalent to `vstack`
+On the other hand, the function `ma.row_stack` is equivalent to `vstack`
for any input arrays.
-In general, for arrays of with more than two dimensions,
+In general, for arrays with more than two dimensions,
`hstack` stacks along their second
axes, `vstack` stacks along their
first axes, and `concatenate`
@@ -884,6 +884,17 @@ The ``copy`` method makes a complete copy of the array and its data.
[ 8, 10, 10, 11]])
+Sometimes ``copy`` should be called after slicing if the original array is not required anymore.
+For example, suppose ``a`` is a huge intermediate result and the final result ``b`` only contains
+a small fraction of ``a``, a deep copy should be made when constructing ``b`` with slicing::
+
+ >>> a = np.arange(int(1e8))
+ >>> b = a[:100].copy()
+ >>> del a # the memory of ``a`` can be released.
+
+If ``b = a[:100]`` is used instead, ``a`` is referenced by ``b`` and will persist in memory
+even if ``del a`` is executed.
+
Functions and Methods Overview
------------------------------
@@ -1465,5 +1476,5 @@ Further reading
- The `Python tutorial <https://docs.python.org/tutorial/>`__
- :ref:`reference`
- `SciPy Tutorial <https://docs.scipy.org/doc/scipy/reference/tutorial/index.html>`__
-- `SciPy Lecture Notes <https://www.scipy-lectures.org>`__
+- `SciPy Lecture Notes <https://scipy-lectures.org>`__
- A `matlab, R, IDL, NumPy/SciPy dictionary <http://mathesaurus.sf.net/>`__
diff --git a/doc/source/user/theory.broadcast_1.gif b/doc/source/user/theory.broadcast_1.gif
new file mode 100644
index 000000000..541ec734b
--- /dev/null
+++ b/doc/source/user/theory.broadcast_1.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_2.gif b/doc/source/user/theory.broadcast_2.gif
new file mode 100644
index 000000000..163a8473f
--- /dev/null
+++ b/doc/source/user/theory.broadcast_2.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_3.gif b/doc/source/user/theory.broadcast_3.gif
new file mode 100644
index 000000000..83f61f2df
--- /dev/null
+++ b/doc/source/user/theory.broadcast_3.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_4.gif b/doc/source/user/theory.broadcast_4.gif
new file mode 100644
index 000000000..9b21ff582
--- /dev/null
+++ b/doc/source/user/theory.broadcast_4.gif
Binary files differ
diff --git a/doc/source/user/theory.broadcast_5.png b/doc/source/user/theory.broadcast_5.png
new file mode 100644
index 000000000..3aa2f0536
--- /dev/null
+++ b/doc/source/user/theory.broadcast_5.png
Binary files differ
diff --git a/doc/source/user/theory.broadcasting.rst b/doc/source/user/theory.broadcasting.rst
new file mode 100644
index 000000000..b37edeacc
--- /dev/null
+++ b/doc/source/user/theory.broadcasting.rst
@@ -0,0 +1,229 @@
+:orphan:
+
+.. _array-broadcasting-in-numpy:
+
+===========================
+Array Broadcasting in NumPy
+===========================
+
+..
+ Originally part of the scipy.org wiki, available `here
+ <https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc>`_ or from the
+ `github repo
+ <https://github.com/scipy/old-wiki/blob/gh-pages/pages/EricsBroadcastingDoc.html>`_
+
+Let's explore a more advanced concept in numpy called broadcasting. The
+term broadcasting describes how numpy treats arrays with different shapes
+during arithmetic operations. Subject to certain constraints, the smaller array
+is "broadcast" across the larger array so that they have compatible shapes.
+Broadcasting provides a means of vectorizing array operations so that looping
+occurs in C instead of Python. It does this without making needless copies of
+data and usually leads to efficient algorithm implementations. There are also
+cases where broadcasting is a bad idea because it leads to inefficient use of
+memory that slows computation. This article provides a gentle introduction to
+broadcasting with numerous examples ranging from simple to involved. It also
+provides hints on when and when not to use broadcasting.
+
+numpy operations are usually done element-by-element which requires two arrays
+to have exactly the same shape:
+
+.. code-block:: python
+ :caption: Example 1
+ :name: example-1
+
+ >>> from numpy import array
+ >>> a = array([1.0, 2.0, 3.0])
+ >>> b = array([2.0, 2.0, 2.0])
+ >>> a * b
+ array([ 2., 4., 6.])
+
+numpy's broadcasting rule relaxes this constraint when the arrays' shapes meet
+certain constraints. The simplest broadcasting example occurs when an array and
+a scalar value are combined in an operation:
+
+.. code-block:: python
+ :caption: Example 2
+ :name: example-2
+
+ >>> from numpy import array
+ >>> a = array([1.0,2.0,3.0])
+ >>> b = 2.0
+ >>> a * b
+ array([ 2., 4., 6.])
+
+The result is equivalent to the previous example where ``b`` was an array. We
+can think of the scalar ``b`` being stretched during the arithmetic operation
+into an array with the same shape as ``a``. The new elements in ``b``, as shown
+in :ref:`figure-1`, are simply copies of the original scalar. The stretching
+analogy is only conceptual. numpy is smart enough to use the original scalar
+value without actually making copies so that broadcasting operations are as
+memory and computationally efficient as possible. Because :ref:`example-2`
+moves less memory, (``b`` is a scalar, not an array) around during the
+multiplication, it is about 10% faster than :ref:`example-1` using the standard
+numpy on Windows 2000 with one million element arrays.
+
+.. figure:: theory.broadcast_1.gif
+ :alt: Vector-Scalar multiplication
+ :name: figure-1
+
+ *Figure 1*
+
+ *In the simplest example of broadcasting, the scalar ``b`` is
+ stretched to become an array with the same shape as ``a`` so the shapes
+ are compatible for element-by-element multiplication.*
+
+
+The rule governing whether two arrays have compatible shapes for broadcasting
+can be expressed in a single sentence.
+
+.. admonition:: The Broadcasting Rule
+
+ **In order to broadcast, the size of the trailing axes for both arrays in
+ an operation must either be the same size or one of them must be one.**
+
+If this condition is not met, a ``ValueError('frames are not aligned')``
+exception is thrown indicating that the arrays have incompatible shapes. The
+size of the result array created by broadcast operations is the maximum size
+along each dimension from the input arrays. Note that the rule does not say
+anything about the two arrays needing to have the same number of dimensions.
+So, for example, if you have a 256 x 256 x 3 array of RGB values, and you want
+to scale each color in the image by a different value, you can multiply the
+image by a one-dimensional array with 3 values. Lining up the sizes of the
+trailing axes of these arrays according to the broadcast rule shows that they
+are compatible
+
++-------+------------+-------+-------+---+
+|Image | (3d array) | 256 x | 256 x | 3 |
++-------+------------+-------+-------+---+
+|Scale | (1d array) | | | 3 |
++-------+------------+-------+-------+---+
+|Result | (3d array) | 256 x | 256 x | 3 |
++-------+------------+-------+-------+---+
+
+In the following example, both the ``A`` and ``B`` arrays have axes with length
+one that are expanded to a larger size in a broadcast operation.
+
++-------+------------+-----+-----+-----+---+
+|A | (4d array) | 8 x | 1 x | 6 x | 1 |
++-------+------------+-----+-----+-----+---+
+|B | (3d array) | | 7 x | 1 x | 5 |
++-------+------------+-----+-----+-----+---+
+|Result | (4d array) | 8 x | 7 x | 6 x | 5 |
++-------+------------+-----+-----+-----+---+
+
+Below, are several code examples and graphical representations that help make
+the broadcast rule visually obvious. :ref:`example-3` adds a one-dimensional array
+to a two-dimensional array:
+
+.. code-block:: python
+ :caption: Example 3
+ :name: example-3
+
+ >>> from numpy import array
+ >>> a = array([[ 0.0, 0.0, 0.0],
+ ... [10.0, 10.0, 10.0],
+ ... [20.0, 20.0, 20.0],
+ ... [30.0, 30.0, 30.0]])
+ >>> b = array([1.0, 2.0, 3.0])
+ >>> a + b
+ array([[ 1., 2., 3.],
+ [ 11., 12., 13.],
+ [ 21., 22., 23.],
+ [ 31., 32., 33.]])
+
+As shown in :ref:`figure-2`, ``b`` is added to each row of ``a``. When ``b`` is
+longer than the rows of ``a``, as in :ref:`figure-3`, an exception is raised
+because of the incompatible shapes.
+
+.. figure:: theory.broadcast_2.gif
+ :alt: Matrix-Vector
+ :name: figure-2
+
+ *Figure 2*
+
+ *A two dimensional array multiplied by a one dimensional array results in
+ broadcasting if number of 1-d array elements matches the number of 2-d
+ array columns.*
+
+.. figure:: theory.broadcast_3.gif
+ :alt: Matrix-Vector-with-error
+ :name: figure-3
+
+ *Figure 3*
+
+ *When the trailing dimensions of the arrays are unequal, broadcasting fails
+ because it is impossible to align the values in the rows of the 1st array
+ with the elements of the 2nd arrays for element-by-element addition.*
+
+Broadcasting provides a convenient way of taking the outer product (or any
+other outer operation) of two arrays. The following example shows an outer
+addition operation of two 1-d arrays that produces the same result as
+:ref:`example-3`
+
+.. code-block:: python
+ :caption: Example 4
+ :name: example-4
+
+ >>> from numpy import array, newaxis
+ >>> a = array([0.0, 10.0, 20.0, 30.0])
+ >>> b = array([1.0, 2.0, 3.0])
+ >>> a[:,newaxis] + b
+ array([[ 1., 2., 3.],
+ [ 11., 12., 13.],
+ [ 21., 22., 23.],
+ [ 31., 32., 33.]])
+
+Here the newaxis index operator inserts a new axis into ``a``, making it a
+two-dimensional 4x1 array. :ref:`figure-4` illustrates the stretching of both
+arrays to produce the desired 4x3 output array.
+
+.. figure:: theory.broadcast_4.gif
+ :alt: vector-vector with newaxis
+ :name: figure-4
+
+ *Figure 4*
+
+ *In some cases, broadcasting stretches both arrays to form an output array
+ larger than either of the initial arrays.*
+
+A Practical Example: Vector Quantization.
+=========================================
+
+Broadcasting comes up quite often in real world problems. A typical example
+occurs in the vector quantization (VQ) algorithm used in information theory,
+classification, and other related areas. The basic operation in VQ [#f0]_ finds
+the closest point in a set of points, called codes in VQ jargon, to a given
+point, called the observation. In the very simple, two-dimensional case shown
+in :ref:`figure-5`, the values in observation describe the weight and height of an
+athlete to be classified. The codes represent different classes of
+athletes. [#f1]_ Finding the closest point requires calculating the distance
+between observation and each of the codes. The shortest distance provides the
+best match. In this example, ``codes[0]`` is the closest class indicating that
+the athlete is likely a basketball player.
+
+.. figure:: theory.broadcast_5.png
+ :alt: vector quantitization example
+ :name: figure-5
+
+ *Figure 5*
+
+ *The basic operation of vector quantization calculates the distance between
+ an object to be classified, the dark square, and multiple known codes, the
+ gray circles. In this simple case, the codes represent individual classes.
+ More complex cases use multiple codes per class.*
+
+
+.. rubric:: Footnotes
+
+.. [#f0] Vector Quantization J. Makhoul, S. Roucos, and H. Gish, "Vector Quantization in Speech Coding," Proc. IEEE, vol. 73, pp. 1551-1587, Nov. 1985.
+.. [#f1]
+ In this example, weight has more impact on the distance calculation
+ than height because of the larger values. In practice, it is important to
+ normalize the height and weight, often by their standard deviation across the
+ data set, so that both have equal influence on the distance calculation.
+
+.. note::
+
+ The code to produce the figures is part of the `AstroML book
+ <http://www.astroml.org/book_figures/appendix/fig_broadcast_visual.html>`_
+
diff --git a/doc/source/user/whatisnumpy.rst b/doc/source/user/whatisnumpy.rst
index cd74a8de3..abaa2bfed 100644
--- a/doc/source/user/whatisnumpy.rst
+++ b/doc/source/user/whatisnumpy.rst
@@ -91,6 +91,11 @@ idiom is even simpler! This last example illustrates two of NumPy's
features which are the basis of much of its power: vectorization and
broadcasting.
+.. _whatis-vectorization:
+
+Why is NumPy Fast?
+------------------
+
Vectorization describes the absence of any explicit looping, indexing,
etc., in the code - these things are taking place, of course, just
"behind the scenes" in optimized, pre-compiled C code. Vectorized
@@ -120,9 +125,13 @@ the shape of the larger in such a way that the resulting broadcast is
unambiguous. For detailed "rules" of broadcasting see
`numpy.doc.broadcasting`.
+Who Else Uses NumPy?
+--------------------
+
NumPy fully supports an object-oriented approach, starting, once
again, with `ndarray`. For example, `ndarray` is a class, possessing
-numerous methods and attributes. Many of its methods mirror
-functions in the outer-most NumPy namespace, giving the programmer
-complete freedom to code in whichever paradigm she prefers and/or
-which seems most appropriate to the task at hand.
+numerous methods and attributes. Many of its methods are mirrored by
+functions in the outer-most NumPy namespace, allowing the programmer
+to code in whichever paradigm they prefer. This flexibility has allowed the
+NumPy array dialect and NumPy `ndarray` class to become the *de-facto* language
+of multi-dimensional data interchange used in Python.
diff --git a/doc/sphinxext b/doc/sphinxext
-Subproject de21addd6560576151938a4bc59543d55ce6f08
+Subproject a482f66913c1079d7439770f0119b55376bb1b8
diff --git a/numpy/__init__.pxd b/numpy/__init__.pxd
new file mode 100644
index 000000000..23bd22e36
--- /dev/null
+++ b/numpy/__init__.pxd
@@ -0,0 +1,978 @@
+# NumPy static imports for Cython
+#
+# If any of the PyArray_* functions are called, import_array must be
+# called first.
+#
+# This also defines backwards-compatibility buffer acquisition
+# code for use in Python 2.x (or Python <= 2.5 when NumPy starts
+# implementing PEP-3118 directly).
+#
+# Because of laziness, the format string of the buffer is statically
+# allocated. Increase the size if this is not enough, or submit a
+# patch to do this properly.
+#
+# Author: Dag Sverre Seljebotn
+#
+
+DEF _buffer_format_string_len = 255
+
+cimport cpython.buffer as pybuf
+from cpython.ref cimport Py_INCREF
+from cpython.mem cimport PyObject_Malloc, PyObject_Free
+from cpython.object cimport PyObject, PyTypeObject
+from cpython.buffer cimport PyObject_GetBuffer
+from cpython.type cimport type
+cimport libc.stdio as stdio
+
+cdef extern from "Python.h":
+ ctypedef int Py_intptr_t
+
+cdef extern from "numpy/arrayobject.h":
+ ctypedef Py_intptr_t npy_intp
+ ctypedef size_t npy_uintp
+
+ cdef enum NPY_TYPES:
+ NPY_BOOL
+ NPY_BYTE
+ NPY_UBYTE
+ NPY_SHORT
+ NPY_USHORT
+ NPY_INT
+ NPY_UINT
+ NPY_LONG
+ NPY_ULONG
+ NPY_LONGLONG
+ NPY_ULONGLONG
+ NPY_FLOAT
+ NPY_DOUBLE
+ NPY_LONGDOUBLE
+ NPY_CFLOAT
+ NPY_CDOUBLE
+ NPY_CLONGDOUBLE
+ NPY_OBJECT
+ NPY_STRING
+ NPY_UNICODE
+ NPY_VOID
+ NPY_DATETIME
+ NPY_TIMEDELTA
+ NPY_NTYPES
+ NPY_NOTYPE
+
+ NPY_INT8
+ NPY_INT16
+ NPY_INT32
+ NPY_INT64
+ NPY_INT128
+ NPY_INT256
+ NPY_UINT8
+ NPY_UINT16
+ NPY_UINT32
+ NPY_UINT64
+ NPY_UINT128
+ NPY_UINT256
+ NPY_FLOAT16
+ NPY_FLOAT32
+ NPY_FLOAT64
+ NPY_FLOAT80
+ NPY_FLOAT96
+ NPY_FLOAT128
+ NPY_FLOAT256
+ NPY_COMPLEX32
+ NPY_COMPLEX64
+ NPY_COMPLEX128
+ NPY_COMPLEX160
+ NPY_COMPLEX192
+ NPY_COMPLEX256
+ NPY_COMPLEX512
+
+ NPY_INTP
+
+ ctypedef enum NPY_ORDER:
+ NPY_ANYORDER
+ NPY_CORDER
+ NPY_FORTRANORDER
+ NPY_KEEPORDER
+
+ ctypedef enum NPY_CASTING:
+ NPY_NO_CASTING
+ NPY_EQUIV_CASTING
+ NPY_SAFE_CASTING
+ NPY_SAME_KIND_CASTING
+ NPY_UNSAFE_CASTING
+
+ ctypedef enum NPY_CLIPMODE:
+ NPY_CLIP
+ NPY_WRAP
+ NPY_RAISE
+
+ ctypedef enum NPY_SCALARKIND:
+ NPY_NOSCALAR,
+ NPY_BOOL_SCALAR,
+ NPY_INTPOS_SCALAR,
+ NPY_INTNEG_SCALAR,
+ NPY_FLOAT_SCALAR,
+ NPY_COMPLEX_SCALAR,
+ NPY_OBJECT_SCALAR
+
+ ctypedef enum NPY_SORTKIND:
+ NPY_QUICKSORT
+ NPY_HEAPSORT
+ NPY_MERGESORT
+
+ ctypedef enum NPY_SEARCHSIDE:
+ NPY_SEARCHLEFT
+ NPY_SEARCHRIGHT
+
+ enum:
+ # DEPRECATED since NumPy 1.7 ! Do not use in new code!
+ NPY_C_CONTIGUOUS
+ NPY_F_CONTIGUOUS
+ NPY_CONTIGUOUS
+ NPY_FORTRAN
+ NPY_OWNDATA
+ NPY_FORCECAST
+ NPY_ENSURECOPY
+ NPY_ENSUREARRAY
+ NPY_ELEMENTSTRIDES
+ NPY_ALIGNED
+ NPY_NOTSWAPPED
+ NPY_WRITEABLE
+ NPY_UPDATEIFCOPY
+ NPY_ARR_HAS_DESCR
+
+ NPY_BEHAVED
+ NPY_BEHAVED_NS
+ NPY_CARRAY
+ NPY_CARRAY_RO
+ NPY_FARRAY
+ NPY_FARRAY_RO
+ NPY_DEFAULT
+
+ NPY_IN_ARRAY
+ NPY_OUT_ARRAY
+ NPY_INOUT_ARRAY
+ NPY_IN_FARRAY
+ NPY_OUT_FARRAY
+ NPY_INOUT_FARRAY
+
+ NPY_UPDATE_ALL
+
+ enum:
+ # Added in NumPy 1.7 to replace the deprecated enums above.
+ NPY_ARRAY_C_CONTIGUOUS
+ NPY_ARRAY_F_CONTIGUOUS
+ NPY_ARRAY_OWNDATA
+ NPY_ARRAY_FORCECAST
+ NPY_ARRAY_ENSURECOPY
+ NPY_ARRAY_ENSUREARRAY
+ NPY_ARRAY_ELEMENTSTRIDES
+ NPY_ARRAY_ALIGNED
+ NPY_ARRAY_NOTSWAPPED
+ NPY_ARRAY_WRITEABLE
+ NPY_ARRAY_UPDATEIFCOPY
+
+ NPY_ARRAY_BEHAVED
+ NPY_ARRAY_BEHAVED_NS
+ NPY_ARRAY_CARRAY
+ NPY_ARRAY_CARRAY_RO
+ NPY_ARRAY_FARRAY
+ NPY_ARRAY_FARRAY_RO
+ NPY_ARRAY_DEFAULT
+
+ NPY_ARRAY_IN_ARRAY
+ NPY_ARRAY_OUT_ARRAY
+ NPY_ARRAY_INOUT_ARRAY
+ NPY_ARRAY_IN_FARRAY
+ NPY_ARRAY_OUT_FARRAY
+ NPY_ARRAY_INOUT_FARRAY
+
+ NPY_ARRAY_UPDATE_ALL
+
+ cdef enum:
+ NPY_MAXDIMS
+
+ npy_intp NPY_MAX_ELSIZE
+
+ ctypedef void (*PyArray_VectorUnaryFunc)(void *, void *, npy_intp, void *, void *)
+
+ ctypedef struct PyArray_ArrayDescr:
+ # shape is a tuple, but Cython doesn't support "tuple shape"
+ # inside a non-PyObject declaration, so we have to declare it
+ # as just a PyObject*.
+ PyObject* shape
+
+ ctypedef struct PyArray_Descr:
+ pass
+
+ ctypedef class numpy.dtype [object PyArray_Descr, check_size ignore]:
+ # Use PyDataType_* macros when possible, however there are no macros
+ # for accessing some of the fields, so some are defined.
+ cdef PyTypeObject* typeobj
+ cdef char kind
+ cdef char type
+ # Numpy sometimes mutates this without warning (e.g. it'll
+ # sometimes change "|" to "<" in shared dtype objects on
+ # little-endian machines). If this matters to you, use
+ # PyArray_IsNativeByteOrder(dtype.byteorder) instead of
+ # directly accessing this field.
+ cdef char byteorder
+ cdef char flags
+ cdef int type_num
+ cdef int itemsize "elsize"
+ cdef int alignment
+ cdef dict fields
+ cdef tuple names
+ # Use PyDataType_HASSUBARRAY to test whether this field is
+ # valid (the pointer can be NULL). Most users should access
+ # this field via the inline helper method PyDataType_SHAPE.
+ cdef PyArray_ArrayDescr* subarray
+
+ ctypedef extern class numpy.flatiter [object PyArrayIterObject, check_size ignore]:
+ # Use through macros
+ pass
+
+ ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject, check_size ignore]:
+ cdef int numiter
+ cdef npy_intp size, index
+ cdef int nd
+ cdef npy_intp *dimensions
+ cdef void **iters
+
+ ctypedef struct PyArrayObject:
+ # For use in situations where ndarray can't replace PyArrayObject*,
+ # like PyArrayObject**.
+ pass
+
+ ctypedef class numpy.ndarray [object PyArrayObject, check_size ignore]:
+ cdef __cythonbufferdefaults__ = {"mode": "strided"}
+
+ cdef:
+ # Only taking a few of the most commonly used and stable fields.
+ # One should use PyArray_* macros instead to access the C fields.
+ char *data
+ int ndim "nd"
+ npy_intp *shape "dimensions"
+ npy_intp *strides
+ dtype descr # deprecated since NumPy 1.7 !
+ PyObject* base
+
+ # Note: This syntax (function definition in pxd files) is an
+ # experimental exception made for __getbuffer__ and __releasebuffer__
+ # -- the details of this may change.
+ def __getbuffer__(ndarray self, Py_buffer* info, int flags):
+ PyObject_GetBuffer(<object>self, info, flags);
+
+ def __releasebuffer__(ndarray self, Py_buffer* info):
+ # We should call a possible tp_bufferrelease(self, info) but no
+ # interface to that is exposed by cython or python. And currently
+ # the function is NULL in numpy, we rely on refcounting to release
+ # info when self is collected
+ pass
+
+
+ ctypedef unsigned char npy_bool
+
+ ctypedef signed char npy_byte
+ ctypedef signed short npy_short
+ ctypedef signed int npy_int
+ ctypedef signed long npy_long
+ ctypedef signed long long npy_longlong
+
+ ctypedef unsigned char npy_ubyte
+ ctypedef unsigned short npy_ushort
+ ctypedef unsigned int npy_uint
+ ctypedef unsigned long npy_ulong
+ ctypedef unsigned long long npy_ulonglong
+
+ ctypedef float npy_float
+ ctypedef double npy_double
+ ctypedef long double npy_longdouble
+
+ ctypedef signed char npy_int8
+ ctypedef signed short npy_int16
+ ctypedef signed int npy_int32
+ ctypedef signed long long npy_int64
+ ctypedef signed long long npy_int96
+ ctypedef signed long long npy_int128
+
+ ctypedef unsigned char npy_uint8
+ ctypedef unsigned short npy_uint16
+ ctypedef unsigned int npy_uint32
+ ctypedef unsigned long long npy_uint64
+ ctypedef unsigned long long npy_uint96
+ ctypedef unsigned long long npy_uint128
+
+ ctypedef float npy_float32
+ ctypedef double npy_float64
+ ctypedef long double npy_float80
+ ctypedef long double npy_float96
+ ctypedef long double npy_float128
+
+ ctypedef struct npy_cfloat:
+ double real
+ double imag
+
+ ctypedef struct npy_cdouble:
+ double real
+ double imag
+
+ ctypedef struct npy_clongdouble:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex64:
+ float real
+ float imag
+
+ ctypedef struct npy_complex128:
+ double real
+ double imag
+
+ ctypedef struct npy_complex160:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex192:
+ long double real
+ long double imag
+
+ ctypedef struct npy_complex256:
+ long double real
+ long double imag
+
+ ctypedef struct PyArray_Dims:
+ npy_intp *ptr
+ int len
+
+ int _import_array() except -1
+
+ #
+ # Macros from ndarrayobject.h
+ #
+ bint PyArray_CHKFLAGS(ndarray m, int flags)
+ bint PyArray_IS_C_CONTIGUOUS(ndarray arr)
+ bint PyArray_IS_F_CONTIGUOUS(ndarray arr)
+ bint PyArray_ISCONTIGUOUS(ndarray m)
+ bint PyArray_ISWRITEABLE(ndarray m)
+ bint PyArray_ISALIGNED(ndarray m)
+
+ int PyArray_NDIM(ndarray)
+ bint PyArray_ISONESEGMENT(ndarray)
+ bint PyArray_ISFORTRAN(ndarray)
+ int PyArray_FORTRANIF(ndarray)
+
+ void* PyArray_DATA(ndarray)
+ char* PyArray_BYTES(ndarray)
+ npy_intp* PyArray_DIMS(ndarray)
+ npy_intp* PyArray_STRIDES(ndarray)
+ npy_intp PyArray_DIM(ndarray, size_t)
+ npy_intp PyArray_STRIDE(ndarray, size_t)
+
+ PyObject *PyArray_BASE(ndarray) # returns borrowed reference!
+ PyArray_Descr *PyArray_DESCR(ndarray) # returns borrowed reference to dtype!
+ int PyArray_FLAGS(ndarray)
+ npy_intp PyArray_ITEMSIZE(ndarray)
+ int PyArray_TYPE(ndarray arr)
+
+ object PyArray_GETITEM(ndarray arr, void *itemptr)
+ int PyArray_SETITEM(ndarray arr, void *itemptr, object obj)
+
+ bint PyTypeNum_ISBOOL(int)
+ bint PyTypeNum_ISUNSIGNED(int)
+ bint PyTypeNum_ISSIGNED(int)
+ bint PyTypeNum_ISINTEGER(int)
+ bint PyTypeNum_ISFLOAT(int)
+ bint PyTypeNum_ISNUMBER(int)
+ bint PyTypeNum_ISSTRING(int)
+ bint PyTypeNum_ISCOMPLEX(int)
+ bint PyTypeNum_ISPYTHON(int)
+ bint PyTypeNum_ISFLEXIBLE(int)
+ bint PyTypeNum_ISUSERDEF(int)
+ bint PyTypeNum_ISEXTENDED(int)
+ bint PyTypeNum_ISOBJECT(int)
+
+ bint PyDataType_ISBOOL(dtype)
+ bint PyDataType_ISUNSIGNED(dtype)
+ bint PyDataType_ISSIGNED(dtype)
+ bint PyDataType_ISINTEGER(dtype)
+ bint PyDataType_ISFLOAT(dtype)
+ bint PyDataType_ISNUMBER(dtype)
+ bint PyDataType_ISSTRING(dtype)
+ bint PyDataType_ISCOMPLEX(dtype)
+ bint PyDataType_ISPYTHON(dtype)
+ bint PyDataType_ISFLEXIBLE(dtype)
+ bint PyDataType_ISUSERDEF(dtype)
+ bint PyDataType_ISEXTENDED(dtype)
+ bint PyDataType_ISOBJECT(dtype)
+ bint PyDataType_HASFIELDS(dtype)
+ bint PyDataType_HASSUBARRAY(dtype)
+
+ bint PyArray_ISBOOL(ndarray)
+ bint PyArray_ISUNSIGNED(ndarray)
+ bint PyArray_ISSIGNED(ndarray)
+ bint PyArray_ISINTEGER(ndarray)
+ bint PyArray_ISFLOAT(ndarray)
+ bint PyArray_ISNUMBER(ndarray)
+ bint PyArray_ISSTRING(ndarray)
+ bint PyArray_ISCOMPLEX(ndarray)
+ bint PyArray_ISPYTHON(ndarray)
+ bint PyArray_ISFLEXIBLE(ndarray)
+ bint PyArray_ISUSERDEF(ndarray)
+ bint PyArray_ISEXTENDED(ndarray)
+ bint PyArray_ISOBJECT(ndarray)
+ bint PyArray_HASFIELDS(ndarray)
+
+ bint PyArray_ISVARIABLE(ndarray)
+
+ bint PyArray_SAFEALIGNEDCOPY(ndarray)
+ bint PyArray_ISNBO(char) # works on ndarray.byteorder
+ bint PyArray_IsNativeByteOrder(char) # works on ndarray.byteorder
+ bint PyArray_ISNOTSWAPPED(ndarray)
+ bint PyArray_ISBYTESWAPPED(ndarray)
+
+ bint PyArray_FLAGSWAP(ndarray, int)
+
+ bint PyArray_ISCARRAY(ndarray)
+ bint PyArray_ISCARRAY_RO(ndarray)
+ bint PyArray_ISFARRAY(ndarray)
+ bint PyArray_ISFARRAY_RO(ndarray)
+ bint PyArray_ISBEHAVED(ndarray)
+ bint PyArray_ISBEHAVED_RO(ndarray)
+
+
+ bint PyDataType_ISNOTSWAPPED(dtype)
+ bint PyDataType_ISBYTESWAPPED(dtype)
+
+ bint PyArray_DescrCheck(object)
+
+ bint PyArray_Check(object)
+ bint PyArray_CheckExact(object)
+
+ # Cannot be supported due to out arg:
+ # bint PyArray_HasArrayInterfaceType(object, dtype, object, object&)
+ # bint PyArray_HasArrayInterface(op, out)
+
+
+ bint PyArray_IsZeroDim(object)
+ # Cannot be supported due to ## ## in macro:
+ # bint PyArray_IsScalar(object, verbatim work)
+ bint PyArray_CheckScalar(object)
+ bint PyArray_IsPythonNumber(object)
+ bint PyArray_IsPythonScalar(object)
+ bint PyArray_IsAnyScalar(object)
+ bint PyArray_CheckAnyScalar(object)
+ ndarray PyArray_GETCONTIGUOUS(ndarray)
+ bint PyArray_SAMESHAPE(ndarray, ndarray)
+ npy_intp PyArray_SIZE(ndarray)
+ npy_intp PyArray_NBYTES(ndarray)
+
+ object PyArray_FROM_O(object)
+ object PyArray_FROM_OF(object m, int flags)
+ object PyArray_FROM_OT(object m, int type)
+ object PyArray_FROM_OTF(object m, int type, int flags)
+ object PyArray_FROMANY(object m, int type, int min, int max, int flags)
+ object PyArray_ZEROS(int nd, npy_intp* dims, int type, int fortran)
+ object PyArray_EMPTY(int nd, npy_intp* dims, int type, int fortran)
+ void PyArray_FILLWBYTE(object, int val)
+ npy_intp PyArray_REFCOUNT(object)
+ object PyArray_ContiguousFromAny(op, int, int min_depth, int max_depth)
+ unsigned char PyArray_EquivArrTypes(ndarray a1, ndarray a2)
+ bint PyArray_EquivByteorders(int b1, int b2)
+ object PyArray_SimpleNew(int nd, npy_intp* dims, int typenum)
+ object PyArray_SimpleNewFromData(int nd, npy_intp* dims, int typenum, void* data)
+ #object PyArray_SimpleNewFromDescr(int nd, npy_intp* dims, dtype descr)
+ object PyArray_ToScalar(void* data, ndarray arr)
+
+ void* PyArray_GETPTR1(ndarray m, npy_intp i)
+ void* PyArray_GETPTR2(ndarray m, npy_intp i, npy_intp j)
+ void* PyArray_GETPTR3(ndarray m, npy_intp i, npy_intp j, npy_intp k)
+ void* PyArray_GETPTR4(ndarray m, npy_intp i, npy_intp j, npy_intp k, npy_intp l)
+
+ void PyArray_XDECREF_ERR(ndarray)
+ # Cannot be supported due to out arg
+ # void PyArray_DESCR_REPLACE(descr)
+
+
+ object PyArray_Copy(ndarray)
+ object PyArray_FromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_ContiguousFromObject(object op, int type, int min_depth, int max_depth)
+ object PyArray_CopyFromObject(object op, int type, int min_depth, int max_depth)
+
+ object PyArray_Cast(ndarray mp, int type_num)
+ object PyArray_Take(ndarray ap, object items, int axis)
+ object PyArray_Put(ndarray ap, object items, object values)
+
+ void PyArray_ITER_RESET(flatiter it) nogil
+ void PyArray_ITER_NEXT(flatiter it) nogil
+ void PyArray_ITER_GOTO(flatiter it, npy_intp* destination) nogil
+ void PyArray_ITER_GOTO1D(flatiter it, npy_intp ind) nogil
+ void* PyArray_ITER_DATA(flatiter it) nogil
+ bint PyArray_ITER_NOTDONE(flatiter it) nogil
+
+ void PyArray_MultiIter_RESET(broadcast multi) nogil
+ void PyArray_MultiIter_NEXT(broadcast multi) nogil
+ void PyArray_MultiIter_GOTO(broadcast multi, npy_intp dest) nogil
+ void PyArray_MultiIter_GOTO1D(broadcast multi, npy_intp ind) nogil
+ void* PyArray_MultiIter_DATA(broadcast multi, npy_intp i) nogil
+ void PyArray_MultiIter_NEXTi(broadcast multi, npy_intp i) nogil
+ bint PyArray_MultiIter_NOTDONE(broadcast multi) nogil
+
+ # Functions from __multiarray_api.h
+
+ # Functions taking dtype and returning object/ndarray are disabled
+ # for now as they steal dtype references. I'm conservative and disable
+ # more than is probably needed until it can be checked further.
+ int PyArray_SetNumericOps (object)
+ object PyArray_GetNumericOps ()
+ int PyArray_INCREF (ndarray)
+ int PyArray_XDECREF (ndarray)
+ void PyArray_SetStringFunction (object, int)
+ dtype PyArray_DescrFromType (int)
+ object PyArray_TypeObjectFromType (int)
+ char * PyArray_Zero (ndarray)
+ char * PyArray_One (ndarray)
+ #object PyArray_CastToType (ndarray, dtype, int)
+ int PyArray_CastTo (ndarray, ndarray)
+ int PyArray_CastAnyTo (ndarray, ndarray)
+ int PyArray_CanCastSafely (int, int)
+ npy_bool PyArray_CanCastTo (dtype, dtype)
+ int PyArray_ObjectType (object, int)
+ dtype PyArray_DescrFromObject (object, dtype)
+ #ndarray* PyArray_ConvertToCommonType (object, int *)
+ dtype PyArray_DescrFromScalar (object)
+ dtype PyArray_DescrFromTypeObject (object)
+ npy_intp PyArray_Size (object)
+ #object PyArray_Scalar (void *, dtype, object)
+ #object PyArray_FromScalar (object, dtype)
+ void PyArray_ScalarAsCtype (object, void *)
+ #int PyArray_CastScalarToCtype (object, void *, dtype)
+ #int PyArray_CastScalarDirect (object, dtype, void *, int)
+ object PyArray_ScalarFromObject (object)
+ #PyArray_VectorUnaryFunc * PyArray_GetCastFunc (dtype, int)
+ object PyArray_FromDims (int, int *, int)
+ #object PyArray_FromDimsAndDataAndDescr (int, int *, dtype, char *)
+ #object PyArray_FromAny (object, dtype, int, int, int, object)
+ object PyArray_EnsureArray (object)
+ object PyArray_EnsureAnyArray (object)
+ #object PyArray_FromFile (stdio.FILE *, dtype, npy_intp, char *)
+ #object PyArray_FromString (char *, npy_intp, dtype, npy_intp, char *)
+ #object PyArray_FromBuffer (object, dtype, npy_intp, npy_intp)
+ #object PyArray_FromIter (object, dtype, npy_intp)
+ object PyArray_Return (ndarray)
+ #object PyArray_GetField (ndarray, dtype, int)
+ #int PyArray_SetField (ndarray, dtype, int, object)
+ object PyArray_Byteswap (ndarray, npy_bool)
+ object PyArray_Resize (ndarray, PyArray_Dims *, int, NPY_ORDER)
+ int PyArray_MoveInto (ndarray, ndarray)
+ int PyArray_CopyInto (ndarray, ndarray)
+ int PyArray_CopyAnyInto (ndarray, ndarray)
+ int PyArray_CopyObject (ndarray, object)
+ object PyArray_NewCopy (ndarray, NPY_ORDER)
+ object PyArray_ToList (ndarray)
+ object PyArray_ToString (ndarray, NPY_ORDER)
+ int PyArray_ToFile (ndarray, stdio.FILE *, char *, char *)
+ int PyArray_Dump (object, object, int)
+ object PyArray_Dumps (object, int)
+ int PyArray_ValidType (int)
+ void PyArray_UpdateFlags (ndarray, int)
+ object PyArray_New (type, int, npy_intp *, int, npy_intp *, void *, int, int, object)
+ #object PyArray_NewFromDescr (type, dtype, int, npy_intp *, npy_intp *, void *, int, object)
+ #dtype PyArray_DescrNew (dtype)
+ dtype PyArray_DescrNewFromType (int)
+ double PyArray_GetPriority (object, double)
+ object PyArray_IterNew (object)
+ object PyArray_MultiIterNew (int, ...)
+
+ int PyArray_PyIntAsInt (object)
+ npy_intp PyArray_PyIntAsIntp (object)
+ int PyArray_Broadcast (broadcast)
+ void PyArray_FillObjectArray (ndarray, object)
+ int PyArray_FillWithScalar (ndarray, object)
+ npy_bool PyArray_CheckStrides (int, int, npy_intp, npy_intp, npy_intp *, npy_intp *)
+ dtype PyArray_DescrNewByteorder (dtype, char)
+ object PyArray_IterAllButAxis (object, int *)
+ #object PyArray_CheckFromAny (object, dtype, int, int, int, object)
+ #object PyArray_FromArray (ndarray, dtype, int)
+ object PyArray_FromInterface (object)
+ object PyArray_FromStructInterface (object)
+ #object PyArray_FromArrayAttr (object, dtype, object)
+ #NPY_SCALARKIND PyArray_ScalarKind (int, ndarray*)
+ int PyArray_CanCoerceScalar (int, int, NPY_SCALARKIND)
+ object PyArray_NewFlagsObject (object)
+ npy_bool PyArray_CanCastScalar (type, type)
+ #int PyArray_CompareUCS4 (npy_ucs4 *, npy_ucs4 *, register size_t)
+ int PyArray_RemoveSmallest (broadcast)
+ int PyArray_ElementStrides (object)
+ void PyArray_Item_INCREF (char *, dtype)
+ void PyArray_Item_XDECREF (char *, dtype)
+ object PyArray_FieldNames (object)
+ object PyArray_Transpose (ndarray, PyArray_Dims *)
+ object PyArray_TakeFrom (ndarray, object, int, ndarray, NPY_CLIPMODE)
+ object PyArray_PutTo (ndarray, object, object, NPY_CLIPMODE)
+ object PyArray_PutMask (ndarray, object, object)
+ object PyArray_Repeat (ndarray, object, int)
+ object PyArray_Choose (ndarray, object, ndarray, NPY_CLIPMODE)
+ int PyArray_Sort (ndarray, int, NPY_SORTKIND)
+ object PyArray_ArgSort (ndarray, int, NPY_SORTKIND)
+ object PyArray_SearchSorted (ndarray, object, NPY_SEARCHSIDE)
+ object PyArray_ArgMax (ndarray, int, ndarray)
+ object PyArray_ArgMin (ndarray, int, ndarray)
+ object PyArray_Reshape (ndarray, object)
+ object PyArray_Newshape (ndarray, PyArray_Dims *, NPY_ORDER)
+ object PyArray_Squeeze (ndarray)
+ #object PyArray_View (ndarray, dtype, type)
+ object PyArray_SwapAxes (ndarray, int, int)
+ object PyArray_Max (ndarray, int, ndarray)
+ object PyArray_Min (ndarray, int, ndarray)
+ object PyArray_Ptp (ndarray, int, ndarray)
+ object PyArray_Mean (ndarray, int, int, ndarray)
+ object PyArray_Trace (ndarray, int, int, int, int, ndarray)
+ object PyArray_Diagonal (ndarray, int, int, int)
+ object PyArray_Clip (ndarray, object, object, ndarray)
+ object PyArray_Conjugate (ndarray, ndarray)
+ object PyArray_Nonzero (ndarray)
+ object PyArray_Std (ndarray, int, int, ndarray, int)
+ object PyArray_Sum (ndarray, int, int, ndarray)
+ object PyArray_CumSum (ndarray, int, int, ndarray)
+ object PyArray_Prod (ndarray, int, int, ndarray)
+ object PyArray_CumProd (ndarray, int, int, ndarray)
+ object PyArray_All (ndarray, int, ndarray)
+ object PyArray_Any (ndarray, int, ndarray)
+ object PyArray_Compress (ndarray, object, int, ndarray)
+ object PyArray_Flatten (ndarray, NPY_ORDER)
+ object PyArray_Ravel (ndarray, NPY_ORDER)
+ npy_intp PyArray_MultiplyList (npy_intp *, int)
+ int PyArray_MultiplyIntList (int *, int)
+ void * PyArray_GetPtr (ndarray, npy_intp*)
+ int PyArray_CompareLists (npy_intp *, npy_intp *, int)
+ #int PyArray_AsCArray (object*, void *, npy_intp *, int, dtype)
+ #int PyArray_As1D (object*, char **, int *, int)
+ #int PyArray_As2D (object*, char ***, int *, int *, int)
+ int PyArray_Free (object, void *)
+ #int PyArray_Converter (object, object*)
+ int PyArray_IntpFromSequence (object, npy_intp *, int)
+ object PyArray_Concatenate (object, int)
+ object PyArray_InnerProduct (object, object)
+ object PyArray_MatrixProduct (object, object)
+ object PyArray_CopyAndTranspose (object)
+ object PyArray_Correlate (object, object, int)
+ int PyArray_TypestrConvert (int, int)
+ #int PyArray_DescrConverter (object, dtype*)
+ #int PyArray_DescrConverter2 (object, dtype*)
+ int PyArray_IntpConverter (object, PyArray_Dims *)
+ #int PyArray_BufferConverter (object, chunk)
+ int PyArray_AxisConverter (object, int *)
+ int PyArray_BoolConverter (object, npy_bool *)
+ int PyArray_ByteorderConverter (object, char *)
+ int PyArray_OrderConverter (object, NPY_ORDER *)
+ unsigned char PyArray_EquivTypes (dtype, dtype)
+ #object PyArray_Zeros (int, npy_intp *, dtype, int)
+ #object PyArray_Empty (int, npy_intp *, dtype, int)
+ object PyArray_Where (object, object, object)
+ object PyArray_Arange (double, double, double, int)
+ #object PyArray_ArangeObj (object, object, object, dtype)
+ int PyArray_SortkindConverter (object, NPY_SORTKIND *)
+ object PyArray_LexSort (object, int)
+ object PyArray_Round (ndarray, int, ndarray)
+ unsigned char PyArray_EquivTypenums (int, int)
+ int PyArray_RegisterDataType (dtype)
+ int PyArray_RegisterCastFunc (dtype, int, PyArray_VectorUnaryFunc *)
+ int PyArray_RegisterCanCast (dtype, int, NPY_SCALARKIND)
+ #void PyArray_InitArrFuncs (PyArray_ArrFuncs *)
+ object PyArray_IntTupleFromIntp (int, npy_intp *)
+ int PyArray_TypeNumFromName (char *)
+ int PyArray_ClipmodeConverter (object, NPY_CLIPMODE *)
+ #int PyArray_OutputConverter (object, ndarray*)
+ object PyArray_BroadcastToShape (object, npy_intp *, int)
+ void _PyArray_SigintHandler (int)
+ void* _PyArray_GetSigintBuf ()
+ #int PyArray_DescrAlignConverter (object, dtype*)
+ #int PyArray_DescrAlignConverter2 (object, dtype*)
+ int PyArray_SearchsideConverter (object, void *)
+ object PyArray_CheckAxis (ndarray, int *, int)
+ npy_intp PyArray_OverflowMultiplyList (npy_intp *, int)
+ int PyArray_CompareString (char *, char *, size_t)
+ int PyArray_SetBaseObject(ndarray, base) # NOTE: steals a reference to base! Use "set_array_base()" instead.
+
+
+# Typedefs that matches the runtime dtype objects in
+# the numpy module.
+
+# The ones that are commented out needs an IFDEF function
+# in Cython to enable them only on the right systems.
+
+ctypedef npy_int8 int8_t
+ctypedef npy_int16 int16_t
+ctypedef npy_int32 int32_t
+ctypedef npy_int64 int64_t
+#ctypedef npy_int96 int96_t
+#ctypedef npy_int128 int128_t
+
+ctypedef npy_uint8 uint8_t
+ctypedef npy_uint16 uint16_t
+ctypedef npy_uint32 uint32_t
+ctypedef npy_uint64 uint64_t
+#ctypedef npy_uint96 uint96_t
+#ctypedef npy_uint128 uint128_t
+
+ctypedef npy_float32 float32_t
+ctypedef npy_float64 float64_t
+#ctypedef npy_float80 float80_t
+#ctypedef npy_float128 float128_t
+
+ctypedef float complex complex64_t
+ctypedef double complex complex128_t
+
+# The int types are mapped a bit surprising --
+# numpy.int corresponds to 'l' and numpy.long to 'q'
+ctypedef npy_long int_t
+ctypedef npy_longlong long_t
+ctypedef npy_longlong longlong_t
+
+ctypedef npy_ulong uint_t
+ctypedef npy_ulonglong ulong_t
+ctypedef npy_ulonglong ulonglong_t
+
+ctypedef npy_intp intp_t
+ctypedef npy_uintp uintp_t
+
+ctypedef npy_double float_t
+ctypedef npy_double double_t
+ctypedef npy_longdouble longdouble_t
+
+ctypedef npy_cfloat cfloat_t
+ctypedef npy_cdouble cdouble_t
+ctypedef npy_clongdouble clongdouble_t
+
+ctypedef npy_cdouble complex_t
+
+cdef inline object PyArray_MultiIterNew1(a):
+ return PyArray_MultiIterNew(1, <void*>a)
+
+cdef inline object PyArray_MultiIterNew2(a, b):
+ return PyArray_MultiIterNew(2, <void*>a, <void*>b)
+
+cdef inline object PyArray_MultiIterNew3(a, b, c):
+ return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
+
+cdef inline object PyArray_MultiIterNew4(a, b, c, d):
+ return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
+
+cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
+ return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
+
+cdef inline tuple PyDataType_SHAPE(dtype d):
+ if PyDataType_HASSUBARRAY(d):
+ return <tuple>d.subarray.shape
+ else:
+ return ()
+
+cdef inline char* _util_dtypestring(dtype descr, char* f, char* end, int* offset) except NULL:
+ # Recursive utility function used in __getbuffer__ to get format
+ # string. The new location in the format string is returned.
+
+ cdef dtype child
+ cdef int endian_detector = 1
+ cdef bint little_endian = ((<char*>&endian_detector)[0] != 0)
+ cdef tuple fields
+
+ for childname in descr.names:
+ fields = descr.fields[childname]
+ child, new_offset = fields
+
+ if (end - f) - <int>(new_offset - offset[0]) < 15:
+ raise RuntimeError(u"Format string allocated too short, see comment in numpy.pxd")
+
+ if ((child.byteorder == c'>' and little_endian) or
+ (child.byteorder == c'<' and not little_endian)):
+ raise ValueError(u"Non-native byte order not supported")
+ # One could encode it in the format string and have Cython
+ # complain instead, BUT: < and > in format strings also imply
+ # standardized sizes for datatypes, and we rely on native in
+ # order to avoid reencoding data types based on their size.
+ #
+ # A proper PEP 3118 exporter for other clients than Cython
+ # must deal properly with this!
+
+ # Output padding bytes
+ while offset[0] < new_offset:
+ f[0] = 120 # "x"; pad byte
+ f += 1
+ offset[0] += 1
+
+ offset[0] += child.itemsize
+
+ if not PyDataType_HASFIELDS(child):
+ t = child.type_num
+ if end - f < 5:
+ raise RuntimeError(u"Format string allocated too short.")
+
+ # Until ticket #99 is fixed, use integers to avoid warnings
+ if t == NPY_BYTE: f[0] = 98 #"b"
+ elif t == NPY_UBYTE: f[0] = 66 #"B"
+ elif t == NPY_SHORT: f[0] = 104 #"h"
+ elif t == NPY_USHORT: f[0] = 72 #"H"
+ elif t == NPY_INT: f[0] = 105 #"i"
+ elif t == NPY_UINT: f[0] = 73 #"I"
+ elif t == NPY_LONG: f[0] = 108 #"l"
+ elif t == NPY_ULONG: f[0] = 76 #"L"
+ elif t == NPY_LONGLONG: f[0] = 113 #"q"
+ elif t == NPY_ULONGLONG: f[0] = 81 #"Q"
+ elif t == NPY_FLOAT: f[0] = 102 #"f"
+ elif t == NPY_DOUBLE: f[0] = 100 #"d"
+ elif t == NPY_LONGDOUBLE: f[0] = 103 #"g"
+ elif t == NPY_CFLOAT: f[0] = 90; f[1] = 102; f += 1 # Zf
+ elif t == NPY_CDOUBLE: f[0] = 90; f[1] = 100; f += 1 # Zd
+ elif t == NPY_CLONGDOUBLE: f[0] = 90; f[1] = 103; f += 1 # Zg
+ elif t == NPY_OBJECT: f[0] = 79 #"O"
+ else:
+ raise ValueError(u"unknown dtype code in numpy.pxd (%d)" % t)
+ f += 1
+ else:
+ # Cython ignores struct boundary information ("T{...}"),
+ # so don't output it
+ f = _util_dtypestring(child, f, end, offset)
+ return f
+
+
+#
+# ufunc API
+#
+
+cdef extern from "numpy/ufuncobject.h":
+
+ ctypedef void (*PyUFuncGenericFunction) (char **, npy_intp *, npy_intp *, void *)
+
+ ctypedef extern class numpy.ufunc [object PyUFuncObject, check_size ignore]:
+ cdef:
+ int nin, nout, nargs
+ int identity
+ PyUFuncGenericFunction *functions
+ void **data
+ int ntypes
+ int check_return
+ char *name
+ char *types
+ char *doc
+ void *ptr
+ PyObject *obj
+ PyObject *userloops
+
+ cdef enum:
+ PyUFunc_Zero
+ PyUFunc_One
+ PyUFunc_None
+ UFUNC_ERR_IGNORE
+ UFUNC_ERR_WARN
+ UFUNC_ERR_RAISE
+ UFUNC_ERR_CALL
+ UFUNC_ERR_PRINT
+ UFUNC_ERR_LOG
+ UFUNC_MASK_DIVIDEBYZERO
+ UFUNC_MASK_OVERFLOW
+ UFUNC_MASK_UNDERFLOW
+ UFUNC_MASK_INVALID
+ UFUNC_SHIFT_DIVIDEBYZERO
+ UFUNC_SHIFT_OVERFLOW
+ UFUNC_SHIFT_UNDERFLOW
+ UFUNC_SHIFT_INVALID
+ UFUNC_FPE_DIVIDEBYZERO
+ UFUNC_FPE_OVERFLOW
+ UFUNC_FPE_UNDERFLOW
+ UFUNC_FPE_INVALID
+ UFUNC_ERR_DEFAULT
+ UFUNC_ERR_DEFAULT2
+
+ object PyUFunc_FromFuncAndData(PyUFuncGenericFunction *,
+ void **, char *, int, int, int, int, char *, char *, int)
+ int PyUFunc_RegisterLoopForType(ufunc, int,
+ PyUFuncGenericFunction, int *, void *)
+ int PyUFunc_GenericFunction \
+ (ufunc, PyObject *, PyObject *, PyArrayObject **)
+ void PyUFunc_f_f_As_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_d_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_f_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_g_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F_As_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_F_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_D_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_G_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f_As_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_ff_f \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_dd_d \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_gg_g \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F_As_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_DD_D \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_FF_F \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_GG_G \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_O_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_OO_O_method \
+ (char **, npy_intp *, npy_intp *, void *)
+ void PyUFunc_On_Om \
+ (char **, npy_intp *, npy_intp *, void *)
+ int PyUFunc_GetPyValues \
+ (char *, int *, int *, PyObject **)
+ int PyUFunc_checkfperr \
+ (int, PyObject *, int *)
+ void PyUFunc_clearfperr()
+ int PyUFunc_getfperr()
+ int PyUFunc_handlefperr \
+ (int, PyObject *, int, int *)
+ int PyUFunc_ReplaceLoopBySignature \
+ (ufunc, PyUFuncGenericFunction, int *, PyUFuncGenericFunction *)
+ object PyUFunc_FromFuncAndDataAndSignature \
+ (PyUFuncGenericFunction *, void **, char *, int, int, int,
+ int, char *, char *, int, char *)
+
+ int _import_umath() except -1
+
+cdef inline void set_array_base(ndarray arr, object base):
+ Py_INCREF(base) # important to do this before stealing the reference below!
+ PyArray_SetBaseObject(arr, base)
+
+cdef inline object get_array_base(ndarray arr):
+ base = PyArray_BASE(arr)
+ if base is NULL:
+ return None
+ return <object>base
+
+# Versions of the import_* functions which are more suitable for
+# Cython code.
+cdef inline int import_array() except -1:
+ try:
+ _import_array()
+ except Exception:
+ raise ImportError("numpy.core.multiarray failed to import")
+
+cdef inline int import_umath() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
+
+cdef inline int import_ufunc() except -1:
+ try:
+ _import_umath()
+ except Exception:
+ raise ImportError("numpy.core.umath failed to import")
diff --git a/numpy/__init__.py b/numpy/__init__.py
index e1df236bb..fef8245de 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -143,7 +143,9 @@ else:
from .core import *
from . import compat
from . import lib
+ # FIXME: why have numpy.lib if everything is imported here??
from .lib import *
+
from . import linalg
from . import fft
from . import polynomial
@@ -163,13 +165,25 @@ else:
from __builtin__ import bool, int, float, complex, object, unicode, str
from .core import round, abs, max, min
+ # now that numpy modules are imported, can initialize limits
+ core.getlimits._register_known_types()
+ __all__.extend(['bool', 'int', 'float', 'complex', 'object', 'unicode',
+ 'str'])
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+ # Remove things that are in the numpy.lib but not in the numpy namespace
+ # Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
+ # that prevents adding more things to the main namespace by accident.
+ # The list below will grow until the `from .lib import *` fixme above is
+ # taken care of
+ __all__.remove('Arrayterator')
+ del Arrayterator
+
# Filter out Cython harmless warnings
warnings.filterwarnings("ignore", message="numpy.dtype size changed")
warnings.filterwarnings("ignore", message="numpy.ufunc size changed")
@@ -180,9 +194,34 @@ else:
oldnumeric = 'removed'
numarray = 'removed'
- # We don't actually use this ourselves anymore, but I'm not 100% sure that
- # no-one else in the world is using it (though I hope not)
- from .testing import Tester
+ if sys.version_info[:2] >= (3, 7):
+ # Importing Tester requires importing all of UnitTest which is not a
+ # cheap import Since it is mainly used in test suits, we lazy import it
+ # here to save on the order of 10 ms of import time for most users
+ #
+ # The previous way Tester was imported also had a side effect of adding
+ # the full `numpy.testing` namespace
+ #
+ # module level getattr is only supported in 3.7 onwards
+ # https://www.python.org/dev/peps/pep-0562/
+ def __getattr__(attr):
+ if attr == 'testing':
+ import numpy.testing as testing
+ return testing
+ elif attr == 'Tester':
+ from .testing import Tester
+ return Tester
+ else:
+ raise AttributeError("module {!r} has no attribute "
+ "{!r}".format(__name__, attr))
+
+ def __dir__():
+ return __all__ + ['Tester', 'testing']
+
+ else:
+ # We don't actually use this ourselves anymore, but I'm not 100% sure that
+ # no-one else in the world is using it (though I hope not)
+ from .testing import Tester
# Pytest testing
from numpy._pytesttester import PytestTester
diff --git a/numpy/_build_utils/src/apple_sgemv_fix.c b/numpy/_build_utils/src/apple_sgemv_fix.c
index 4c9c82ece..b1dbeb681 100644
--- a/numpy/_build_utils/src/apple_sgemv_fix.c
+++ b/numpy/_build_utils/src/apple_sgemv_fix.c
@@ -29,6 +29,9 @@
#include <dlfcn.h>
#include <stdlib.h>
#include <stdio.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <string.h>
/* ----------------------------------------------------------------- */
/* Original cblas_sgemv */
@@ -66,17 +69,40 @@ static int AVX_and_10_9 = 0;
/* Dynamic check for AVX support
* __builtin_cpu_supports("avx") is available in gcc 4.8,
* but clang and icc do not currently support it. */
-#define cpu_supports_avx()\
-(system("sysctl -n machdep.cpu.features | grep -q AVX") == 0)
-
+static inline int
+cpu_supports_avx()
+{
+ int enabled, r;
+ size_t length = sizeof(enabled);
+ r = sysctlbyname("hw.optional.avx1_0", &enabled, &length, NULL, 0);
+ if ( r == 0 && enabled != 0) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
+
/* Check if we are using MacOS X version 10.9 */
-#define using_mavericks()\
-(system("sw_vers -productVersion | grep -q 10\\.9\\.") == 0)
+static inline int
+using_mavericks()
+{
+ int r;
+ char str[32] = {0};
+ size_t size = sizeof(str);
+ r = sysctlbyname("kern.osproductversion", str, &size, NULL, 0);
+ if ( r == 0 && strncmp(str, "10.9", strlen("10.9")) == 0) {
+ return 1;
+ }
+ else {
+ return 0;
+ }
+}
__attribute__((destructor))
static void unloadlib(void)
{
- if (veclib) dlclose(veclib);
+ if (veclib) dlclose(veclib);
}
__attribute__((constructor))
@@ -198,30 +224,30 @@ void cblas_sgemv(const enum CBLAS_ORDER order,
const float *X, const int incX, const float beta,
float *Y, const int incY)
{
- char TA;
- if (order == CblasColMajor)
- {
- if (TransA == CblasNoTrans) TA = 'N';
- else if (TransA == CblasTrans) TA = 'T';
- else if (TransA == CblasConjTrans) TA = 'C';
- else
- {
- cblas_xerbla(2, "cblas_sgemv","Illegal TransA setting, %d\n", TransA);
- }
- sgemv_(&TA, &M, &N, &alpha, A, &lda, X, &incX, &beta, Y, &incY);
- }
- else if (order == CblasRowMajor)
- {
- if (TransA == CblasNoTrans) TA = 'T';
- else if (TransA == CblasTrans) TA = 'N';
- else if (TransA == CblasConjTrans) TA = 'N';
- else
- {
- cblas_xerbla(2, "cblas_sgemv", "Illegal TransA setting, %d\n", TransA);
- return;
- }
- sgemv_(&TA, &N, &M, &alpha, A, &lda, X, &incX, &beta, Y, &incY);
- }
- else
- cblas_xerbla(1, "cblas_sgemv", "Illegal Order setting, %d\n", order);
+ char TA;
+ if (order == CblasColMajor)
+ {
+ if (TransA == CblasNoTrans) TA = 'N';
+ else if (TransA == CblasTrans) TA = 'T';
+ else if (TransA == CblasConjTrans) TA = 'C';
+ else
+ {
+ cblas_xerbla(2, "cblas_sgemv","Illegal TransA setting, %d\n", TransA);
+ }
+ sgemv_(&TA, &M, &N, &alpha, A, &lda, X, &incX, &beta, Y, &incY);
+ }
+ else if (order == CblasRowMajor)
+ {
+ if (TransA == CblasNoTrans) TA = 'T';
+ else if (TransA == CblasTrans) TA = 'N';
+ else if (TransA == CblasConjTrans) TA = 'N';
+ else
+ {
+ cblas_xerbla(2, "cblas_sgemv", "Illegal TransA setting, %d\n", TransA);
+ return;
+ }
+ sgemv_(&TA, &N, &M, &alpha, A, &lda, X, &incX, &beta, Y, &incY);
+ }
+ else
+ cblas_xerbla(1, "cblas_sgemv", "Illegal Order setting, %d\n", order);
}
diff --git a/numpy/_globals.py b/numpy/_globals.py
index 9a7b458f1..f5c0761b5 100644
--- a/numpy/_globals.py
+++ b/numpy/_globals.py
@@ -17,7 +17,6 @@ motivated this module.
"""
from __future__ import division, absolute_import, print_function
-
__ALL__ = [
'ModuleDeprecationWarning', 'VisibleDeprecationWarning', '_NoValue'
]
@@ -39,7 +38,9 @@ class ModuleDeprecationWarning(DeprecationWarning):
nose tester will let pass without making tests fail.
"""
- pass
+
+
+ModuleDeprecationWarning.__module__ = 'numpy'
class VisibleDeprecationWarning(UserWarning):
@@ -50,7 +51,10 @@ class VisibleDeprecationWarning(UserWarning):
the usage is most likely a user bug.
"""
- pass
+
+
+VisibleDeprecationWarning.__module__ = 'numpy'
+
class _NoValueType(object):
"""Special keyword value.
@@ -73,4 +77,5 @@ class _NoValueType(object):
def __repr__(self):
return "<no value>"
+
_NoValue = _NoValueType()
diff --git a/numpy/_pytesttester.py b/numpy/_pytesttester.py
index 30ecc69c7..b25224c20 100644
--- a/numpy/_pytesttester.py
+++ b/numpy/_pytesttester.py
@@ -48,10 +48,9 @@ class PytestTester(object):
"""
Pytest test runner.
- This class is made available in ``numpy.testing``, and a test function
- is typically added to a package's __init__.py like so::
+ A test function is typically added to a package's __init__.py like so::
- from numpy.testing import PytestTester
+ from numpy._pytesttester import PytestTester
test = PytestTester(__name__).test
del PytestTester
@@ -68,6 +67,12 @@ class PytestTester(object):
module_name : module name
The name of the module to test.
+ Notes
+ -----
+ Unlike the previous ``nose``-based implementation, this class is not
+ publicly exposed as it performs some ``numpy``-specific warning
+ suppression.
+
"""
def __init__(self, module_name):
self.module_name = module_name
@@ -105,24 +110,18 @@ class PytestTester(object):
Notes
-----
- Each NumPy module exposes `test` in its namespace to run all tests for it.
- For example, to run all tests for numpy.lib:
+ Each NumPy module exposes `test` in its namespace to run all tests for
+ it. For example, to run all tests for numpy.lib:
>>> np.lib.test() #doctest: +SKIP
Examples
--------
>>> result = np.lib.test() #doctest: +SKIP
- Running unit tests for numpy.lib
...
- Ran 976 tests in 3.933s
-
- OK
-
- >>> result.errors #doctest: +SKIP
- []
- >>> result.knownfail #doctest: +SKIP
- []
+ 1023 passed, 2 skipped, 6 deselected, 1 xfailed in 10.39 seconds
+ >>> result
+ True
"""
import pytest
diff --git a/numpy/compat/_inspect.py b/numpy/compat/_inspect.py
index 76bf544a5..439d0d2c2 100644
--- a/numpy/compat/_inspect.py
+++ b/numpy/compat/_inspect.py
@@ -184,9 +184,8 @@ def formatargvalues(args, varargs, varkw, locals,
def convert(name, locals=locals,
formatarg=formatarg, formatvalue=formatvalue):
return formatarg(name) + formatvalue(locals[name])
- specs = []
- for i in range(len(args)):
- specs.append(strseq(args[i], convert, join))
+ specs = [strseq(arg, convert, join) for arg in args]
+
if varargs:
specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
if varkw:
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index ce4543bc3..c9ed9d52c 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -8,17 +8,23 @@ __all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
'integer_types', 'is_pathlib_path', 'npy_load_module', 'Path',
- 'contextlib_nullcontext']
+ 'pickle', 'contextlib_nullcontext', 'os_fspath', 'os_PathLike']
import sys
+import os
try:
- from pathlib import Path
+ from pathlib import Path, PurePath
except ImportError:
- Path = None
+ Path = PurePath = None
if sys.version_info[0] >= 3:
import io
+ try:
+ import pickle5 as pickle
+ except ImportError:
+ import pickle
+
long = int
integer_types = (int,)
basestring = str
@@ -51,8 +57,9 @@ if sys.version_info[0] >= 3:
strchar = 'U'
-
else:
+ import cpickle as pickle
+
bytes = str
long = long
basestring = basestring
@@ -76,7 +83,6 @@ else:
def sixu(s):
return unicode(s, 'unicode_escape')
-
def getexception():
return sys.exc_info()[1]
@@ -95,6 +101,8 @@ def asunicode_nested(x):
def is_pathlib_path(obj):
"""
Check whether obj is a pathlib.Path object.
+
+ Prefer using `isinstance(obj, os_PathLike)` instead of this function.
"""
return Path is not None and isinstance(obj, Path)
@@ -166,7 +174,6 @@ else:
"""
import imp
- import os
if info is None:
path = os.path.dirname(fn)
fo, fn, info = imp.find_module(name, [path])
@@ -177,3 +184,64 @@ else:
finally:
fo.close()
return mod
+
+# backport abc.ABC
+import abc
+if sys.version_info[:2] >= (3, 4):
+ abc_ABC = abc.ABC
+else:
+ abc_ABC = abc.ABCMeta('ABC', (object,), {'__slots__': ()})
+
+
+# Backport os.fs_path, os.PathLike, and PurePath.__fspath__
+if sys.version_info[:2] >= (3, 6):
+ os_fspath = os.fspath
+ os_PathLike = os.PathLike
+else:
+ def _PurePath__fspath__(self):
+ return str(self)
+
+ class os_PathLike(abc_ABC):
+ """Abstract base class for implementing the file system path protocol."""
+
+ @abc.abstractmethod
+ def __fspath__(self):
+ """Return the file system path representation of the object."""
+ raise NotImplementedError
+
+ @classmethod
+ def __subclasshook__(cls, subclass):
+ if PurePath is not None and issubclass(subclass, PurePath):
+ return True
+ return hasattr(subclass, '__fspath__')
+
+
+ def os_fspath(path):
+ """Return the path representation of a path-like object.
+ If str or bytes is passed in, it is returned unchanged. Otherwise the
+ os.PathLike interface is used to get the path representation. If the
+ path representation is not str or bytes, TypeError is raised. If the
+ provided path is not str, bytes, or os.PathLike, TypeError is raised.
+ """
+ if isinstance(path, (str, bytes)):
+ return path
+
+ # Work from the object's type to match method resolution of other magic
+ # methods.
+ path_type = type(path)
+ try:
+ path_repr = path_type.__fspath__(path)
+ except AttributeError:
+ if hasattr(path_type, '__fspath__'):
+ raise
+ elif PurePath is not None and issubclass(path_type, PurePath):
+ return _PurePath__fspath__(path)
+ else:
+ raise TypeError("expected str, bytes or os.PathLike object, "
+ "not " + path_type.__name__)
+ if isinstance(path_repr, (str, bytes)):
+ return path_repr
+ else:
+ raise TypeError("expected {}.__fspath__() to return str or bytes, "
+ "not {}".format(path_type.__name__,
+ type(path_repr).__name__))
diff --git a/numpy/conftest.py b/numpy/conftest.py
index 4d4d055ec..18d5d1ce9 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -13,6 +13,15 @@ _old_fpu_mode = None
_collect_results = {}
+def pytest_configure(config):
+ config.addinivalue_line("markers",
+ "valgrind_error: Tests that are known to error under valgrind.")
+ config.addinivalue_line("markers",
+ "leaks_references: Tests that are known to leak references.")
+ config.addinivalue_line("markers",
+ "slow: Tests that are very slow.")
+
+
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
diff --git a/numpy/core/__init__.py b/numpy/core/__init__.py
index 80ce84f00..c3b3f0392 100644
--- a/numpy/core/__init__.py
+++ b/numpy/core/__init__.py
@@ -1,11 +1,19 @@
+"""
+Contains the core of NumPy: ndarray, ufuncs, dtypes, etc.
+
+Please note that this module is private. All functions and objects
+are available in the main ``numpy`` namespace - use that instead.
+
+"""
+
from __future__ import division, absolute_import, print_function
-from .info import __doc__
from numpy.version import version as __version__
+import os
+
# disables OpenBLAS affinity setting of the main thread that limits
# python threads or processes to one core
-import os
env_added = []
for envkey in ['OPENBLAS_MAIN_FREE', 'GOTOBLAS_MAIN_FREE']:
if envkey not in os.environ:
@@ -20,17 +28,12 @@ except ImportError as exc:
IMPORTANT: PLEASE READ THIS FOR ADVICE ON HOW TO SOLVE THIS ISSUE!
-Importing the multiarray numpy extension module failed. Most
-likely you are trying to import a failed build of numpy.
-Here is how to proceed:
-- If you're working with a numpy git repository, try `git clean -xdf`
- (removes all files not under version control) and rebuild numpy.
-- If you are simply trying to use the numpy version that you have installed:
- your installation is broken - please reinstall numpy.
-- If you have already reinstalled and that did not fix the problem, then:
- 1. Check that you are using the Python you expect (you're using %s),
+Importing the numpy c-extensions failed.
+- Try uninstalling and reinstalling numpy.
+- If you have already done that, then:
+ 1. Check that you expected to use Python%d.%d from "%s",
and that you have no directories in your PATH or PYTHONPATH that can
- interfere with the Python and numpy versions you're trying to use.
+ interfere with the Python and numpy version "%s" you're trying to use.
2. If (1) looks fine, you can open a new issue at
https://github.com/numpy/numpy/issues. Please include details on:
- how you installed Python
@@ -39,11 +42,15 @@ Here is how to proceed:
- whether or not you have multiple versions of Python installed
- if you built from source, your compiler versions and ideally a build log
- Note: this error has many possible causes, so please don't comment on
- an existing issue about this - open a new one instead.
+- If you're working with a numpy git repository, try `git clean -xdf`
+ (removes all files not under version control) and rebuild numpy.
+
+Note: this error has many possible causes, so please don't comment on
+an existing issue about this - open a new one instead.
Original error was: %s
-""" % (sys.executable, exc)
+""" % (sys.version_info[0], sys.version_info[1], sys.executable,
+ __version__, exc)
raise ImportError(msg)
finally:
for envkey in env_added:
@@ -53,7 +60,19 @@ del env_added
del os
from . import umath
-from . import _internal # for freeze programs
+
+# Check that multiarray,umath are pure python modules wrapping
+# _multiarray_umath and not either of the old c-extension modules
+if not (hasattr(multiarray, '_multiarray_umath') and
+ hasattr(umath, '_multiarray_umath')):
+ import sys
+ path = sys.modules['numpy'].__path__
+ msg = ("Something is wrong with the numpy installation. "
+ "While importing we detected an older version of "
+ "numpy in {}. One method of fixing this is to repeatedly uninstall "
+ "numpy until none is found, then reinstall this version.")
+ raise ImportError(msg.format(path))
+
from . import numerictypes as nt
multiarray.set_typeDict(nt.sctypeDict)
from . import numeric
@@ -83,6 +102,11 @@ from .numeric import absolute as abs
# do this after everything else, to minimize the chance of this misleadingly
# appearing in an import-time traceback
from . import _add_newdocs
+# add these for module-freeze analysis (like PyInstaller)
+from . import _dtype_ctypes
+from . import _internal
+from . import _dtype
+from . import _methods
__all__ = ['char', 'rec', 'memmap']
__all__ += numeric.__all__
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index 1c82cfde4..dbe3d226f 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -10,6 +10,8 @@ NOTE: Many of the methods of ndarray have corresponding functions.
"""
from __future__ import division, absolute_import, print_function
+import sys
+
from numpy.core import numerictypes as _numerictypes
from numpy.core import dtype
from numpy.core.function_base import add_newdoc
@@ -49,7 +51,7 @@ add_newdoc('numpy.core', 'flatiter',
>>> x = np.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> type(fl)
- <type 'numpy.flatiter'>
+ <class 'numpy.flatiter'>
>>> for item in fl:
... print(item)
...
@@ -92,7 +94,7 @@ add_newdoc('numpy.core', 'flatiter', ('coords',
>>> fl = x.flat
>>> fl.coords
(0, 0)
- >>> fl.next()
+ >>> next(fl)
0
>>> fl.coords
(0, 1)
@@ -111,7 +113,7 @@ add_newdoc('numpy.core', 'flatiter', ('index',
>>> fl = x.flat
>>> fl.index
0
- >>> fl.next()
+ >>> next(fl)
0
>>> fl.index
1
@@ -161,62 +163,63 @@ add_newdoc('numpy.core', 'nditer',
----------
op : ndarray or sequence of array_like
The array(s) to iterate over.
+
flags : sequence of str, optional
- Flags to control the behavior of the iterator.
+ Flags to control the behavior of the iterator.
- * "buffered" enables buffering when required.
- * "c_index" causes a C-order index to be tracked.
- * "f_index" causes a Fortran-order index to be tracked.
- * "multi_index" causes a multi-index, or a tuple of indices
+ * ``buffered`` enables buffering when required.
+ * ``c_index`` causes a C-order index to be tracked.
+ * ``f_index`` causes a Fortran-order index to be tracked.
+ * ``multi_index`` causes a multi-index, or a tuple of indices
with one per iteration dimension, to be tracked.
- * "common_dtype" causes all the operands to be converted to
+ * ``common_dtype`` causes all the operands to be converted to
a common data type, with copying or buffering as necessary.
- * "copy_if_overlap" causes the iterator to determine if read
+ * ``copy_if_overlap`` causes the iterator to determine if read
operands have overlap with write operands, and make temporary
copies as necessary to avoid overlap. False positives (needless
copying) are possible in some cases.
- * "delay_bufalloc" delays allocation of the buffers until
- a reset() call is made. Allows "allocate" operands to
+ * ``delay_bufalloc`` delays allocation of the buffers until
+ a reset() call is made. Allows ``allocate`` operands to
be initialized before their values are copied into the buffers.
- * "external_loop" causes the `values` given to be
+ * ``external_loop`` causes the ``values`` given to be
one-dimensional arrays with multiple values instead of
zero-dimensional arrays.
- * "grow_inner" allows the `value` array sizes to be made
- larger than the buffer size when both "buffered" and
- "external_loop" is used.
- * "ranged" allows the iterator to be restricted to a sub-range
+ * ``grow_inner`` allows the ``value`` array sizes to be made
+ larger than the buffer size when both ``buffered`` and
+ ``external_loop`` is used.
+ * ``ranged`` allows the iterator to be restricted to a sub-range
of the iterindex values.
- * "refs_ok" enables iteration of reference types, such as
+ * ``refs_ok`` enables iteration of reference types, such as
object arrays.
- * "reduce_ok" enables iteration of "readwrite" operands
+ * ``reduce_ok`` enables iteration of ``readwrite`` operands
which are broadcasted, also known as reduction operands.
- * "zerosize_ok" allows `itersize` to be zero.
+ * ``zerosize_ok`` allows `itersize` to be zero.
op_flags : list of list of str, optional
- This is a list of flags for each operand. At minimum, one of
- "readonly", "readwrite", or "writeonly" must be specified.
-
- * "readonly" indicates the operand will only be read from.
- * "readwrite" indicates the operand will be read from and written to.
- * "writeonly" indicates the operand will only be written to.
- * "no_broadcast" prevents the operand from being broadcasted.
- * "contig" forces the operand data to be contiguous.
- * "aligned" forces the operand data to be aligned.
- * "nbo" forces the operand data to be in native byte order.
- * "copy" allows a temporary read-only copy if required.
- * "updateifcopy" allows a temporary read-write copy if required.
- * "allocate" causes the array to be allocated if it is None
- in the `op` parameter.
- * "no_subtype" prevents an "allocate" operand from using a subtype.
- * "arraymask" indicates that this operand is the mask to use
+ This is a list of flags for each operand. At minimum, one of
+ ``readonly``, ``readwrite``, or ``writeonly`` must be specified.
+
+ * ``readonly`` indicates the operand will only be read from.
+ * ``readwrite`` indicates the operand will be read from and written to.
+ * ``writeonly`` indicates the operand will only be written to.
+ * ``no_broadcast`` prevents the operand from being broadcasted.
+ * ``contig`` forces the operand data to be contiguous.
+ * ``aligned`` forces the operand data to be aligned.
+ * ``nbo`` forces the operand data to be in native byte order.
+ * ``copy`` allows a temporary read-only copy if required.
+ * ``updateifcopy`` allows a temporary read-write copy if required.
+ * ``allocate`` causes the array to be allocated if it is None
+ in the ``op`` parameter.
+ * ``no_subtype`` prevents an ``allocate`` operand from using a subtype.
+ * ``arraymask`` indicates that this operand is the mask to use
for selecting elements when writing to operands with the
'writemasked' flag set. The iterator does not enforce this,
but when writing from a buffer back to the array, it only
copies those elements indicated by this mask.
- * 'writemasked' indicates that only elements where the chosen
- 'arraymask' operand is True will be written to.
- * "overlap_assume_elementwise" can be used to mark operands that are
+ * ``writemasked`` indicates that only elements where the chosen
+ ``arraymask`` operand is True will be written to.
+ * ``overlap_assume_elementwise`` can be used to mark operands that are
accessed only in the iterator order, to allow less conservative
- copying when "copy_if_overlap" is present.
+ copying when ``copy_if_overlap`` is present.
op_dtypes : dtype or tuple of dtype(s), optional
The required data type(s) of the operands. If copying or buffering
is enabled, the data will be converted to/from their original types.
@@ -225,7 +228,7 @@ add_newdoc('numpy.core', 'nditer',
Fortran order, 'A' means 'F' order if all the arrays are Fortran
contiguous, 'C' order otherwise, and 'K' means as close to the
order the array elements appear in memory as possible. This also
- affects the element memory order of "allocate" operands, as they
+ affects the element memory order of ``allocate`` operands, as they
are allocated to be compatible with iteration order.
Default is 'K'.
casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
@@ -233,20 +236,20 @@ add_newdoc('numpy.core', 'nditer',
or buffering. Setting this to 'unsafe' is not recommended,
as it can adversely affect accumulations.
- * 'no' means the data types should not be cast at all.
- * 'equiv' means only byte-order changes are allowed.
- * 'safe' means only casts which can preserve values are allowed.
- * 'same_kind' means only safe casts or casts within a kind,
- like float64 to float32, are allowed.
- * 'unsafe' means any data conversions may be done.
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
op_axes : list of list of ints, optional
If provided, is a list of ints or None for each operands.
The list of axes for an operand is a mapping from the dimensions
of the iterator to the dimensions of the operand. A value of
-1 can be placed for entries, causing that dimension to be
- treated as "newaxis".
+ treated as `newaxis`.
itershape : tuple of ints, optional
- The desired shape of the iterator. This allows "allocate" operands
+ The desired shape of the iterator. This allows ``allocate`` operands
with a dimension mapped by op_axes not corresponding to a dimension
of a different operand to get a value not equal to 1 for that
dimension.
@@ -263,19 +266,19 @@ add_newdoc('numpy.core', 'nditer',
finished : bool
Whether the iteration over the operands is finished or not.
has_delayed_bufalloc : bool
- If True, the iterator was created with the "delay_bufalloc" flag,
+ If True, the iterator was created with the ``delay_bufalloc`` flag,
and no reset() function was called on it yet.
has_index : bool
- If True, the iterator was created with either the "c_index" or
- the "f_index" flag, and the property `index` can be used to
+ If True, the iterator was created with either the ``c_index`` or
+ the ``f_index`` flag, and the property `index` can be used to
retrieve it.
has_multi_index : bool
- If True, the iterator was created with the "multi_index" flag,
+ If True, the iterator was created with the ``multi_index`` flag,
and the property `multi_index` can be used to retrieve it.
index
- When the "c_index" or "f_index" flag was used, this property
+ When the ``c_index`` or ``f_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
- and `has_index` is False.
+ and ``has_index`` is False.
iterationneedsapi : bool
Whether iteration requires access to the Python API, for example
if one of the operands is an object array.
@@ -288,11 +291,11 @@ add_newdoc('numpy.core', 'nditer',
and optimized iterator access pattern. Valid only before the iterator
is closed.
multi_index
- When the "multi_index" flag was used, this property
+ When the ``multi_index`` flag was used, this property
provides access to the index. Raises a ValueError if accessed
- accessed and `has_multi_index` is False.
+ accessed and ``has_multi_index`` is False.
ndim : int
- The iterator's dimension.
+ The dimensions of the iterator.
nop : int
The number of iterator operands.
operands : tuple of operand(s)
@@ -301,8 +304,8 @@ add_newdoc('numpy.core', 'nditer',
shape : tuple of ints
Shape tuple, the shape of the iterator.
value
- Value of `operands` at current iteration. Normally, this is a
- tuple of array scalars, but if the flag "external_loop" is used,
+ Value of ``operands`` at current iteration. Normally, this is a
+ tuple of array scalars, but if the flag ``external_loop`` is used,
it is a tuple of one dimensional arrays.
Notes
@@ -313,96 +316,93 @@ add_newdoc('numpy.core', 'nditer',
The Python exposure supplies two iteration interfaces, one which follows
the Python iterator protocol, and another which mirrors the C-style
do-while pattern. The native Python approach is better in most cases, but
- if you need the iterator's coordinates or index, use the C-style pattern.
+ if you need the coordinates or index of an iterator, use the C-style pattern.
Examples
--------
Here is how we might write an ``iter_add`` function, using the
- Python iterator protocol::
-
- def iter_add_py(x, y, out=None):
- addop = np.add
- it = np.nditer([x, y, out], [],
- [['readonly'], ['readonly'], ['writeonly','allocate']])
- with it:
- for (a, b, c) in it:
- addop(a, b, out=c)
- return it.operands[2]
-
- Here is the same function, but following the C-style pattern::
-
- def iter_add(x, y, out=None):
- addop = np.add
-
- it = np.nditer([x, y, out], [],
- [['readonly'], ['readonly'], ['writeonly','allocate']])
- with it:
- while not it.finished:
- addop(it[0], it[1], out=it[2])
- it.iternext()
-
- return it.operands[2]
-
- Here is an example outer product function::
-
- def outer_it(x, y, out=None):
- mulop = np.multiply
-
- it = np.nditer([x, y, out], ['external_loop'],
- [['readonly'], ['readonly'], ['writeonly', 'allocate']],
- op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
- [-1] * x.ndim + list(range(y.ndim)),
- None])
- with it:
- for (a, b, c) in it:
- mulop(a, b, out=c)
- return it.operands[2]
-
- >>> a = np.arange(2)+1
- >>> b = np.arange(3)+1
- >>> outer_it(a,b)
- array([[1, 2, 3],
- [2, 4, 6]])
-
- Here is an example function which operates like a "lambda" ufunc::
-
- def luf(lamdaexpr, *args, **kwargs):
- "luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)"
- nargs = len(args)
- op = (kwargs.get('out',None),) + args
- it = np.nditer(op, ['buffered','external_loop'],
- [['writeonly','allocate','no_broadcast']] +
- [['readonly','nbo','aligned']]*nargs,
- order=kwargs.get('order','K'),
- casting=kwargs.get('casting','safe'),
- buffersize=kwargs.get('buffersize',0))
- while not it.finished:
- it[0] = lamdaexpr(*it[1:])
- it.iternext()
- return it.operands[0]
-
- >>> a = np.arange(5)
- >>> b = np.ones(5)
- >>> luf(lambda i,j:i*i + j/2, a, b)
- array([ 0.5, 1.5, 4.5, 9.5, 16.5])
-
- If operand flags `"writeonly"` or `"readwrite"` are used the operands may
- be views into the original data with the `WRITEBACKIFCOPY` flag. In this case
- nditer must be used as a context manager or the nditer.close
- method must be called before using the result. The temporary
- data will be written back to the original data when the `__exit__`
- function is called but not before:
-
- >>> a = np.arange(6, dtype='i4')[::-2]
- >>> with nditer(a, [],
- ... [['writeonly', 'updateifcopy']],
- ... casting='unsafe',
- ... op_dtypes=[np.dtype('f4')]) as i:
- ... x = i.operands[0]
- ... x[:] = [-1, -2, -3]
- ... # a still unchanged here
- >>> a, x
- array([-1, -2, -3]), array([-1, -2, -3])
+ Python iterator protocol:
+
+ >>> def iter_add_py(x, y, out=None):
+ ... addop = np.add
+ ... it = np.nditer([x, y, out], [],
+ ... [['readonly'], ['readonly'], ['writeonly','allocate']])
+ ... with it:
+ ... for (a, b, c) in it:
+ ... addop(a, b, out=c)
+ ... return it.operands[2]
+
+ Here is the same function, but following the C-style pattern:
+
+ >>> def iter_add(x, y, out=None):
+ ... addop = np.add
+ ... it = np.nditer([x, y, out], [],
+ ... [['readonly'], ['readonly'], ['writeonly','allocate']])
+ ... with it:
+ ... while not it.finished:
+ ... addop(it[0], it[1], out=it[2])
+ ... it.iternext()
+ ... return it.operands[2]
+
+ Here is an example outer product function:
+
+ >>> def outer_it(x, y, out=None):
+ ... mulop = np.multiply
+ ... it = np.nditer([x, y, out], ['external_loop'],
+ ... [['readonly'], ['readonly'], ['writeonly', 'allocate']],
+ ... op_axes=[list(range(x.ndim)) + [-1] * y.ndim,
+ ... [-1] * x.ndim + list(range(y.ndim)),
+ ... None])
+ ... with it:
+ ... for (a, b, c) in it:
+ ... mulop(a, b, out=c)
+ ... return it.operands[2]
+
+ >>> a = np.arange(2)+1
+ >>> b = np.arange(3)+1
+ >>> outer_it(a,b)
+ array([[1, 2, 3],
+ [2, 4, 6]])
+
+ Here is an example function which operates like a "lambda" ufunc:
+
+ >>> def luf(lamdaexpr, *args, **kwargs):
+ ... '''luf(lambdaexpr, op1, ..., opn, out=None, order='K', casting='safe', buffersize=0)'''
+ ... nargs = len(args)
+ ... op = (kwargs.get('out',None),) + args
+ ... it = np.nditer(op, ['buffered','external_loop'],
+ ... [['writeonly','allocate','no_broadcast']] +
+ ... [['readonly','nbo','aligned']]*nargs,
+ ... order=kwargs.get('order','K'),
+ ... casting=kwargs.get('casting','safe'),
+ ... buffersize=kwargs.get('buffersize',0))
+ ... while not it.finished:
+ ... it[0] = lamdaexpr(*it[1:])
+ ... it.iternext()
+ ... return it.operands[0]
+
+ >>> a = np.arange(5)
+ >>> b = np.ones(5)
+ >>> luf(lambda i,j:i*i + j/2, a, b)
+ array([ 0.5, 1.5, 4.5, 9.5, 16.5])
+
+ If operand flags `"writeonly"` or `"readwrite"` are used the
+ operands may be views into the original data with the
+ `WRITEBACKIFCOPY` flag. In this case `nditer` must be used as a
+ context manager or the `nditer.close` method must be called before
+ using the result. The temporary data will be written back to the
+ original data when the `__exit__` function is called but not before:
+
+ >>> a = np.arange(6, dtype='i4')[::-2]
+ >>> with np.nditer(a, [],
+ ... [['writeonly', 'updateifcopy']],
+ ... casting='unsafe',
+ ... op_dtypes=[np.dtype('f4')]) as i:
+ ... x = i.operands[0]
+ ... x[:] = [-1, -2, -3]
+ ... # a still unchanged here
+ >>> a, x
+ (array([-1, -2, -3], dtype=int32), array([-1., -2., -3.], dtype=float32))
It is important to note that once the iterator is exited, dangling
references (like `x` in the example) may or may not share data with
@@ -413,6 +413,8 @@ add_newdoc('numpy.core', 'nditer',
`x.data` will still point at some part of `a.data`, and writing to
one will affect the other.
+ Context management and the `close` method appeared in version 1.15.0.
+
""")
# nditer methods
@@ -428,10 +430,10 @@ add_newdoc('numpy.core', 'nditer', ('copy',
>>> x = np.arange(10)
>>> y = x + 1
>>> it = np.nditer([x, y])
- >>> it.next()
+ >>> next(it)
(array(0), array(1))
>>> it2 = it.copy()
- >>> it2.next()
+ >>> next(it2)
(array(1), array(2))
"""))
@@ -544,7 +546,6 @@ add_newdoc('numpy.core', 'nested_iters',
... print(i.multi_index)
... for y in j:
... print('', j.multi_index, y)
-
(0,)
(0, 0) 0
(0, 1) 1
@@ -569,6 +570,8 @@ add_newdoc('numpy.core', 'nditer', ('close',
Resolve all writeback semantics in writeable operands.
+ .. versionadded:: 1.15.0
+
See Also
--------
@@ -617,9 +620,9 @@ add_newdoc('numpy.core', 'broadcast',
>>> out = np.empty(b.shape)
>>> out.flat = [u+v for (u,v) in b]
>>> out
- array([[ 5., 6., 7.],
- [ 6., 7., 8.],
- [ 7., 8., 9.]])
+ array([[5., 6., 7.],
+ [6., 7., 8.],
+ [7., 8., 9.]])
Compare against built-in broadcasting:
@@ -643,7 +646,7 @@ add_newdoc('numpy.core', 'broadcast', ('index',
>>> b = np.broadcast(x, y)
>>> b.index
0
- >>> b.next(), b.next(), b.next()
+ >>> next(b), next(b), next(b)
((1, 4), (1, 5), (1, 6))
>>> b.index
3
@@ -667,7 +670,7 @@ add_newdoc('numpy.core', 'broadcast', ('iters',
>>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> row, col = b.iters
- >>> row.next(), col.next()
+ >>> next(row), next(col)
(1, 4)
"""))
@@ -762,11 +765,11 @@ add_newdoc('numpy.core', 'broadcast', ('reset',
Examples
--------
>>> x = np.array([1, 2, 3])
- >>> y = np.array([[4], [5], [6]]
+ >>> y = np.array([[4], [5], [6]])
>>> b = np.broadcast(x, y)
>>> b.index
0
- >>> b.next(), b.next(), b.next()
+ >>> next(b), next(b), next(b)
((1, 4), (2, 4), (3, 4))
>>> b.index
3
@@ -796,8 +799,7 @@ add_newdoc('numpy.core.multiarray', 'array',
dtype : data-type, optional
The desired data-type for the array. If not given, then the type will
be determined as the minimum type required to hold the objects in the
- sequence. This argument can only be used to 'upcast' the array. For
- downcasting, use the .astype(t) method.
+ sequence.
copy : bool, optional
If true (default), then the object is copied. Otherwise, a copy will
only be made if __array__ returns a copy, if obj is a nested sequence,
@@ -939,74 +941,14 @@ add_newdoc('numpy.core.multiarray', 'empty',
--------
>>> np.empty([2, 2])
array([[ -9.74499359e+001, 6.69583040e-309],
- [ 2.13182611e-314, 3.06959433e-309]]) #random
+ [ 2.13182611e-314, 3.06959433e-309]]) #uninitialized
>>> np.empty([2, 2], dtype=int)
array([[-1073741821, -1067949133],
- [ 496041986, 19249760]]) #random
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'empty_like',
- """
- empty_like(prototype, dtype=None, order='K', subok=True)
-
- Return a new array with the same shape and type as a given array.
-
- Parameters
- ----------
- prototype : array_like
- The shape and data-type of `prototype` define these same attributes
- of the returned array.
- dtype : data-type, optional
- Overrides the data type of the result.
-
- .. versionadded:: 1.6.0
- order : {'C', 'F', 'A', or 'K'}, optional
- Overrides the memory layout of the result. 'C' means C-order,
- 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
- contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
- as closely as possible.
-
- .. versionadded:: 1.6.0
- subok : bool, optional.
- If True, then the newly created array will use the sub-class
- type of 'a', otherwise it will be a base-class array. Defaults
- to True.
-
- Returns
- -------
- out : ndarray
- Array of uninitialized (arbitrary) data with the same
- shape and type as `prototype`.
-
- See Also
- --------
- ones_like : Return an array of ones with shape and type of input.
- zeros_like : Return an array of zeros with shape and type of input.
- full_like : Return a new array with shape of input filled with value.
- empty : Return a new uninitialized array.
-
- Notes
- -----
- This function does *not* initialize the returned array; to do that use
- `zeros_like` or `ones_like` instead. It may be marginally faster than
- the functions that do set the array values.
-
- Examples
- --------
- >>> a = ([1,2,3], [4,5,6]) # a is array-like
- >>> np.empty_like(a)
- array([[-1073741821, -1073741821, 3], #random
- [ 0, 0, -1073741821]])
- >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
- >>> np.empty_like(a)
- array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000],#random
- [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
+ [ 496041986, 19249760]]) #uninitialized
""")
-
add_newdoc('numpy.core.multiarray', 'scalar',
"""
scalar(dtype, obj)
@@ -1104,9 +1046,14 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
elements is also ignored.
.. deprecated:: 1.14
- If this argument is not provided, `fromstring` falls back on the
- behaviour of `frombuffer` after encoding unicode string inputs as
- either utf-8 (python 3), or the default encoding (python 2).
+ Passing ``sep=''``, the default, is deprecated since it will
+ trigger the deprecated binary mode of this function. This mode
+ interprets `string` as binary bytes, rather than ASCII text with
+ decimal numbers, an operation which is better spelt
+ ``frombuffer(string, dtype, count)``. If `string` contains unicode
+ text, the binary mode of `fromstring` will first encode it into
+ bytes using either utf-8 (python 3) or the default encoding
+ (python 2), neither of which produce sane results.
Returns
-------
@@ -1132,6 +1079,43 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
""")
+add_newdoc('numpy.core.multiarray', 'compare_chararrays',
+ """
+ compare_chararrays(a, b, cmp_op, rstrip)
+
+ Performs element-wise comparison of two string arrays using the
+ comparison operator specified by `cmp_op`.
+
+ Parameters
+ ----------
+ a, b : array_like
+ Arrays to be compared.
+ cmp_op : {"<", "<=", "==", ">=", ">", "!="}
+ Type of comparison.
+ rstrip : Boolean
+ If True, the spaces at the end of Strings are removed before the comparison.
+
+ Returns
+ -------
+ out : ndarray
+ The output array of type Boolean with the same shape as a and b.
+
+ Raises
+ ------
+ ValueError
+ If `cmp_op` is not valid.
+ TypeError
+ If at least one of `a` or `b` is a non-string array
+
+ Examples
+ --------
+ >>> a = np.array(["a", "b", "cde"])
+ >>> b = np.array(["a", "a", "dec"])
+ >>> np.compare_chararrays(a, b, ">", True)
+ array([False, True, False])
+
+ """)
+
add_newdoc('numpy.core.multiarray', 'fromiter',
"""
fromiter(iterable, dtype, count=-1)
@@ -1168,7 +1152,7 @@ add_newdoc('numpy.core.multiarray', 'fromiter',
add_newdoc('numpy.core.multiarray', 'fromfile',
"""
- fromfile(file, dtype=float, count=-1, sep='')
+ fromfile(file, dtype=float, count=-1, sep='', offset=0)
Construct an array from data in a text or binary file.
@@ -1178,8 +1162,12 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Parameters
----------
- file : file or str
+ file : file or str or Path
Open file object or filename.
+
+ .. versionchanged:: 1.17.0
+ `pathlib.Path` objects are now accepted.
+
dtype : data-type
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
@@ -1193,6 +1181,11 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Spaces (" ") in the separator match zero or more whitespace characters.
A separator consisting only of spaces must match at least one
whitespace.
+ offset : int
+ The offset (in bytes) from the file's current position. Defaults to 0.
+ Only permitted for binary files.
+
+ .. versionadded:: 1.17.0
See also
--------
@@ -1212,32 +1205,32 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
--------
Construct an ndarray:
- >>> dt = np.dtype([('time', [('min', int), ('sec', int)]),
+ >>> dt = np.dtype([('time', [('min', np.int64), ('sec', np.int64)]),
... ('temp', float)])
>>> x = np.zeros((1,), dtype=dt)
>>> x['time']['min'] = 10; x['temp'] = 98.25
>>> x
array([((10, 0), 98.25)],
- dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
Save the raw data to disk:
- >>> import os
- >>> fname = os.tmpnam()
+ >>> import tempfile
+ >>> fname = tempfile.mkstemp()[1]
>>> x.tofile(fname)
Read the raw data from disk:
>>> np.fromfile(fname, dtype=dt)
array([((10, 0), 98.25)],
- dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
The recommended way to store and load data:
>>> np.save(fname, x)
>>> np.load(fname + '.npy')
array([((10, 0), 98.25)],
- dtype=[('time', [('min', '<i4'), ('sec', '<i4')]), ('temp', '<f8')])
+ dtype=[('time', [('min', '<i8'), ('sec', '<i8')]), ('temp', '<f8')])
""")
@@ -1265,17 +1258,16 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
>>> dt = np.dtype(int)
>>> dt = dt.newbyteorder('>')
- >>> np.frombuffer(buf, dtype=dt)
+ >>> np.frombuffer(buf, dtype=dt) # doctest: +SKIP
The data of the resulting array will not be byteswapped, but will be
interpreted correctly.
Examples
--------
- >>> s = 'hello world'
+ >>> s = b'hello world'
>>> np.frombuffer(s, dtype='S1', count=5, offset=6)
- array(['w', 'o', 'r', 'l', 'd'],
- dtype='|S1')
+ array([b'w', b'o', b'r', b'l', b'd'], dtype='|S1')
>>> np.frombuffer(b'\\x01\\x02', dtype=np.uint8)
array([1, 2], dtype=uint8)
@@ -1284,163 +1276,6 @@ add_newdoc('numpy.core.multiarray', 'frombuffer',
""")
-add_newdoc('numpy.core.multiarray', 'concatenate',
- """
- concatenate((a1, a2, ...), axis=0, out=None)
-
- Join a sequence of arrays along an existing axis.
-
- Parameters
- ----------
- a1, a2, ... : sequence of array_like
- The arrays must have the same shape, except in the dimension
- corresponding to `axis` (the first, by default).
- axis : int, optional
- The axis along which the arrays will be joined. If axis is None,
- arrays are flattened before use. Default is 0.
- out : ndarray, optional
- If provided, the destination to place the result. The shape must be
- correct, matching that of what concatenate would have returned if no
- out argument were specified.
-
- Returns
- -------
- res : ndarray
- The concatenated array.
-
- See Also
- --------
- ma.concatenate : Concatenate function that preserves input masks.
- array_split : Split an array into multiple sub-arrays of equal or
- near-equal size.
- split : Split array into a list of multiple sub-arrays of equal size.
- hsplit : Split array into multiple sub-arrays horizontally (column wise)
- vsplit : Split array into multiple sub-arrays vertically (row wise)
- dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
- stack : Stack a sequence of arrays along a new axis.
- hstack : Stack arrays in sequence horizontally (column wise)
- vstack : Stack arrays in sequence vertically (row wise)
- dstack : Stack arrays in sequence depth wise (along third dimension)
- block : Assemble arrays from blocks.
-
- Notes
- -----
- When one or more of the arrays to be concatenated is a MaskedArray,
- this function will return a MaskedArray object instead of an ndarray,
- but the input masks are *not* preserved. In cases where a MaskedArray
- is expected as input, use the ma.concatenate function from the masked
- array module instead.
-
- Examples
- --------
- >>> a = np.array([[1, 2], [3, 4]])
- >>> b = np.array([[5, 6]])
- >>> np.concatenate((a, b), axis=0)
- array([[1, 2],
- [3, 4],
- [5, 6]])
- >>> np.concatenate((a, b.T), axis=1)
- array([[1, 2, 5],
- [3, 4, 6]])
- >>> np.concatenate((a, b), axis=None)
- array([1, 2, 3, 4, 5, 6])
-
- This function will not preserve masking of MaskedArray inputs.
-
- >>> a = np.ma.arange(3)
- >>> a[1] = np.ma.masked
- >>> b = np.arange(2, 5)
- >>> a
- masked_array(data=[0, --, 2],
- mask=[False, True, False],
- fill_value=999999)
- >>> b
- array([2, 3, 4])
- >>> np.concatenate([a, b])
- masked_array(data=[0, 1, 2, 2, 3, 4],
- mask=False,
- fill_value=999999)
- >>> np.ma.concatenate([a, b])
- masked_array(data=[0, --, 2, 2, 3, 4],
- mask=[False, True, False, False, False, False],
- fill_value=999999)
-
- """)
-
-add_newdoc('numpy.core', 'inner',
- """
- inner(a, b)
-
- Inner product of two arrays.
-
- Ordinary inner product of vectors for 1-D arrays (without complex
- conjugation), in higher dimensions a sum product over the last axes.
-
- Parameters
- ----------
- a, b : array_like
- If `a` and `b` are nonscalar, their last dimensions must match.
-
- Returns
- -------
- out : ndarray
- `out.shape = a.shape[:-1] + b.shape[:-1]`
-
- Raises
- ------
- ValueError
- If the last dimension of `a` and `b` has different size.
-
- See Also
- --------
- tensordot : Sum products over arbitrary axes.
- dot : Generalised matrix product, using second last dimension of `b`.
- einsum : Einstein summation convention.
-
- Notes
- -----
- For vectors (1-D arrays) it computes the ordinary inner-product::
-
- np.inner(a, b) = sum(a[:]*b[:])
-
- More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
-
- np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
-
- or explicitly::
-
- np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
- = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
-
- In addition `a` or `b` may be scalars, in which case::
-
- np.inner(a,b) = a*b
-
- Examples
- --------
- Ordinary inner product for vectors:
-
- >>> a = np.array([1,2,3])
- >>> b = np.array([0,1,0])
- >>> np.inner(a, b)
- 2
-
- A multidimensional example:
-
- >>> a = np.arange(24).reshape((2,3,4))
- >>> b = np.arange(4)
- >>> np.inner(a, b)
- array([[ 14, 38, 62],
- [ 86, 110, 134]])
-
- An example where `b` is a scalar:
-
- >>> np.inner(np.eye(2), 7)
- array([[ 7., 0.],
- [ 0., 7.]])
-
- """)
-
add_newdoc('numpy.core', 'fastCopyAndTranspose',
"""_fastCopyAndTranspose(a)""")
@@ -1511,7 +1346,7 @@ add_newdoc('numpy.core.multiarray', 'arange',
add_newdoc('numpy.core.multiarray', '_get_ndarray_c_version',
"""_get_ndarray_c_version()
- Return the compile time NDARRAY_VERSION number.
+ Return the compile time NPY_VERSION (formerly called NDARRAY_VERSION) number.
""")
@@ -1537,6 +1372,12 @@ add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
Set numerical operators for array objects.
+ .. deprecated:: 1.16
+
+ For the general case, use :c:func:`PyUFunc_ReplaceLoopBySignature`.
+ For ndarray subclasses, define the ``__array_ufunc__`` method and
+ override the relevant ufunc.
+
Parameters
----------
op1, op2, ... : callable
@@ -1575,263 +1416,6 @@ add_newdoc('numpy.core.multiarray', 'set_numeric_ops',
""")
-add_newdoc('numpy.core.multiarray', 'where',
- """
- where(condition, [x, y])
-
- Return elements chosen from `x` or `y` depending on `condition`.
-
- .. note::
- When only `condition` is provided, this function is a shorthand for
- ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
- preferred, as it behaves correctly for subclasses. The rest of this
- documentation covers only the case where all three arguments are
- provided.
-
- Parameters
- ----------
- condition : array_like, bool
- Where True, yield `x`, otherwise yield `y`.
- x, y : array_like
- Values from which to choose. `x`, `y` and `condition` need to be
- broadcastable to some shape.
-
- Returns
- -------
- out : ndarray
- An array with elements from `x` where `condition` is True, and elements
- from `y` elsewhere.
-
- See Also
- --------
- choose
- nonzero : The function that is called when x and y are omitted
-
- Notes
- -----
- If all the arrays are 1-D, `where` is equivalent to::
-
- [xv if c else yv
- for c, xv, yv in zip(condition, x, y)]
-
- Examples
- --------
- >>> a = np.arange(10)
- >>> a
- array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
- >>> np.where(a < 5, a, 10*a)
- array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
-
- This can be used on multidimensional arrays too:
-
- >>> np.where([[True, False], [True, True]],
- ... [[1, 2], [3, 4]],
- ... [[9, 8], [7, 6]])
- array([[1, 8],
- [3, 4]])
-
- The shapes of x, y, and the condition are broadcast together:
-
- >>> x, y = np.ogrid[:3, :4]
- >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
- array([[10, 0, 0, 0],
- [10, 11, 1, 1],
- [10, 11, 12, 2]])
-
- >>> a = np.array([[0, 1, 2],
- ... [0, 2, 4],
- ... [0, 3, 6]])
- >>> np.where(a < 4, a, -1) # -1 is broadcast
- array([[ 0, 1, 2],
- [ 0, 2, -1],
- [ 0, 3, -1]])
- """)
-
-
-add_newdoc('numpy.core.multiarray', 'lexsort',
- """
- lexsort(keys, axis=-1)
-
- Perform an indirect stable sort using a sequence of keys.
-
- Given multiple sorting keys, which can be interpreted as columns in a
- spreadsheet, lexsort returns an array of integer indices that describes
- the sort order by multiple columns. The last key in the sequence is used
- for the primary sort order, the second-to-last key for the secondary sort
- order, and so on. The keys argument must be a sequence of objects that
- can be converted to arrays of the same shape. If a 2D array is provided
- for the keys argument, it's rows are interpreted as the sorting keys and
- sorting is according to the last row, second last row etc.
-
- Parameters
- ----------
- keys : (k, N) array or tuple containing k (N,)-shaped sequences
- The `k` different "columns" to be sorted. The last column (or row if
- `keys` is a 2D array) is the primary sort key.
- axis : int, optional
- Axis to be indirectly sorted. By default, sort over the last axis.
-
- Returns
- -------
- indices : (N,) ndarray of ints
- Array of indices that sort the keys along the specified axis.
-
- See Also
- --------
- argsort : Indirect sort.
- ndarray.sort : In-place sort.
- sort : Return a sorted copy of an array.
-
- Examples
- --------
- Sort names: first by surname, then by name.
-
- >>> surnames = ('Hertz', 'Galilei', 'Hertz')
- >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
- >>> ind = np.lexsort((first_names, surnames))
- >>> ind
- array([1, 2, 0])
-
- >>> [surnames[i] + ", " + first_names[i] for i in ind]
- ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
-
- Sort two columns of numbers:
-
- >>> a = [1,5,1,4,3,4,4] # First column
- >>> b = [9,4,0,4,0,2,1] # Second column
- >>> ind = np.lexsort((b,a)) # Sort by a, then by b
- >>> print(ind)
- [2 0 4 6 5 3 1]
-
- >>> [(a[i],b[i]) for i in ind]
- [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
-
- Note that sorting is first according to the elements of ``a``.
- Secondary sorting is according to the elements of ``b``.
-
- A normal ``argsort`` would have yielded:
-
- >>> [(a[i],b[i]) for i in np.argsort(a)]
- [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
-
- Structured arrays are sorted lexically by ``argsort``:
-
- >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
- ... dtype=np.dtype([('x', int), ('y', int)]))
-
- >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
- array([2, 0, 4, 6, 5, 3, 1])
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'can_cast',
- """
- can_cast(from_, to, casting='safe')
-
- Returns True if cast between data types can occur according to the
- casting rule. If from is a scalar or array scalar, also returns
- True if the scalar value can be cast without overflow or truncation
- to an integer.
-
- Parameters
- ----------
- from_ : dtype, dtype specifier, scalar, or array
- Data type, scalar, or array to cast from.
- to : dtype or dtype specifier
- Data type to cast to.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- Controls what kind of data casting may occur.
-
- * 'no' means the data types should not be cast at all.
- * 'equiv' means only byte-order changes are allowed.
- * 'safe' means only casts which can preserve values are allowed.
- * 'same_kind' means only safe casts or casts within a kind,
- like float64 to float32, are allowed.
- * 'unsafe' means any data conversions may be done.
-
- Returns
- -------
- out : bool
- True if cast can occur according to the casting rule.
-
- Notes
- -----
- Starting in NumPy 1.9, can_cast function now returns False in 'safe'
- casting mode for integer/float dtype and string dtype if the string dtype
- length is not long enough to store the max integer/float value converted
- to a string. Previously can_cast in 'safe' mode returned True for
- integer/float dtype and a string dtype of any length.
-
- See also
- --------
- dtype, result_type
-
- Examples
- --------
- Basic examples
-
- >>> np.can_cast(np.int32, np.int64)
- True
- >>> np.can_cast(np.float64, complex)
- True
- >>> np.can_cast(complex, float)
- False
-
- >>> np.can_cast('i8', 'f8')
- True
- >>> np.can_cast('i8', 'f4')
- False
- >>> np.can_cast('i4', 'S4')
- False
-
- Casting scalars
-
- >>> np.can_cast(100, 'i1')
- True
- >>> np.can_cast(150, 'i1')
- False
- >>> np.can_cast(150, 'u1')
- True
-
- >>> np.can_cast(3.5e100, np.float32)
- False
- >>> np.can_cast(1000.0, np.float32)
- True
-
- Array scalar checks the value, array does not
-
- >>> np.can_cast(np.array(1000.0), np.float32)
- True
- >>> np.can_cast(np.array([1000.0]), np.float32)
- False
-
- Using the casting rules
-
- >>> np.can_cast('i8', 'i8', 'no')
- True
- >>> np.can_cast('<i8', '>i8', 'no')
- False
-
- >>> np.can_cast('<i8', '>i8', 'equiv')
- True
- >>> np.can_cast('<i4', '>i8', 'equiv')
- False
-
- >>> np.can_cast('<i4', '>i8', 'safe')
- True
- >>> np.can_cast('<i8', '>i4', 'safe')
- False
-
- >>> np.can_cast('<i8', '>i4', 'same_kind')
- True
- >>> np.can_cast('<i8', '>u4', 'same_kind')
- False
-
- >>> np.can_cast('<i8', '>u4', 'unsafe')
- True
-
- """)
-
add_newdoc('numpy.core.multiarray', 'promote_types',
"""
promote_types(type1, type2)
@@ -1892,443 +1476,64 @@ add_newdoc('numpy.core.multiarray', 'promote_types',
""")
-add_newdoc('numpy.core.multiarray', 'min_scalar_type',
- """
- min_scalar_type(a)
+if sys.version_info.major < 3:
+ add_newdoc('numpy.core.multiarray', 'newbuffer',
+ """
+ newbuffer(size)
- For scalar ``a``, returns the data type with the smallest size
- and smallest scalar kind which can hold its value. For non-scalar
- array ``a``, returns the vector's dtype unmodified.
+ Return a new uninitialized buffer object.
- Floating point values are not demoted to integers,
- and complex values are not demoted to floats.
+ Parameters
+ ----------
+ size : int
+ Size in bytes of returned buffer object.
- Parameters
- ----------
- a : scalar or array_like
- The value whose minimal data type is to be found.
+ Returns
+ -------
+ newbuffer : buffer object
+ Returned, uninitialized buffer object of `size` bytes.
- Returns
- -------
- out : dtype
- The minimal data type.
+ """)
- Notes
- -----
- .. versionadded:: 1.6.0
+ add_newdoc('numpy.core.multiarray', 'getbuffer',
+ """
+ getbuffer(obj [,offset[, size]])
- See Also
- --------
- result_type, promote_types, dtype, can_cast
-
- Examples
- --------
- >>> np.min_scalar_type(10)
- dtype('uint8')
-
- >>> np.min_scalar_type(-260)
- dtype('int16')
-
- >>> np.min_scalar_type(3.1)
- dtype('float16')
-
- >>> np.min_scalar_type(1e50)
- dtype('float64')
-
- >>> np.min_scalar_type(np.arange(4,dtype='f8'))
- dtype('float64')
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'result_type',
- """
- result_type(*arrays_and_dtypes)
-
- Returns the type that results from applying the NumPy
- type promotion rules to the arguments.
-
- Type promotion in NumPy works similarly to the rules in languages
- like C++, with some slight differences. When both scalars and
- arrays are used, the array's type takes precedence and the actual value
- of the scalar is taken into account.
-
- For example, calculating 3*a, where a is an array of 32-bit floats,
- intuitively should result in a 32-bit float output. If the 3 is a
- 32-bit integer, the NumPy rules indicate it can't convert losslessly
- into a 32-bit float, so a 64-bit float should be the result type.
- By examining the value of the constant, '3', we see that it fits in
- an 8-bit integer, which can be cast losslessly into the 32-bit float.
-
- Parameters
- ----------
- arrays_and_dtypes : list of arrays and dtypes
- The operands of some operation whose result type is needed.
-
- Returns
- -------
- out : dtype
- The result type.
-
- See also
- --------
- dtype, promote_types, min_scalar_type, can_cast
-
- Notes
- -----
- .. versionadded:: 1.6.0
-
- The specific algorithm used is as follows.
-
- Categories are determined by first checking which of boolean,
- integer (int/uint), or floating point (float/complex) the maximum
- kind of all the arrays and the scalars are.
-
- If there are only scalars or the maximum category of the scalars
- is higher than the maximum category of the arrays,
- the data types are combined with :func:`promote_types`
- to produce the return value.
-
- Otherwise, `min_scalar_type` is called on each array, and
- the resulting data types are all combined with :func:`promote_types`
- to produce the return value.
-
- The set of int values is not a subset of the uint values for types
- with the same number of bits, something not reflected in
- :func:`min_scalar_type`, but handled as a special case in `result_type`.
-
- Examples
- --------
- >>> np.result_type(3, np.arange(7, dtype='i1'))
- dtype('int8')
-
- >>> np.result_type('i4', 'c8')
- dtype('complex128')
-
- >>> np.result_type(3.0, -2)
- dtype('float64')
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'newbuffer',
- """
- newbuffer(size)
-
- Return a new uninitialized buffer object.
-
- Parameters
- ----------
- size : int
- Size in bytes of returned buffer object.
-
- Returns
- -------
- newbuffer : buffer object
- Returned, uninitialized buffer object of `size` bytes.
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'getbuffer',
- """
- getbuffer(obj [,offset[, size]])
-
- Create a buffer object from the given object referencing a slice of
- length size starting at offset.
-
- Default is the entire buffer. A read-write buffer is attempted followed
- by a read-only buffer.
-
- Parameters
- ----------
- obj : object
-
- offset : int, optional
-
- size : int, optional
-
- Returns
- -------
- buffer_obj : buffer
-
- Examples
- --------
- >>> buf = np.getbuffer(np.ones(5), 1, 3)
- >>> len(buf)
- 3
- >>> buf[0]
- '\\x00'
- >>> buf
- <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
-
- """)
-
-add_newdoc('numpy.core', 'dot',
- """
- dot(a, b, out=None)
-
- Dot product of two arrays. Specifically,
-
- - If both `a` and `b` are 1-D arrays, it is inner product of vectors
- (without complex conjugation).
-
- - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
- but using :func:`matmul` or ``a @ b`` is preferred.
-
- - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
- and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
-
- - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
- the last axis of `a` and `b`.
-
- - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
- sum product over the last axis of `a` and the second-to-last axis of `b`::
-
- dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
-
- Parameters
- ----------
- a : array_like
- First argument.
- b : array_like
- Second argument.
- out : ndarray, optional
- Output argument. This must have the exact kind that would be returned
- if it was not used. In particular, it must have the right type, must be
- C-contiguous, and its dtype must be the dtype that would be returned
- for `dot(a,b)`. This is a performance feature. Therefore, if these
- conditions are not met, an exception is raised, instead of attempting
- to be flexible.
-
- Returns
- -------
- output : ndarray
- Returns the dot product of `a` and `b`. If `a` and `b` are both
- scalars or both 1-D arrays then a scalar is returned; otherwise
- an array is returned.
- If `out` is given, then it is returned.
-
- Raises
- ------
- ValueError
- If the last dimension of `a` is not the same size as
- the second-to-last dimension of `b`.
-
- See Also
- --------
- vdot : Complex-conjugating dot product.
- tensordot : Sum products over arbitrary axes.
- einsum : Einstein summation convention.
- matmul : '@' operator as method with out parameter.
-
- Examples
- --------
- >>> np.dot(3, 4)
- 12
-
- Neither argument is complex-conjugated:
-
- >>> np.dot([2j, 3j], [2j, 3j])
- (-13+0j)
-
- For 2-D arrays it is the matrix product:
-
- >>> a = [[1, 0], [0, 1]]
- >>> b = [[4, 1], [2, 2]]
- >>> np.dot(a, b)
- array([[4, 1],
- [2, 2]])
-
- >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
- >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
- >>> np.dot(a, b)[2,3,2,1,2,2]
- 499128
- >>> sum(a[2,3,2,:] * b[1,2,:,2])
- 499128
-
- """)
-
-add_newdoc('numpy.core', 'matmul',
- """
- matmul(a, b, out=None)
-
- Matrix product of two arrays.
-
- The behavior depends on the arguments in the following way.
+ Create a buffer object from the given object referencing a slice of
+ length size starting at offset.
- - If both arguments are 2-D they are multiplied like conventional
- matrices.
- - If either argument is N-D, N > 2, it is treated as a stack of
- matrices residing in the last two indexes and broadcast accordingly.
- - If the first argument is 1-D, it is promoted to a matrix by
- prepending a 1 to its dimensions. After matrix multiplication
- the prepended 1 is removed.
- - If the second argument is 1-D, it is promoted to a matrix by
- appending a 1 to its dimensions. After matrix multiplication
- the appended 1 is removed.
+ Default is the entire buffer. A read-write buffer is attempted followed
+ by a read-only buffer.
- Multiplication by a scalar is not allowed, use ``*`` instead. Note that
- multiplying a stack of matrices with a vector will result in a stack of
- vectors, but matmul will not recognize it as such.
+ Parameters
+ ----------
+ obj : object
- ``matmul`` differs from ``dot`` in two important ways.
+ offset : int, optional
- - Multiplication by scalars is not allowed.
- - Stacks of matrices are broadcast together as if the matrices
- were elements.
+ size : int, optional
- .. warning::
- This function is preliminary and included in NumPy 1.10.0 for testing
- and documentation. Its semantics will not change, but the number and
- order of the optional arguments will.
+ Returns
+ -------
+ buffer_obj : buffer
- .. versionadded:: 1.10.0
+ Examples
+ --------
+ >>> buf = np.getbuffer(np.ones(5), 1, 3)
+ >>> len(buf)
+ 3
+ >>> buf[0]
+ '\\x00'
+ >>> buf
+ <read-write buffer for 0x8af1e70, size 3, offset 1 at 0x8ba4ec0>
- Parameters
- ----------
- a : array_like
- First argument.
- b : array_like
- Second argument.
- out : ndarray, optional
- Output argument. This must have the exact kind that would be returned
- if it was not used. In particular, it must have the right type, must be
- C-contiguous, and its dtype must be the dtype that would be returned
- for `dot(a,b)`. This is a performance feature. Therefore, if these
- conditions are not met, an exception is raised, instead of attempting
- to be flexible.
-
- Returns
- -------
- output : ndarray
- Returns the dot product of `a` and `b`. If `a` and `b` are both
- 1-D arrays then a scalar is returned; otherwise an array is
- returned. If `out` is given, then it is returned.
-
- Raises
- ------
- ValueError
- If the last dimension of `a` is not the same size as
- the second-to-last dimension of `b`.
-
- If scalar value is passed.
-
- See Also
- --------
- vdot : Complex-conjugating dot product.
- tensordot : Sum products over arbitrary axes.
- einsum : Einstein summation convention.
- dot : alternative matrix product with different broadcasting rules.
-
- Notes
- -----
- The matmul function implements the semantics of the `@` operator introduced
- in Python 3.5 following PEP465.
-
- Examples
- --------
- For 2-D arrays it is the matrix product:
-
- >>> a = [[1, 0], [0, 1]]
- >>> b = [[4, 1], [2, 2]]
- >>> np.matmul(a, b)
- array([[4, 1],
- [2, 2]])
-
- For 2-D mixed with 1-D, the result is the usual.
-
- >>> a = [[1, 0], [0, 1]]
- >>> b = [1, 2]
- >>> np.matmul(a, b)
- array([1, 2])
- >>> np.matmul(b, a)
- array([1, 2])
-
-
- Broadcasting is conventional for stacks of arrays
-
- >>> a = np.arange(2*2*4).reshape((2,2,4))
- >>> b = np.arange(2*2*4).reshape((2,4,2))
- >>> np.matmul(a,b).shape
- (2, 2, 2)
- >>> np.matmul(a,b)[0,1,1]
- 98
- >>> sum(a[0,1,:] * b[0,:,1])
- 98
-
- Vector, vector returns the scalar inner product, but neither argument
- is complex-conjugated:
-
- >>> np.matmul([2j, 3j], [2j, 3j])
- (-13+0j)
-
- Scalar multiplication raises an error.
-
- >>> np.matmul([1,2], 3)
- Traceback (most recent call last):
- ...
- ValueError: Scalar operands are not allowed, use '*' instead
-
- """)
-
-add_newdoc('numpy.core', 'vdot',
- """
- vdot(a, b)
-
- Return the dot product of two vectors.
-
- The vdot(`a`, `b`) function handles complex numbers differently than
- dot(`a`, `b`). If the first argument is complex the complex conjugate
- of the first argument is used for the calculation of the dot product.
-
- Note that `vdot` handles multidimensional arrays differently than `dot`:
- it does *not* perform a matrix product, but flattens input arguments
- to 1-D vectors first. Consequently, it should only be used for vectors.
-
- Parameters
- ----------
- a : array_like
- If `a` is complex the complex conjugate is taken before calculation
- of the dot product.
- b : array_like
- Second argument to the dot product.
-
- Returns
- -------
- output : ndarray
- Dot product of `a` and `b`. Can be an int, float, or
- complex depending on the types of `a` and `b`.
-
- See Also
- --------
- dot : Return the dot product without using the complex conjugate of the
- first argument.
-
- Examples
- --------
- >>> a = np.array([1+2j,3+4j])
- >>> b = np.array([5+6j,7+8j])
- >>> np.vdot(a, b)
- (70-8j)
- >>> np.vdot(b, a)
- (70+8j)
-
- Note that higher-dimensional arrays are flattened!
-
- >>> a = np.array([[1, 4], [5, 6]])
- >>> b = np.array([[4, 1], [2, 2]])
- >>> np.vdot(a, b)
- 30
- >>> np.vdot(b, a)
- 30
- >>> 1*4 + 4*1 + 5*2 + 6*2
- 30
-
- """)
+ """)
add_newdoc('numpy.core.multiarray', 'c_einsum',
"""
c_einsum(subscripts, *operands, out=None, dtype=None, order='K',
casting='safe')
-
+
*This documentation shadows that of the native python implementation of the `einsum` function,
except all references and examples related to the `optimize` argument (v 0.12.0) have been removed.*
@@ -2752,8 +1957,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray',
First mode, `buffer` is None:
>>> np.ndarray(shape=(2,2), dtype=float, order='F')
- array([[ -1.13698227e+002, 4.25087011e-303],
- [ 2.88528414e-306, 3.27025015e-309]]) #random
+ array([[0.0e+000, 0.0e+000], # random
+ [ nan, 2.5e-323]])
Second mode:
@@ -2788,13 +1993,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('__array_struct__',
"""Array protocol: C-struct side."""))
-add_newdoc('numpy.core.multiarray', 'ndarray', ('_as_parameter_',
- """Allow the array to be interpreted as a ctypes object by returning the
- data-memory location as an integer
-
- """))
-
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('base',
"""
Base object if memory is from some other object.
@@ -2844,32 +2042,30 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('ctypes',
-----
Below are the public attributes of this object which were documented
in "Guide to NumPy" (we have omitted undocumented public attributes,
- as well as documented private attributes):
+ as well as documented private attributes):
.. autoattribute:: numpy.core._internal._ctypes.data
+ :noindex:
.. autoattribute:: numpy.core._internal._ctypes.shape
+ :noindex:
.. autoattribute:: numpy.core._internal._ctypes.strides
+ :noindex:
.. automethod:: numpy.core._internal._ctypes.data_as
+ :noindex:
.. automethod:: numpy.core._internal._ctypes.shape_as
+ :noindex:
.. automethod:: numpy.core._internal._ctypes.strides_as
-
- Be careful using the ctypes attribute - especially on temporary
- arrays or arrays constructed on the fly. For example, calling
- ``(a+b).ctypes.data_as(ctypes.c_void_p)`` returns a pointer to memory
- that is invalid because the array created as (a+b) is deallocated
- before the next Python statement. You can avoid this problem using
- either ``c=a+b`` or ``ct=(a+b).ctypes``. In the latter case, ct will
- hold a reference to the array until ct is deleted or re-assigned.
+ :noindex:
If the ctypes module is not available, then the ctypes attribute
of array objects still returns something useful, but ctypes objects
are not returned and errors may be raised instead. In particular,
- the object will still have the as parameter attribute which will
+ the object will still have the ``as_parameter`` attribute which will
return an integer equal to the data attribute.
Examples
@@ -3067,7 +2263,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('flat',
>>> x.T.flat[3]
5
>>> type(x.flat)
- <type 'numpy.flatiter'>
+ <class 'numpy.flatiter'>
An assignment example:
@@ -3186,7 +2382,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('size',
Notes
-----
- `a.size` returns a standard arbitrary precision Python integer. This
+ `a.size` returns a standard arbitrary precision Python integer. This
may not be the case with other methods of obtaining the same value
(like the suggested ``np.prod(a.shape)``, which returns an instance
of ``np.int_``), and may be relevant if the value is used further in
@@ -3267,8 +2463,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('strides',
add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
"""
- Same as self.transpose(), except that self is returned if
- self.ndim < 2.
+ The transposed array.
+
+ Same as ``self.transpose()``.
Examples
--------
@@ -3285,6 +2482,10 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('T',
>>> x.T
array([ 1., 2., 3., 4.])
+ See Also
+ --------
+ transpose
+
"""))
@@ -3426,7 +2627,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('argmin',
add_newdoc('numpy.core.multiarray', 'ndarray', ('argsort',
"""
- a.argsort(axis=-1, kind='quicksort', order=None)
+ a.argsort(axis=-1, kind=None, order=None)
Returns the indices that would sort this array.
@@ -3502,10 +2703,15 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
Notes
-----
- Starting in NumPy 1.9, astype method now returns an error if the string
- dtype to cast to is not long enough in 'safe' casting mode to hold the max
- value of integer/float array that is being casted. Previously the casting
- was allowed even if the result was truncated.
+ .. versionchanged:: 1.17.0
+ Casting between a simple data type and a structured one is possible only
+ for "unsafe" casting. Casting to multiple fields is allowed, but
+ casting from multiple fields is not.
+
+ .. versionchanged:: 1.9.0
+ Casting from numeric to string types in 'safe' casting mode requires
+ that the string dtype length is long enough to store the max
+ integer/float value converted.
Raises
------
@@ -3517,7 +2723,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('astype',
--------
>>> x = np.array([1, 2, 2.5])
>>> x
- array([ 1. , 2. , 2.5])
+ array([1. , 2. , 2.5])
>>> x.astype(int)
array([1, 2, 2])
@@ -3533,6 +2739,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
Toggle between low-endian and big-endian data representation by
returning a byteswapped array, optionally swapped in-place.
+ Arrays of byte-strings are not swapped. The real and imaginary
+ parts of a complex number are swapped individually.
Parameters
----------
@@ -3548,19 +2756,31 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('byteswap',
Examples
--------
>>> A = np.array([1, 256, 8755], dtype=np.int16)
- >>> map(hex, A)
+ >>> list(map(hex, A))
['0x1', '0x100', '0x2233']
>>> A.byteswap(inplace=True)
array([ 256, 1, 13090], dtype=int16)
- >>> map(hex, A)
+ >>> list(map(hex, A))
['0x100', '0x1', '0x3322']
- Arrays of strings are not swapped
+ Arrays of byte-strings are not swapped
- >>> A = np.array(['ceg', 'fac'])
+ >>> A = np.array([b'ceg', b'fac'])
>>> A.byteswap()
- array(['ceg', 'fac'],
- dtype='|S3')
+ array([b'ceg', b'fac'], dtype='|S3')
+
+ ``A.newbyteorder().byteswap()`` produces an array with the same values
+ but different representation in memory
+
+ >>> A = np.array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0,
+ 0, 0], dtype=uint8)
+ >>> A.newbyteorder().byteswap(inplace=True)
+ array([1, 2, 3])
+ >>> A.view(np.uint8)
+ array([0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0,
+ 0, 3], dtype=uint8)
"""))
@@ -3582,7 +2802,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('choose',
add_newdoc('numpy.core.multiarray', 'ndarray', ('clip',
"""
- a.clip(min=None, max=None, out=None)
+ a.clip(min=None, max=None, out=None, **kwargs)
Return an array whose values are limited to ``[min, max]``.
One of max or min must be given.
@@ -3748,14 +2968,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dot',
>>> a = np.eye(2)
>>> b = np.ones((2, 2)) * 2
>>> a.dot(b)
- array([[ 2., 2.],
- [ 2., 2.]])
+ array([[2., 2.],
+ [2., 2.]])
This array method can be conveniently chained:
>>> a.dot(b).dot(b)
- array([[ 8., 8.],
- [ 8., 8.]])
+ array([[8., 8.],
+ [8., 8.]])
"""))
@@ -3768,9 +2988,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('dump',
Parameters
----------
- file : str
+ file : str or Path
A string naming the dump file.
+ .. versionchanged:: 1.17.0
+ `pathlib.Path` objects are now accepted.
+
"""))
@@ -3808,7 +3031,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('fill',
>>> a = np.empty(2)
>>> a.fill(1)
>>> a
- array([ 1., 1.])
+ array([1., 1.])
"""))
@@ -3877,18 +3100,18 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('getfield',
>>> x = np.diag([1.+1.j]*2)
>>> x[1, 1] = 2 + 4.j
>>> x
- array([[ 1.+1.j, 0.+0.j],
- [ 0.+0.j, 2.+4.j]])
+ array([[1.+1.j, 0.+0.j],
+ [0.+0.j, 2.+4.j]])
>>> x.getfield(np.float64)
- array([[ 1., 0.],
- [ 0., 2.]])
+ array([[1., 0.],
+ [0., 2.]])
By choosing an offset of 8 bytes we can select the complex part of the
array for our view:
>>> x.getfield(np.float64, offset=8)
- array([[ 1., 0.],
- [ 0., 4.]])
+ array([[1., 0.],
+ [0., 4.]])
"""))
@@ -3934,19 +3157,20 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('item',
Examples
--------
+ >>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
- array([[3, 1, 7],
- [2, 8, 3],
- [8, 5, 3]])
+ array([[2, 2, 6],
+ [1, 3, 6],
+ [1, 0, 1]])
>>> x.item(3)
- 2
+ 1
>>> x.item(7)
- 5
+ 0
>>> x.item((0, 1))
- 1
+ 2
>>> x.item((2, 2))
- 3
+ 1
"""))
@@ -3982,24 +3206,25 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('itemset',
Examples
--------
+ >>> np.random.seed(123)
>>> x = np.random.randint(9, size=(3, 3))
>>> x
- array([[3, 1, 7],
- [2, 8, 3],
- [8, 5, 3]])
+ array([[2, 2, 6],
+ [1, 3, 6],
+ [1, 0, 1]])
>>> x.itemset(4, 0)
>>> x.itemset((2, 2), 9)
>>> x
- array([[3, 1, 7],
- [2, 0, 3],
- [8, 5, 9]])
+ array([[2, 2, 6],
+ [1, 0, 6],
+ [1, 0, 9]])
"""))
add_newdoc('numpy.core.multiarray', 'ndarray', ('max',
"""
- a.max(axis=None, out=None, keepdims=False)
+ a.max(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the maximum along a given axis.
@@ -4029,7 +3254,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('mean',
add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""
- a.min(axis=None, out=None, keepdims=False)
+ a.min(axis=None, out=None, keepdims=False, initial=<no value>, where=True)
Return the minimum along a given axis.
@@ -4042,87 +3267,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('min',
"""))
-add_newdoc('numpy.core.multiarray', 'shares_memory',
- """
- shares_memory(a, b, max_work=None)
-
- Determine if two arrays share memory
-
- Parameters
- ----------
- a, b : ndarray
- Input arrays
- max_work : int, optional
- Effort to spend on solving the overlap problem (maximum number
- of candidate solutions to consider). The following special
- values are recognized:
-
- max_work=MAY_SHARE_EXACT (default)
- The problem is solved exactly. In this case, the function returns
- True only if there is an element shared between the arrays.
- max_work=MAY_SHARE_BOUNDS
- Only the memory bounds of a and b are checked.
-
- Raises
- ------
- numpy.TooHardError
- Exceeded max_work.
-
- Returns
- -------
- out : bool
-
- See Also
- --------
- may_share_memory
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
-
- """)
-
-
-add_newdoc('numpy.core.multiarray', 'may_share_memory',
- """
- may_share_memory(a, b, max_work=None)
-
- Determine if two arrays might share memory
-
- A return of True does not necessarily mean that the two arrays
- share any element. It just means that they *might*.
-
- Only the memory bounds of a and b are checked by default.
-
- Parameters
- ----------
- a, b : ndarray
- Input arrays
- max_work : int, optional
- Effort to spend on solving the overlap problem. See
- `shares_memory` for details. Default for ``may_share_memory``
- is to do a bounds check.
-
- Returns
- -------
- out : bool
-
- See Also
- --------
- shares_memory
-
- Examples
- --------
- >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
- False
- >>> x = np.zeros([3, 4])
- >>> np.may_share_memory(x[:,0], x[:,1])
- True
-
- """)
-
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('newbyteorder',
"""
arr.newbyteorder(new_order='S')
@@ -4182,7 +3326,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('nonzero',
add_newdoc('numpy.core.multiarray', 'ndarray', ('prod',
"""
- a.prod(axis=None, dtype=None, out=None, keepdims=False)
+ a.prod(axis=None, dtype=None, out=None, keepdims=False, initial=1, where=True)
Return the product of the array elements over the given axis
@@ -4224,81 +3368,6 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('put',
"""))
-add_newdoc('numpy.core.multiarray', 'copyto',
- """
- copyto(dst, src, casting='same_kind', where=True)
-
- Copies values from one array to another, broadcasting as necessary.
-
- Raises a TypeError if the `casting` rule is violated, and if
- `where` is provided, it selects which elements to copy.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- dst : ndarray
- The array into which values are copied.
- src : array_like
- The array from which values are copied.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
- Controls what kind of data casting may occur when copying.
-
- * 'no' means the data types should not be cast at all.
- * 'equiv' means only byte-order changes are allowed.
- * 'safe' means only casts which can preserve values are allowed.
- * 'same_kind' means only safe casts or casts within a kind,
- like float64 to float32, are allowed.
- * 'unsafe' means any data conversions may be done.
- where : array_like of bool, optional
- A boolean array which is broadcasted to match the dimensions
- of `dst`, and selects elements to copy from `src` to `dst`
- wherever it contains the value True.
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'putmask',
- """
- putmask(a, mask, values)
-
- Changes elements of an array based on conditional and input values.
-
- Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
-
- If `values` is not the same size as `a` and `mask` then it will repeat.
- This gives behavior different from ``a[mask] = values``.
-
- Parameters
- ----------
- a : array_like
- Target array.
- mask : array_like
- Boolean mask array. It has to be the same shape as `a`.
- values : array_like
- Values to put into `a` where `mask` is True. If `values` is smaller
- than `a` it will be repeated.
-
- See Also
- --------
- place, put, take, copyto
-
- Examples
- --------
- >>> x = np.arange(6).reshape(2, 3)
- >>> np.putmask(x, x>2, x**2)
- >>> x
- array([[ 0, 1, 2],
- [ 9, 16, 25]])
-
- If `values` is smaller than `a` it is repeated:
-
- >>> x = np.arange(5)
- >>> np.putmask(x, x>1, [-33, -44])
- >>> x
- array([ 0, 1, -33, -44, -33])
-
- """)
-
add_newdoc('numpy.core.multiarray', 'ndarray', ('ravel',
"""
@@ -4433,7 +3502,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('resize',
>>> a.resize((1, 1))
Traceback (most recent call last):
...
- ValueError: cannot resize an array that has been referenced ...
+ ValueError: cannot resize an array that references or is referenced ...
Unless `refcheck` is False:
@@ -4506,23 +3575,23 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setfield',
--------
>>> x = np.eye(3)
>>> x.getfield(np.float64)
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
>>> x.setfield(3, np.int32)
>>> x.getfield(np.int32)
array([[3, 3, 3],
[3, 3, 3],
- [3, 3, 3]])
+ [3, 3, 3]], dtype=int32)
>>> x
- array([[ 1.00000000e+000, 1.48219694e-323, 1.48219694e-323],
- [ 1.48219694e-323, 1.00000000e+000, 1.48219694e-323],
- [ 1.48219694e-323, 1.48219694e-323, 1.00000000e+000]])
+ array([[1.0e+000, 1.5e-323, 1.5e-323],
+ [1.5e-323, 1.0e+000, 1.5e-323],
+ [1.5e-323, 1.5e-323, 1.0e+000]])
>>> x.setfield(np.eye(3), np.int32)
>>> x
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
"""))
@@ -4575,6 +3644,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
Examples
--------
+ >>> y = np.array([[3, 1, 7],
+ ... [2, 0, 0],
+ ... [8, 5, 9]])
>>> y
array([[3, 1, 7],
[2, 0, 0],
@@ -4606,9 +3678,9 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('setflags',
add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
"""
- a.sort(axis=-1, kind='quicksort', order=None)
+ a.sort(axis=-1, kind=None, order=None)
- Sort an array, in-place.
+ Sort an array in-place. Refer to `numpy.sort` for full documentation.
Parameters
----------
@@ -4616,7 +3688,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm. Default is 'quicksort'.
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort under the covers and, in general, the
+ actual implementation will vary with datatype. The 'mergesort' option
+ is retained for backwards compatibility.
+
+ .. versionchanged:: 1.15.0.
+ The 'stable' option was added.
+
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
@@ -4634,7 +3713,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
Notes
-----
- See ``sort`` for notes on the different sorting algorithms.
+ See `numpy.sort` for notes on the different sorting algorithms.
Examples
--------
@@ -4654,8 +3733,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('sort',
>>> a = np.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a
- array([('c', 1), ('a', 2)],
- dtype=[('x', '|S1'), ('y', '<i4')])
+ array([(b'c', 1), (b'a', 2)],
+ dtype=[('x', 'S1'), ('y', '<i8')])
"""))
@@ -4711,6 +3790,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('partition',
array([2, 1, 3, 4])
>>> a.partition((1, 3))
+ >>> a
array([1, 2, 3, 4])
"""))
@@ -4747,7 +3827,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('std',
add_newdoc('numpy.core.multiarray', 'ndarray', ('sum',
"""
- a.sum(axis=None, dtype=None, out=None, keepdims=False)
+ a.sum(axis=None, dtype=None, out=None, keepdims=False, initial=0, where=True)
Return the sum of the array elements over the given axis.
@@ -4802,8 +3882,12 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
Parameters
----------
- fid : file or str
+ fid : file or str or Path
An open file object, or a string containing a filename.
+
+ .. versionchanged:: 1.17.0
+ `pathlib.Path` objects are now accepted.
+
sep : str
Separator between array items for text output.
If "" (empty), a binary file is written, equivalent to
@@ -4821,7 +3905,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tofile',
machines with different endianness. Some of these problems can be overcome
by outputting the data as text files, at the expense of speed and file
size.
-
+
When fid is a file object, array contents are directly written to the
file, bypassing the file object's ``write`` method. As a result, tofile
cannot be used with files objects supporting compression (e.g., GzipFile)
@@ -4834,10 +3918,14 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
"""
a.tolist()
- Return the array as a (possibly nested) list.
+ Return the array as an ``a.ndim``-levels deep nested list of Python scalars.
Return a copy of the array data as a (nested) Python list.
- Data items are converted to the nearest compatible Python type.
+ Data items are converted to the nearest compatible builtin Python type, via
+ the `~numpy.ndarray.item` function.
+
+ If ``a.ndim`` is 0, then since the depth of the nested list is 0, it will
+ not be a list at all, but a simple Python scalar.
Parameters
----------
@@ -4845,24 +3933,41 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
Returns
-------
- y : list
+ y : object, or list of object, or list of list of object, or ...
The possibly nested list of array elements.
Notes
-----
- The array may be recreated, ``a = np.array(a.tolist())``.
+ The array may be recreated via ``a = np.array(a.tolist())``, although this
+ may sometimes lose precision.
Examples
--------
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:
+
>>> a = np.array([1, 2])
+ >>> list(a)
+ [1, 2]
>>> a.tolist()
[1, 2]
+
+ However, for a 2D array, ``tolist`` applies recursively:
+
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
[array([1, 2]), array([3, 4])]
>>> a.tolist()
[[1, 2], [3, 4]]
+ The base case for this recursion is a 0D array:
+
+ >>> a = np.array(1)
+ >>> list(a)
+ Traceback (most recent call last):
+ ...
+ TypeError: iteration over a 0-d array
+ >>> a.tolist()
+ 1
"""))
@@ -4892,13 +3997,13 @@ tobytesdoc = """
Examples
--------
- >>> x = np.array([[0, 1], [2, 3]])
+ >>> x = np.array([[0, 1], [2, 3]], dtype='<u2')
>>> x.tobytes()
- b'\\x00\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
+ b'\\x00\\x00\\x01\\x00\\x02\\x00\\x03\\x00'
>>> x.tobytes('C') == x.tobytes()
True
>>> x.tobytes('F')
- b'\\x00\\x00\\x00\\x00\\x02\\x00\\x00\\x00\\x01\\x00\\x00\\x00\\x03\\x00\\x00\\x00'
+ b'\\x00\\x00\\x02\\x00\\x01\\x00\\x03\\x00'
"""
@@ -4934,9 +4039,11 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
Returns a view of the array with axes transposed.
- For a 1-D array, this has no effect. (To change between column and
- row vectors, first cast the 1-D array into a matrix object.)
- For a 2-D array, this is the usual matrix transpose.
+ For a 1-D array this has no effect, as a transposed vector is simply the
+ same vector. To convert a 1-D array into a 2D column vector, an additional
+ dimension must be added. `np.atleast2d(a).T` achieves this, as does
+ `a[:, np.newaxis]`.
+ For a 2-D array, this is a standard matrix transpose.
For an n-D array, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
@@ -4962,6 +4069,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('transpose',
See Also
--------
ndarray.T : Array property returning the array transposed.
+ ndarray.reshape : Give a new shape to an array without changing its data.
Examples
--------
@@ -5048,7 +4156,7 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
>>> y
matrix([[513]], dtype=int16)
>>> print(type(y))
- <class 'numpy.matrixlib.defmatrix.matrix'>
+ <class 'numpy.matrix'>
Creating a view on a structured array so it can be used in calculations
@@ -5058,19 +4166,19 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
array([[1, 2],
[3, 4]], dtype=int8)
>>> xv.mean(0)
- array([ 2., 3.])
+ array([2., 3.])
Making changes to the view changes the underlying array
>>> xv[0,1] = 20
- >>> print(x)
- [(1, 20) (3, 4)]
+ >>> x
+ array([(1, 20), (3, 4)], dtype=[('a', 'i1'), ('b', 'i1')])
Using a view to convert an array to a recarray:
>>> z = x.view(np.recarray)
>>> z.a
- array([1], dtype=int8)
+ array([1, 3], dtype=int8)
Views share data:
@@ -5088,8 +4196,8 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('view',
[4, 5]], dtype=int16)
>>> y.view(dtype=[('width', np.int16), ('length', np.int16)])
Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- ValueError: new type not compatible with array.
+ ...
+ ValueError: To change to a dtype of a different size, the array must be C-contiguous
>>> z = y.copy()
>>> z.view(dtype=[('width', np.int16), ('length', np.int16)])
array([[(1, 2)],
@@ -5140,10 +4248,9 @@ add_newdoc('numpy.core.umath', 'frompyfunc',
>>> oct_array = np.frompyfunc(oct, 1, 1)
>>> oct_array(np.array((10, 30, 100)))
- array([012, 036, 0144], dtype=object)
+ array(['0o12', '0o36', '0o144'], dtype=object)
>>> np.array((oct(10), oct(30), oct(100))) # for comparison
- array(['012', '036', '0144'],
- dtype='|S4')
+ array(['0o12', '0o36', '0o144'], dtype='<U5')
""")
@@ -5190,7 +4297,7 @@ add_newdoc('numpy.core.umath', 'geterrobj',
Examples
--------
>>> np.geterrobj() # first get the defaults
- [10000, 0, None]
+ [8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
@@ -5199,13 +4306,13 @@ add_newdoc('numpy.core.umath', 'geterrobj',
>>> old_err = np.seterr(divide='raise')
>>> old_handler = np.seterrcall(err_handler)
>>> np.geterrobj()
- [20000, 2, <function err_handler at 0x91dcaac>]
+ [8192, 521, <function err_handler at 0x91dcaac>]
>>> old_err = np.seterr(all='ignore')
>>> np.base_repr(np.geterrobj()[1], 8)
'0'
>>> old_err = np.seterr(divide='warn', over='log', under='call',
- invalid='print')
+ ... invalid='print')
>>> np.base_repr(np.geterrobj()[1], 8)
'4351'
@@ -5254,7 +4361,7 @@ add_newdoc('numpy.core.umath', 'seterrobj',
--------
>>> old_errobj = np.geterrobj() # first get the defaults
>>> old_errobj
- [10000, 0, None]
+ [8192, 521, None]
>>> def err_handler(type, flag):
... print("Floating point error (%s), with flag %s" % (type, flag))
@@ -5277,180 +4384,6 @@ add_newdoc('numpy.core.umath', 'seterrobj',
#
##############################################################################
-add_newdoc('numpy.core.multiarray', 'bincount',
- """
- bincount(x, weights=None, minlength=0)
-
- Count number of occurrences of each value in array of non-negative ints.
-
- The number of bins (of size 1) is one larger than the largest value in
- `x`. If `minlength` is specified, there will be at least this number
- of bins in the output array (though it will be longer if necessary,
- depending on the contents of `x`).
- Each bin gives the number of occurrences of its index value in `x`.
- If `weights` is specified the input array is weighted by it, i.e. if a
- value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
- of ``out[n] += 1``.
-
- Parameters
- ----------
- x : array_like, 1 dimension, nonnegative ints
- Input array.
- weights : array_like, optional
- Weights, array of the same shape as `x`.
- minlength : int, optional
- A minimum number of bins for the output array.
-
- .. versionadded:: 1.6.0
-
- Returns
- -------
- out : ndarray of ints
- The result of binning the input array.
- The length of `out` is equal to ``np.amax(x)+1``.
-
- Raises
- ------
- ValueError
- If the input is not 1-dimensional, or contains elements with negative
- values, or if `minlength` is negative.
- TypeError
- If the type of the input is float or complex.
-
- See Also
- --------
- histogram, digitize, unique
-
- Examples
- --------
- >>> np.bincount(np.arange(5))
- array([1, 1, 1, 1, 1])
- >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
- array([1, 3, 1, 1, 0, 0, 0, 1])
-
- >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
- >>> np.bincount(x).size == np.amax(x)+1
- True
-
- The input array needs to be of integer dtype, otherwise a
- TypeError is raised:
-
- >>> np.bincount(np.arange(5, dtype=float))
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: array cannot be safely cast to required type
-
- A possible use of ``bincount`` is to perform sums over
- variable-size chunks of an array, using the ``weights`` keyword.
-
- >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
- >>> x = np.array([0, 1, 1, 2, 2, 2])
- >>> np.bincount(x, weights=w)
- array([ 0.3, 0.7, 1.1])
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'ravel_multi_index',
- """
- ravel_multi_index(multi_index, dims, mode='raise', order='C')
-
- Converts a tuple of index arrays into an array of flat
- indices, applying boundary modes to the multi-index.
-
- Parameters
- ----------
- multi_index : tuple of array_like
- A tuple of integer arrays, one array for each dimension.
- dims : tuple of ints
- The shape of array into which the indices from ``multi_index`` apply.
- mode : {'raise', 'wrap', 'clip'}, optional
- Specifies how out-of-bounds indices are handled. Can specify
- either one mode or a tuple of modes, one mode per index.
-
- * 'raise' -- raise an error (default)
- * 'wrap' -- wrap around
- * 'clip' -- clip to the range
-
- In 'clip' mode, a negative index which would normally
- wrap will clip to 0 instead.
- order : {'C', 'F'}, optional
- Determines whether the multi-index should be viewed as
- indexing in row-major (C-style) or column-major
- (Fortran-style) order.
-
- Returns
- -------
- raveled_indices : ndarray
- An array of indices into the flattened version of an array
- of dimensions ``dims``.
-
- See Also
- --------
- unravel_index
-
- Notes
- -----
- .. versionadded:: 1.6.0
-
- Examples
- --------
- >>> arr = np.array([[3,6,6],[4,5,1]])
- >>> np.ravel_multi_index(arr, (7,6))
- array([22, 41, 37])
- >>> np.ravel_multi_index(arr, (7,6), order='F')
- array([31, 41, 13])
- >>> np.ravel_multi_index(arr, (4,6), mode='clip')
- array([22, 23, 19])
- >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
- array([12, 13, 13])
-
- >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
- 1621
- """)
-
-add_newdoc('numpy.core.multiarray', 'unravel_index',
- """
- unravel_index(indices, dims, order='C')
-
- Converts a flat index or array of flat indices into a tuple
- of coordinate arrays.
-
- Parameters
- ----------
- indices : array_like
- An integer array whose elements are indices into the flattened
- version of an array of dimensions ``dims``. Before version 1.6.0,
- this function accepted just one index value.
- dims : tuple of ints
- The shape of the array to use for unraveling ``indices``.
- order : {'C', 'F'}, optional
- Determines whether the indices should be viewed as indexing in
- row-major (C-style) or column-major (Fortran-style) order.
-
- .. versionadded:: 1.6.0
-
- Returns
- -------
- unraveled_coords : tuple of ndarray
- Each array in the tuple has the same shape as the ``indices``
- array.
-
- See Also
- --------
- ravel_multi_index
-
- Examples
- --------
- >>> np.unravel_index([22, 41, 37], (7,6))
- (array([3, 6, 6]), array([4, 5, 1]))
- >>> np.unravel_index([31, 41, 13], (7,6), order='F')
- (array([3, 6, 6]), array([4, 5, 1]))
-
- >>> np.unravel_index(1621, (6,7,8,9))
- (3, 1, 4, 1)
-
- """)
-
add_newdoc('numpy.core.multiarray', 'add_docstring',
"""
add_docstring(obj, docstring)
@@ -5487,94 +4420,6 @@ add_newdoc('numpy.core.umath', '_add_newdoc_ufunc',
and then throwing away the ufunc.
""")
-add_newdoc('numpy.core.multiarray', 'packbits',
- """
- packbits(myarray, axis=None)
-
- Packs the elements of a binary-valued array into bits in a uint8 array.
-
- The result is padded to full bytes by inserting zero bits at the end.
-
- Parameters
- ----------
- myarray : array_like
- An array of integers or booleans whose elements should be packed to
- bits.
- axis : int, optional
- The dimension over which bit-packing is done.
- ``None`` implies packing the flattened array.
-
- Returns
- -------
- packed : ndarray
- Array of type uint8 whose elements represent bits corresponding to the
- logical (0 or nonzero) value of the input elements. The shape of
- `packed` has the same number of dimensions as the input (unless `axis`
- is None, in which case the output is 1-D).
-
- See Also
- --------
- unpackbits: Unpacks elements of a uint8 array into a binary-valued output
- array.
-
- Examples
- --------
- >>> a = np.array([[[1,0,1],
- ... [0,1,0]],
- ... [[1,1,0],
- ... [0,0,1]]])
- >>> b = np.packbits(a, axis=-1)
- >>> b
- array([[[160],[64]],[[192],[32]]], dtype=uint8)
-
- Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
- and 32 = 0010 0000.
-
- """)
-
-add_newdoc('numpy.core.multiarray', 'unpackbits',
- """
- unpackbits(myarray, axis=None)
-
- Unpacks elements of a uint8 array into a binary-valued output array.
-
- Each element of `myarray` represents a bit-field that should be unpacked
- into a binary-valued output array. The shape of the output array is either
- 1-D (if `axis` is None) or the same shape as the input array with unpacking
- done along the axis specified.
-
- Parameters
- ----------
- myarray : ndarray, uint8 type
- Input array.
- axis : int, optional
- The dimension over which bit-unpacking is done.
- ``None`` implies unpacking the flattened array.
-
- Returns
- -------
- unpacked : ndarray, uint8 type
- The elements are binary-valued (0 or 1).
-
- See Also
- --------
- packbits : Packs the elements of a binary-valued array into bits in a uint8
- array.
-
- Examples
- --------
- >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
- >>> a
- array([[ 2],
- [ 7],
- [23]], dtype=uint8)
- >>> b = np.unpackbits(a, axis=1)
- >>> b
- array([[0, 0, 0, 0, 0, 0, 1, 0],
- [0, 0, 0, 0, 0, 1, 1, 1],
- [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
-
- """)
add_newdoc('numpy.core._multiarray_tests', 'format_float_OSprintf_g',
"""
@@ -5655,10 +4500,12 @@ add_newdoc('numpy.core', 'ufunc',
number of outputs; use `None` for uninitialized outputs to be
allocated by the ufunc.
where : array_like, optional
- Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone. Note that if
- an uninitialized return array is created via the default ``out=None``,
- then the elements where the values are False will remain uninitialized.
+ This condition is broadcast over the input. At locations where the
+ condition is True, the `out` array will be set to the ufunc result.
+ Elsewhere, the `out` array will retain its original value.
+ Note that if an uninitialized `out` array is created via the default
+ ``out=None``, locations within it where the condition is False will
+ remain uninitialized.
**kwargs
For other keyword-only arguments, see the :ref:`ufunc docs <ufuncs.kwargs>`.
@@ -5865,7 +4712,7 @@ add_newdoc('numpy.core', 'ufunc', ('signature',
add_newdoc('numpy.core', 'ufunc', ('reduce',
"""
- reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial)
+ reduce(a, axis=0, dtype=None, out=None, keepdims=False, initial=<no value>, where=True)
Reduces `a`'s dimension by one, by applying ufunc along one axis.
@@ -5910,7 +4757,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
- :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
@@ -5927,9 +4774,17 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
to None - otherwise it defaults to ufunc.identity.
If ``None`` is given, the first element of the reduction is used,
and an error is thrown if the reduction is empty.
-
+
.. versionadded:: 1.15.0
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `a`, and selects elements to include in the reduction. Note
+ that for ufuncs like ``minimum`` that do not have an identity
+ defined, one has to pass in also ``initial``.
+
+ .. versionadded:: 1.17.0
+
Returns
-------
r : ndarray
@@ -5960,20 +4815,25 @@ add_newdoc('numpy.core', 'ufunc', ('reduce',
>>> np.add.reduce(X, 2)
array([[ 1, 5],
[ 9, 13]])
-
- You can use the ``initial`` keyword argument to initialize the reduction with a
- different value.
-
+
+ You can use the ``initial`` keyword argument to initialize the reduction
+ with a different value, and ``where`` to select specific elements to include:
+
>>> np.add.reduce([10], initial=5)
15
- >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initializer=10)
+ >>> np.add.reduce(np.ones((2, 2, 2)), axis=(0, 2), initial=10)
array([14., 14.])
-
+ >>> a = np.array([10., np.nan, 10])
+ >>> np.add.reduce(a, where=~np.isnan(a))
+ 20.0
+
Allows reductions of empty arrays where they would normally fail, i.e.
for ufuncs without an identity.
-
+
>>> np.minimum.reduce([], initial=np.inf)
inf
+ >>> np.minimum.reduce([[1., 2.], [3., 4.]], initial=10., where=[True, False])
+ array([ 1., 10.])
>>> np.minimum.reduce([])
Traceback (most recent call last):
...
@@ -6014,7 +4874,7 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
- :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
@@ -6039,23 +4899,23 @@ add_newdoc('numpy.core', 'ufunc', ('accumulate',
>>> I = np.eye(2)
>>> I
- array([[ 1., 0.],
- [ 0., 1.]])
+ array([[1., 0.],
+ [0., 1.]])
Accumulate along axis 0 (rows), down columns:
>>> np.add.accumulate(I, 0)
- array([[ 1., 0.],
- [ 1., 1.]])
+ array([[1., 0.],
+ [1., 1.]])
>>> np.add.accumulate(I) # no axis specified = axis zero
- array([[ 1., 0.],
- [ 1., 1.]])
+ array([[1., 0.],
+ [1., 1.]])
Accumulate along axis 1 (columns), through rows:
>>> np.add.accumulate(I, 1)
- array([[ 1., 1.],
- [ 0., 1.]])
+ array([[1., 1.],
+ [0., 1.]])
"""))
@@ -6096,7 +4956,7 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
out : ndarray, None, or tuple of ndarray and None, optional
A location into which the result is stored. If not provided or `None`,
a freshly-allocated array is returned. For consistency with
- :ref:`ufunc.__call__`, if given as a keyword, this may be wrapped in a
+ ``ufunc.__call__``, if given as a keyword, this may be wrapped in a
1-element tuple.
.. versionchanged:: 1.13.0
@@ -6132,10 +4992,10 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
>>> x = np.linspace(0, 15, 16).reshape(4,4)
>>> x
- array([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [ 12., 13., 14., 15.]])
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
::
@@ -6147,11 +5007,11 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
# [row1 + row2 + row3 + row4]
>>> np.add.reduceat(x, [0, 3, 1, 2, 0])
- array([[ 12., 15., 18., 21.],
- [ 12., 13., 14., 15.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [ 24., 28., 32., 36.]])
+ array([[12., 15., 18., 21.],
+ [12., 13., 14., 15.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [24., 28., 32., 36.]])
::
@@ -6159,10 +5019,10 @@ add_newdoc('numpy.core', 'ufunc', ('reduceat',
# [col1 * col2 * col3, col4]
>>> np.multiply.reduceat(x, [0, 3], 1)
- array([[ 0., 3.],
- [ 120., 7.],
- [ 720., 11.],
- [ 2184., 15.]])
+ array([[ 0., 3.],
+ [ 120., 7.],
+ [ 720., 11.],
+ [2184., 15.]])
"""))
@@ -6261,14 +5121,14 @@ add_newdoc('numpy.core', 'ufunc', ('at',
>>> a = np.array([1, 2, 3, 4])
>>> np.negative.at(a, [0, 1])
- >>> print(a)
- array([-1, -2, 3, 4])
+ >>> a
+ array([-1, -2, 3, 4])
Increment items 0 and 1, and increment item 2 twice:
>>> a = np.array([1, 2, 3, 4])
>>> np.add.at(a, [0, 1, 2, 2], 1)
- >>> print(a)
+ >>> a
array([2, 3, 5, 4])
Add items 0 and 1 in first array to second array,
@@ -6277,7 +5137,7 @@ add_newdoc('numpy.core', 'ufunc', ('at',
>>> a = np.array([1, 2, 3, 4])
>>> b = np.array([1, 2])
>>> np.add.at(a, [0, 1], b)
- >>> print(a)
+ >>> a
array([2, 4, 3, 4])
"""))
@@ -6342,13 +5202,13 @@ add_newdoc('numpy.core.multiarray', 'dtype',
Structured type, two fields: the first field contains an unsigned int, the
second an int32:
- >>> np.dtype([('f1', np.uint), ('f2', np.int32)])
- dtype([('f1', '<u4'), ('f2', '<i4')])
+ >>> np.dtype([('f1', np.uint64), ('f2', np.int32)])
+ dtype([('f1', '<u8'), ('f2', '<i4')])
Using array-protocol type strings:
>>> np.dtype([('a','f8'),('b','S10')])
- dtype([('a', '<f8'), ('b', '|S10')])
+ dtype([('a', '<f8'), ('b', 'S10')])
Using comma-separated field formats. The shape is (2,3):
@@ -6358,24 +5218,24 @@ add_newdoc('numpy.core.multiarray', 'dtype',
Using tuples. ``int`` is a fixed type, 3 the field's shape. ``void``
is a flexible type, here of size 10:
- >>> np.dtype([('hello',(int,3)),('world',np.void,10)])
- dtype([('hello', '<i4', 3), ('world', '|V10')])
+ >>> np.dtype([('hello',(np.int64,3)),('world',np.void,10)])
+ dtype([('hello', '<i8', (3,)), ('world', 'V10')])
Subdivide ``int16`` into 2 ``int8``'s, called x and y. 0 and 1 are
the offsets in bytes:
>>> np.dtype((np.int16, {'x':(np.int8,0), 'y':(np.int8,1)}))
- dtype(('<i2', [('x', '|i1'), ('y', '|i1')]))
+ dtype((numpy.int16, [('x', 'i1'), ('y', 'i1')]))
Using dictionaries. Two fields named 'gender' and 'age':
>>> np.dtype({'names':['gender','age'], 'formats':['S1',np.uint8]})
- dtype([('gender', '|S1'), ('age', '|u1')])
+ dtype([('gender', 'S1'), ('age', 'u1')])
Offsets in bytes, here 0 and 25:
>>> np.dtype({'surname':('S25',0),'age':(np.uint8,25)})
- dtype([('surname', '|S25'), ('age', '|u1')])
+ dtype([('surname', 'S25'), ('age', 'u1')])
""")
@@ -6391,6 +5251,17 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('alignment',
More information is available in the C-API section of the manual.
+ Examples
+ --------
+
+ >>> x = np.dtype('i4')
+ >>> x.alignment
+ 4
+
+ >>> x = np.dtype(float)
+ >>> x.alignment
+ 8
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
@@ -6437,17 +5308,38 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('byteorder',
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('char',
- """A unique character code for each of the 21 different built-in types."""))
+ """A unique character code for each of the 21 different built-in types.
+
+ Examples
+ --------
+
+ >>> x = np.dtype(float)
+ >>> x.char
+ 'd'
+
+ """))
add_newdoc('numpy.core.multiarray', 'dtype', ('descr',
"""
- PEP3118 interface description of the data-type.
+ `__array_interface__` description of the data-type.
The format is that required by the 'descr' key in the
- PEP3118 `__array_interface__` attribute.
+ `__array_interface__` attribute.
+
+ Warning: This attribute exists specifically for `__array_interface__`,
+ and is not a datatype description compatible with `np.dtype`.
+
+ Examples
+ --------
+
+ >>> x = np.dtype(float)
+ >>> x.descr
+ [('', '<f8')]
+
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.descr
+ [('name', '<U16'), ('grades', '<f8', (2,))]
- Warning: This attribute exists specifically for PEP3118 compliance, and
- is not a datatype description compatible with `np.dtype`.
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('fields',
@@ -6488,6 +5380,18 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('flags',
of these flags is in C-API documentation; they are largely useful
for user-defined data-types.
+ The following example demonstrates that operations on this particular
+ dtype requires Python C-API.
+
+ Examples
+ --------
+
+ >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+ >>> x.flags
+ 16
+ >>> np.core.multiarray.NEEDS_PYAPI
+ 16
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('hasobject',
@@ -6545,6 +5449,7 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('isalignedstruct',
field alignment. This flag is sticky, so when combining multiple
structs together, it is preserved and produces new dtypes which
are also aligned.
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
@@ -6554,6 +5459,19 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('itemsize',
For 18 of the 21 types this number is fixed by the data-type.
For the flexible data-types, this number can be anything.
+ Examples
+ --------
+
+ >>> arr = np.array([[1, 2], [3, 4]])
+ >>> arr.dtype
+ dtype('int64')
+ >>> arr.itemsize
+ 8
+
+ >>> dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
+ >>> dt.itemsize
+ 80
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
@@ -6574,6 +5492,19 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('kind',
V void
= ======================
+ Examples
+ --------
+
+ >>> dt = np.dtype('i4')
+ >>> dt.kind
+ 'i'
+ >>> dt = np.dtype('f8')
+ >>> dt.kind
+ 'f'
+ >>> dt = np.dtype([('field1', 'f8')])
+ >>> dt.kind
+ 'V'
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('name',
@@ -6582,6 +5513,16 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('name',
Un-sized flexible data-type objects do not have this attribute.
+ Examples
+ --------
+
+ >>> x = np.dtype(float)
+ >>> x.name
+ 'float64'
+ >>> x = np.dtype([('a', np.int32, 8), ('b', np.float64, 6)])
+ >>> x.name
+ 'void640'
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('names',
@@ -6605,6 +5546,17 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('num',
These are roughly ordered from least-to-most precision.
+ Examples
+ --------
+
+ >>> dt = np.dtype(str)
+ >>> dt.num
+ 19
+
+ >>> dt = np.dtype(float)
+ >>> dt.num
+ 12
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
@@ -6612,6 +5564,17 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('shape',
Shape tuple of the sub-array if this data type describes a sub-array,
and ``()`` otherwise.
+ Examples
+ --------
+
+ >>> dt = np.dtype(('i4', 4))
+ >>> dt.shape
+ (4,)
+
+ >>> dt = np.dtype(('i4', (2, 3)))
+ >>> dt.shape
+ (2, 3)
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
@@ -6621,6 +5584,20 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('ndim',
.. versionadded:: 1.13.0
+ Examples
+ --------
+ >>> x = np.dtype(float)
+ >>> x.ndim
+ 0
+
+ >>> x = np.dtype((float, 8))
+ >>> x.ndim
+ 1
+
+ >>> x = np.dtype(('i4', (3, 4)))
+ >>> x.ndim
+ 2
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('str',
@@ -6638,6 +5615,41 @@ add_newdoc('numpy.core.multiarray', 'dtype', ('subdtype',
then the extra dimensions implied by *shape* are tacked on to
the end of the retrieved array.
+ See Also
+ --------
+ dtype.base
+
+ Examples
+ --------
+ >>> x = numpy.dtype('8f')
+ >>> x.subdtype
+ (dtype('float32'), (8,))
+
+ >>> x = numpy.dtype('i2')
+ >>> x.subdtype
+ >>>
+
+ """))
+
+add_newdoc('numpy.core.multiarray', 'dtype', ('base',
+ """
+ Returns dtype for the base element of the subarrays,
+ regardless of their dimension or shape.
+
+ See Also
+ --------
+ dtype.subdtype
+
+ Examples
+ --------
+ >>> x = numpy.dtype('8f')
+ >>> x.base
+ dtype('float32')
+
+ >>> x = numpy.dtype('i2')
+ >>> x.base
+ dtype('int16')
+
"""))
add_newdoc('numpy.core.multiarray', 'dtype', ('type',
@@ -6779,7 +5791,7 @@ add_newdoc('numpy.core.multiarray', 'busdaycalendar',
... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
>>> # Default is Monday to Friday weekdays
... bdd.weekmask
- array([ True, True, True, True, True, False, False], dtype='bool')
+ array([ True, True, True, True, True, False, False])
>>> # Any holidays already on the weekend are removed
... bdd.holidays
array(['2011-07-01', '2011-07-04'], dtype='datetime64[D]')
@@ -6791,211 +5803,6 @@ add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('weekmask',
add_newdoc('numpy.core.multiarray', 'busdaycalendar', ('holidays',
"""A copy of the holiday array indicating additional invalid days."""))
-add_newdoc('numpy.core.multiarray', 'is_busday',
- """
- is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
-
- Calculates which of the given dates are valid days, and which are not.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- dates : array_like of datetime64[D]
- The array of dates to process.
- weekmask : str or array_like of bool, optional
- A seven-element array indicating which of Monday through Sunday are
- valid days. May be specified as a length-seven list or array, like
- [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
- like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
- weekdays, optionally separated by white space. Valid abbreviations
- are: Mon Tue Wed Thu Fri Sat Sun
- holidays : array_like of datetime64[D], optional
- An array of dates to consider as invalid dates. They may be
- specified in any order, and NaT (not-a-time) dates are ignored.
- This list is saved in a normalized form that is suited for
- fast calculations of valid days.
- busdaycal : busdaycalendar, optional
- A `busdaycalendar` object which specifies the valid days. If this
- parameter is provided, neither weekmask nor holidays may be
- provided.
- out : array of bool, optional
- If provided, this array is filled with the result.
-
- Returns
- -------
- out : array of bool
- An array with the same shape as ``dates``, containing True for
- each valid day, and False for each invalid day.
-
- See Also
- --------
- busdaycalendar: An object that specifies a custom set of valid days.
- busday_offset : Applies an offset counted in valid days.
- busday_count : Counts how many valid days are in a half-open date range.
-
- Examples
- --------
- >>> # The weekdays are Friday, Saturday, and Monday
- ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
- ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
- array([False, False, True], dtype='bool')
- """)
-
-add_newdoc('numpy.core.multiarray', 'busday_offset',
- """
- busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
-
- First adjusts the date to fall on a valid day according to
- the ``roll`` rule, then applies offsets to the given dates
- counted in valid days.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- dates : array_like of datetime64[D]
- The array of dates to process.
- offsets : array_like of int
- The array of offsets, which is broadcast with ``dates``.
- roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
- How to treat dates that do not fall on a valid day. The default
- is 'raise'.
-
- * 'raise' means to raise an exception for an invalid day.
- * 'nat' means to return a NaT (not-a-time) for an invalid day.
- * 'forward' and 'following' mean to take the first valid day
- later in time.
- * 'backward' and 'preceding' mean to take the first valid day
- earlier in time.
- * 'modifiedfollowing' means to take the first valid day
- later in time unless it is across a Month boundary, in which
- case to take the first valid day earlier in time.
- * 'modifiedpreceding' means to take the first valid day
- earlier in time unless it is across a Month boundary, in which
- case to take the first valid day later in time.
- weekmask : str or array_like of bool, optional
- A seven-element array indicating which of Monday through Sunday are
- valid days. May be specified as a length-seven list or array, like
- [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
- like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
- weekdays, optionally separated by white space. Valid abbreviations
- are: Mon Tue Wed Thu Fri Sat Sun
- holidays : array_like of datetime64[D], optional
- An array of dates to consider as invalid dates. They may be
- specified in any order, and NaT (not-a-time) dates are ignored.
- This list is saved in a normalized form that is suited for
- fast calculations of valid days.
- busdaycal : busdaycalendar, optional
- A `busdaycalendar` object which specifies the valid days. If this
- parameter is provided, neither weekmask nor holidays may be
- provided.
- out : array of datetime64[D], optional
- If provided, this array is filled with the result.
-
- Returns
- -------
- out : array of datetime64[D]
- An array with a shape from broadcasting ``dates`` and ``offsets``
- together, containing the dates with offsets applied.
-
- See Also
- --------
- busdaycalendar: An object that specifies a custom set of valid days.
- is_busday : Returns a boolean array indicating valid days.
- busday_count : Counts how many valid days are in a half-open date range.
-
- Examples
- --------
- >>> # First business day in October 2011 (not accounting for holidays)
- ... np.busday_offset('2011-10', 0, roll='forward')
- numpy.datetime64('2011-10-03','D')
- >>> # Last business day in February 2012 (not accounting for holidays)
- ... np.busday_offset('2012-03', -1, roll='forward')
- numpy.datetime64('2012-02-29','D')
- >>> # Third Wednesday in January 2011
- ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
- numpy.datetime64('2011-01-19','D')
- >>> # 2012 Mother's Day in Canada and the U.S.
- ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
- numpy.datetime64('2012-05-13','D')
-
- >>> # First business day on or after a date
- ... np.busday_offset('2011-03-20', 0, roll='forward')
- numpy.datetime64('2011-03-21','D')
- >>> np.busday_offset('2011-03-22', 0, roll='forward')
- numpy.datetime64('2011-03-22','D')
- >>> # First business day after a date
- ... np.busday_offset('2011-03-20', 1, roll='backward')
- numpy.datetime64('2011-03-21','D')
- >>> np.busday_offset('2011-03-22', 1, roll='backward')
- numpy.datetime64('2011-03-23','D')
- """)
-
-add_newdoc('numpy.core.multiarray', 'busday_count',
- """
- busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
-
- Counts the number of valid days between `begindates` and
- `enddates`, not including the day of `enddates`.
-
- If ``enddates`` specifies a date value that is earlier than the
- corresponding ``begindates`` date value, the count will be negative.
-
- .. versionadded:: 1.7.0
-
- Parameters
- ----------
- begindates : array_like of datetime64[D]
- The array of the first dates for counting.
- enddates : array_like of datetime64[D]
- The array of the end dates for counting, which are excluded
- from the count themselves.
- weekmask : str or array_like of bool, optional
- A seven-element array indicating which of Monday through Sunday are
- valid days. May be specified as a length-seven list or array, like
- [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
- like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
- weekdays, optionally separated by white space. Valid abbreviations
- are: Mon Tue Wed Thu Fri Sat Sun
- holidays : array_like of datetime64[D], optional
- An array of dates to consider as invalid dates. They may be
- specified in any order, and NaT (not-a-time) dates are ignored.
- This list is saved in a normalized form that is suited for
- fast calculations of valid days.
- busdaycal : busdaycalendar, optional
- A `busdaycalendar` object which specifies the valid days. If this
- parameter is provided, neither weekmask nor holidays may be
- provided.
- out : array of int, optional
- If provided, this array is filled with the result.
-
- Returns
- -------
- out : array of int
- An array with a shape from broadcasting ``begindates`` and ``enddates``
- together, containing the number of valid days between
- the begin and end dates.
-
- See Also
- --------
- busdaycalendar: An object that specifies a custom set of valid days.
- is_busday : Returns a boolean array indicating valid days.
- busday_offset : Applies an offset counted in valid days.
-
- Examples
- --------
- >>> # Number of weekdays in January 2011
- ... np.busday_count('2011-01', '2011-02')
- 21
- >>> # Number of weekdays in 2011
- ... np.busday_count('2011', '2012')
- 260
- >>> # Number of Saturdays in 2011
- ... np.busday_count('2011', '2012', weekmask='Sat')
- 53
- """)
-
add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
"""
normalize_axis_index(axis, ndim, msg_prefix=None)
@@ -7047,67 +5854,6 @@ add_newdoc('numpy.core.multiarray', 'normalize_axis_index',
AxisError: axes_arg: axis -4 is out of bounds for array of dimension 3
""")
-add_newdoc('numpy.core.multiarray', 'datetime_as_string',
- """
- datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
-
- Convert an array of datetimes into an array of strings.
-
- Parameters
- ----------
- arr : array_like of datetime64
- The array of UTC timestamps to format.
- unit : str
- One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
- timezone : {'naive', 'UTC', 'local'} or tzinfo
- Timezone information to use when displaying the datetime. If 'UTC', end
- with a Z to indicate UTC time. If 'local', convert to the local timezone
- first, and suffix with a +-#### timezone offset. If a tzinfo object,
- then do as with 'local', but use the specified timezone.
- casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
- Casting to allow when changing between datetime units.
-
- Returns
- -------
- str_arr : ndarray
- An array of strings the same shape as `arr`.
-
- Examples
- --------
- >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
- >>> d
- array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
- '2002-10-27T07:30'], dtype='datetime64[m]')
-
- Setting the timezone to UTC shows the same information, but with a Z suffix
-
- >>> np.datetime_as_string(d, timezone='UTC')
- array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
- '2002-10-27T07:30Z'], dtype='<U35')
-
- Note that we picked datetimes that cross a DST boundary. Passing in a
- ``pytz`` timezone object will print the appropriate offset
-
- >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
- array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
- '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
-
- Passing in a unit will change the precision
-
- >>> np.datetime_as_string(d, unit='h')
- array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
- dtype='<U32')
- >>> np.datetime_as_string(d, unit='s')
- array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
- '2002-10-27T07:30:00'], dtype='<U38')
-
- 'casting' can be used to specify whether precision can be changed
-
- >>> np.datetime_as_string(d, unit='h', casting='safe')
- TypeError: Cannot create a datetime string as units 'h' from a NumPy
- datetime with units 'm' according to the rule 'safe'
- """)
-
add_newdoc('numpy.core.multiarray', 'datetime_data',
"""
datetime_data(dtype, /)
@@ -7142,7 +5888,7 @@ add_newdoc('numpy.core.multiarray', 'datetime_data',
as a timedelta
>>> np.datetime64('2010', np.datetime_data(dt_25s))
- numpy.datetime64('2010-01-01T00:00:00', '25s')
+ numpy.datetime64('2010-01-01T00:00:00','25s')
""")
@@ -7174,9 +5920,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('T',
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7188,9 +5932,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('base',
albeit unimplemented, all the attributes of the ndarray class so as to
a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7240,9 +5982,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('all',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7254,9 +5994,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('any',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7268,9 +6006,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('argmax',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7282,9 +6018,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('argmin',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7296,9 +6030,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('argsort',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7310,9 +6042,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('astype',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7324,9 +6054,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('byteswap',
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7338,9 +6066,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('choose',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7352,9 +6078,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('clip',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7366,9 +6090,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('compress',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7380,9 +6102,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('conjugate',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7394,9 +6114,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('copy',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7408,9 +6126,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('cumprod',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7422,9 +6138,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('cumsum',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7436,9 +6150,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('diagonal',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7450,9 +6162,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('dump',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7464,9 +6174,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('dumps',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7478,9 +6186,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('fill',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7492,9 +6198,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('flatten',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7506,9 +6210,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('getfield',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7520,9 +6222,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('item',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7534,9 +6234,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('itemset',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7548,9 +6246,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('max',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7562,9 +6258,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('mean',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7576,9 +6270,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('min',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7623,9 +6315,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('nonzero',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7637,9 +6327,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('prod',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7651,9 +6339,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('ptp',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7665,9 +6351,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('put',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7679,9 +6363,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('ravel',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7693,9 +6375,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('repeat',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7707,9 +6387,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('reshape',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7721,9 +6399,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('resize',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7735,9 +6411,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('round',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7749,9 +6423,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('searchsorted',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7763,9 +6435,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('setfield',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7777,9 +6447,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('setflags',
albeit unimplemented, all the attributes of the ndarray class so as to
provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7791,9 +6459,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('sort',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7805,9 +6471,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('squeeze',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7819,9 +6483,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('std',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7833,9 +6495,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('sum',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7847,9 +6507,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('swapaxes',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7861,9 +6519,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('take',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7875,9 +6531,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('tofile',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7889,9 +6543,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('tolist',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7903,9 +6555,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('tostring',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7917,9 +6567,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('trace',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7931,9 +6579,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('transpose',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7945,9 +6591,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('var',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7959,9 +6603,7 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('view',
albeit unimplemented, all the attributes of the ndarray class
so as to provide a uniform API.
- See Also
- --------
- The corresponding attribute of the derived class of interest.
+ See also the corresponding attribute of the derived class of interest.
"""))
@@ -7976,25 +6618,25 @@ add_newdoc('numpy.core.numerictypes', 'generic', ('view',
add_newdoc('numpy.core.numerictypes', 'number',
"""
Abstract base class of all numeric scalar types.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'integer',
"""
Abstract base class of all integer scalar types.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'signedinteger',
"""
Abstract base class of all signed integer scalar types.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'unsignedinteger',
"""
Abstract base class of all unsigned integer scalar types.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'inexact',
@@ -8002,20 +6644,20 @@ add_newdoc('numpy.core.numerictypes', 'inexact',
Abstract base class of all numeric scalar types with a (potentially)
inexact representation of the values in its range, such as
floating-point numbers.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'floating',
"""
Abstract base class of all floating-point scalar types.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'complexfloating',
"""
Abstract base class of all complex number scalar types that are made up of
floating-point numbers.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'flexible',
@@ -8023,13 +6665,13 @@ add_newdoc('numpy.core.numerictypes', 'flexible',
Abstract base class of all scalar types without predefined length.
The actual size of these types depends on the specific `np.dtype`
instantiation.
-
+
""")
add_newdoc('numpy.core.numerictypes', 'character',
"""
Abstract base class of all character string scalar types.
-
+
""")
@@ -8193,3 +6835,22 @@ add_newdoc_for_scalar_type('object_', [],
"""
Any Python object.
""")
+
+# TODO: work out how to put this on the base class, np.floating
+for float_name in ('half', 'single', 'double', 'longdouble'):
+ add_newdoc('numpy.core.numerictypes', float_name, ('as_integer_ratio',
+ """
+ {ftype}.as_integer_ratio() -> (int, int)
+
+ Return a pair of integers, whose ratio is exactly equal to the original
+ floating point number, and with a positive denominator.
+ Raise OverflowError on infinities and a ValueError on NaNs.
+
+ >>> np.{ftype}(10.0).as_integer_ratio()
+ (10, 1)
+ >>> np.{ftype}(0.0).as_integer_ratio()
+ (0, 1)
+ >>> np.{ftype}(-.25).as_integer_ratio()
+ (-1, 4)
+ """.format(ftype=float_name)))
+
diff --git a/numpy/core/_asarray.py b/numpy/core/_asarray.py
new file mode 100644
index 000000000..0ad4161f4
--- /dev/null
+++ b/numpy/core/_asarray.py
@@ -0,0 +1,324 @@
+"""
+Functions in the ``as*array`` family that promote array-likes into arrays.
+
+`require` fits this category despite its name not matching this pattern.
+"""
+from __future__ import division, absolute_import, print_function
+
+from .overrides import set_module
+from .multiarray import array
+
+
+__all__ = [
+ "asarray", "asanyarray", "ascontiguousarray", "asfortranarray", "require",
+]
+
+@set_module('numpy')
+def asarray(a, dtype=None, order=None):
+ """Convert the input to an array.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes lists, lists of tuples, tuples, tuples of tuples, tuples
+ of lists and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F'}, optional
+ Whether to use row-major (C-style) or
+ column-major (Fortran-style) memory representation.
+ Defaults to 'C'.
+
+ Returns
+ -------
+ out : ndarray
+ Array interpretation of `a`. No copy is performed if the input
+ is already an ndarray with matching dtype and order. If `a` is a
+ subclass of ndarray, a base class ndarray is returned.
+
+ See Also
+ --------
+ asanyarray : Similar function which passes through subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfarray : Convert input to a floating point ndarray.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> np.asarray(a)
+ array([1, 2])
+
+ Existing arrays are not copied:
+
+ >>> a = np.array([1, 2])
+ >>> np.asarray(a) is a
+ True
+
+ If `dtype` is set, array is copied only if dtype does not match:
+
+ >>> a = np.array([1, 2], dtype=np.float32)
+ >>> np.asarray(a, dtype=np.float32) is a
+ True
+ >>> np.asarray(a, dtype=np.float64) is a
+ False
+
+ Contrary to `asanyarray`, ndarray subclasses are not passed through:
+
+ >>> issubclass(np.recarray, np.ndarray)
+ True
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asarray(a) is a
+ False
+ >>> np.asanyarray(a) is a
+ True
+
+ """
+ return array(a, dtype, copy=False, order=order)
+
+
+@set_module('numpy')
+def asanyarray(a, dtype=None, order=None):
+ """Convert the input to an ndarray, but pass ndarray subclasses through.
+
+ Parameters
+ ----------
+ a : array_like
+ Input data, in any form that can be converted to an array. This
+ includes scalars, lists, lists of tuples, tuples, tuples of tuples,
+ tuples of lists, and ndarrays.
+ dtype : data-type, optional
+ By default, the data-type is inferred from the input data.
+ order : {'C', 'F'}, optional
+ Whether to use row-major (C-style) or column-major
+ (Fortran-style) memory representation. Defaults to 'C'.
+
+ Returns
+ -------
+ out : ndarray or an ndarray subclass
+ Array interpretation of `a`. If `a` is an ndarray or a subclass
+ of ndarray, it is returned as-is and no copy is performed.
+
+ See Also
+ --------
+ asarray : Similar function which always returns ndarrays.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfarray : Convert input to a floating point ndarray.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ asarray_chkfinite : Similar function which checks input for NaNs and
+ Infs.
+ fromiter : Create an array from an iterator.
+ fromfunction : Construct an array by executing a function on grid
+ positions.
+
+ Examples
+ --------
+ Convert a list into an array:
+
+ >>> a = [1, 2]
+ >>> np.asanyarray(a)
+ array([1, 2])
+
+ Instances of `ndarray` subclasses are passed through as-is:
+
+ >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
+ >>> np.asanyarray(a) is a
+ True
+
+ """
+ return array(a, dtype, copy=False, order=order, subok=True)
+
+
+@set_module('numpy')
+def ascontiguousarray(a, dtype=None):
+ """
+ Return a contiguous array (ndim >= 1) in memory (C order).
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ Data-type of returned array.
+
+ Returns
+ -------
+ out : ndarray
+ Contiguous array of same shape and content as `a`, with type `dtype`
+ if specified.
+
+ See Also
+ --------
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> np.ascontiguousarray(x, dtype=np.float32)
+ array([[0., 1., 2.],
+ [3., 4., 5.]], dtype=float32)
+ >>> x.flags['C_CONTIGUOUS']
+ True
+
+ Note: This function returns an array with at least one-dimension (1-d)
+ so it will not preserve 0-d arrays.
+
+ """
+ return array(a, dtype, copy=False, order='C', ndmin=1)
+
+
+@set_module('numpy')
+def asfortranarray(a, dtype=None):
+ """
+ Return an array (ndim >= 1) laid out in Fortran order in memory.
+
+ Parameters
+ ----------
+ a : array_like
+ Input array.
+ dtype : str or dtype object, optional
+ By default, the data-type is inferred from the input data.
+
+ Returns
+ -------
+ out : ndarray
+ The input `a` in Fortran, or column-major, order.
+
+ See Also
+ --------
+ ascontiguousarray : Convert input to a contiguous (C order) array.
+ asanyarray : Convert input to an ndarray with either row or
+ column-major memory order.
+ require : Return an ndarray that satisfies requirements.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> y = np.asfortranarray(x)
+ >>> x.flags['F_CONTIGUOUS']
+ False
+ >>> y.flags['F_CONTIGUOUS']
+ True
+
+ Note: This function returns an array with at least one-dimension (1-d)
+ so it will not preserve 0-d arrays.
+
+ """
+ return array(a, dtype, copy=False, order='F', ndmin=1)
+
+
+@set_module('numpy')
+def require(a, dtype=None, requirements=None):
+ """
+ Return an ndarray of the provided type that satisfies requirements.
+
+ This function is useful to be sure that an array with the correct flags
+ is returned for passing to compiled code (perhaps through ctypes).
+
+ Parameters
+ ----------
+ a : array_like
+ The object to be converted to a type-and-requirement-satisfying array.
+ dtype : data-type
+ The required data-type. If None preserve the current dtype. If your
+ application requires the data to be in native byteorder, include
+ a byteorder specification as a part of the dtype specification.
+ requirements : str or list of str
+ The requirements list can be any of the following
+
+ * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
+ * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
+ * 'ALIGNED' ('A') - ensure a data-type aligned array
+ * 'WRITEABLE' ('W') - ensure a writable array
+ * 'OWNDATA' ('O') - ensure an array that owns its own data
+ * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
+
+ Returns
+ -------
+ out : ndarray
+ Array with specified requirements and type if given.
+
+ See Also
+ --------
+ asarray : Convert input to an ndarray.
+ asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
+ ascontiguousarray : Convert input to a contiguous array.
+ asfortranarray : Convert input to an ndarray with column-major
+ memory order.
+ ndarray.flags : Information about the memory layout of the array.
+
+ Notes
+ -----
+ The returned array will be guaranteed to have the listed requirements
+ by making a copy if needed.
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2,3)
+ >>> x.flags
+ C_CONTIGUOUS : True
+ F_CONTIGUOUS : False
+ OWNDATA : False
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+
+ >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
+ >>> y.flags
+ C_CONTIGUOUS : False
+ F_CONTIGUOUS : True
+ OWNDATA : True
+ WRITEABLE : True
+ ALIGNED : True
+ WRITEBACKIFCOPY : False
+ UPDATEIFCOPY : False
+
+ """
+ possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
+ 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
+ 'A': 'A', 'ALIGNED': 'A',
+ 'W': 'W', 'WRITEABLE': 'W',
+ 'O': 'O', 'OWNDATA': 'O',
+ 'E': 'E', 'ENSUREARRAY': 'E'}
+ if not requirements:
+ return asanyarray(a, dtype=dtype)
+ else:
+ requirements = {possible_flags[x.upper()] for x in requirements}
+
+ if 'E' in requirements:
+ requirements.remove('E')
+ subok = False
+ else:
+ subok = True
+
+ order = 'A'
+ if requirements >= {'C', 'F'}:
+ raise ValueError('Cannot specify both "C" and "F" order')
+ elif 'F' in requirements:
+ order = 'F'
+ requirements.remove('F')
+ elif 'C' in requirements:
+ order = 'C'
+ requirements.remove('C')
+
+ arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
+
+ for prop in requirements:
+ if not arr.flags[prop]:
+ arr = arr.copy(order)
+ break
+ return arr
diff --git a/numpy/core/_dtype.py b/numpy/core/_dtype.py
index 26c44eaaf..df1ff180e 100644
--- a/numpy/core/_dtype.py
+++ b/numpy/core/_dtype.py
@@ -5,9 +5,44 @@ String handling is much easier to do correctly in python.
"""
from __future__ import division, absolute_import, print_function
+import sys
+
import numpy as np
+_kind_to_stem = {
+ 'u': 'uint',
+ 'i': 'int',
+ 'c': 'complex',
+ 'f': 'float',
+ 'b': 'bool',
+ 'V': 'void',
+ 'O': 'object',
+ 'M': 'datetime',
+ 'm': 'timedelta'
+}
+if sys.version_info[0] >= 3:
+ _kind_to_stem.update({
+ 'S': 'bytes',
+ 'U': 'str'
+ })
+else:
+ _kind_to_stem.update({
+ 'S': 'string',
+ 'U': 'unicode'
+ })
+
+
+def _kind_name(dtype):
+ try:
+ return _kind_to_stem[dtype.kind]
+ except KeyError:
+ raise RuntimeError(
+ "internal dtype error, unknown kind {!r}"
+ .format(dtype.kind)
+ )
+
+
def __str__(dtype):
if dtype.fields is not None:
return _struct_str(dtype, include_align=True)
@@ -103,7 +138,9 @@ def _scalar_str(dtype, short):
else:
return "'%sU%d'" % (byteorder, dtype.itemsize / 4)
- elif dtype.type == np.void:
+ # unlike the other types, subclasses of void are preserved - but
+ # historically the repr does not actually reveal the subclass
+ elif issubclass(dtype.type, np.void):
if _isunsized(dtype):
return "'V'"
else:
@@ -122,20 +159,7 @@ def _scalar_str(dtype, short):
# Longer repr, like 'float64'
else:
- kindstrs = {
- 'u': "uint",
- 'i': "int",
- 'f': "float",
- 'c': "complex"
- }
- try:
- kindstr = kindstrs[dtype.kind]
- except KeyError:
- raise RuntimeError(
- "internal dtype repr error, unknown kind {!r}"
- .format(dtype.kind)
- )
- return "'%s%d'" % (kindstr, 8*dtype.itemsize)
+ return "'%s%d'" % (_kind_name(dtype), 8*dtype.itemsize)
elif dtype.isbuiltin == 2:
return dtype.type.__name__
@@ -228,7 +252,7 @@ def _is_packed(dtype):
from a list of the field names and dtypes with no additional
dtype parameters.
- Duplicates the C `is_dtype_struct_simple_unaligned_layout` functio.
+ Duplicates the C `is_dtype_struct_simple_unaligned_layout` function.
"""
total_offset = 0
for name in dtype.names:
@@ -292,26 +316,39 @@ def _subarray_str(dtype):
)
+def _name_includes_bit_suffix(dtype):
+ if dtype.type == np.object_:
+ # pointer size varies by system, best to omit it
+ return False
+ elif dtype.type == np.bool_:
+ # implied
+ return False
+ elif np.issubdtype(dtype, np.flexible) and _isunsized(dtype):
+ # unspecified
+ return False
+ else:
+ return True
+
+
def _name_get(dtype):
- # provides dtype.name.__get__
+ # provides dtype.name.__get__, documented as returning a "bit name"
if dtype.isbuiltin == 2:
# user dtypes don't promise to do anything special
return dtype.type.__name__
- # Builtin classes are documented as returning a "bit name"
- name = dtype.type.__name__
-
- # handle bool_, str_, etc
- if name[-1] == '_':
- name = name[:-1]
+ if issubclass(dtype.type, np.void):
+ # historically, void subclasses preserve their name, eg `record64`
+ name = dtype.type.__name__
+ else:
+ name = _kind_name(dtype)
- # append bit counts to str, unicode, and void
- if np.issubdtype(dtype, np.flexible) and not _isunsized(dtype):
+ # append bit counts
+ if _name_includes_bit_suffix(dtype):
name += "{}".format(dtype.itemsize * 8)
# append metadata to datetimes
- elif dtype.type in (np.datetime64, np.timedelta64):
+ if dtype.type in (np.datetime64, np.timedelta64):
name += _datetime_metadata_str(dtype)
return name
diff --git a/numpy/core/_dtype_ctypes.py b/numpy/core/_dtype_ctypes.py
new file mode 100644
index 000000000..708241289
--- /dev/null
+++ b/numpy/core/_dtype_ctypes.py
@@ -0,0 +1,113 @@
+"""
+Conversion from ctypes to dtype.
+
+In an ideal world, we could achieve this through the PEP3118 buffer protocol,
+something like::
+
+ def dtype_from_ctypes_type(t):
+ # needed to ensure that the shape of `t` is within memoryview.format
+ class DummyStruct(ctypes.Structure):
+ _fields_ = [('a', t)]
+
+ # empty to avoid memory allocation
+ ctype_0 = (DummyStruct * 0)()
+ mv = memoryview(ctype_0)
+
+ # convert the struct, and slice back out the field
+ return _dtype_from_pep3118(mv.format)['a']
+
+Unfortunately, this fails because:
+
+* ctypes cannot handle length-0 arrays with PEP3118 (bpo-32782)
+* PEP3118 cannot represent unions, but both numpy and ctypes can
+* ctypes cannot handle big-endian structs with PEP3118 (bpo-32780)
+"""
+import _ctypes
+import ctypes
+
+import numpy as np
+
+
+def _from_ctypes_array(t):
+ return np.dtype((dtype_from_ctypes_type(t._type_), (t._length_,)))
+
+
+def _from_ctypes_structure(t):
+ for item in t._fields_:
+ if len(item) > 2:
+ raise TypeError(
+ "ctypes bitfields have no dtype equivalent")
+
+ if hasattr(t, "_pack_"):
+ formats = []
+ offsets = []
+ names = []
+ current_offset = 0
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ # Each type has a default offset, this is platform dependent for some types.
+ effective_pack = min(t._pack_, ctypes.alignment(ftyp))
+ current_offset = ((current_offset + effective_pack - 1) // effective_pack) * effective_pack
+ offsets.append(current_offset)
+ current_offset += ctypes.sizeof(ftyp)
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+ else:
+ fields = []
+ for fname, ftyp in t._fields_:
+ fields.append((fname, dtype_from_ctypes_type(ftyp)))
+
+ # by default, ctypes structs are aligned
+ return np.dtype(fields, align=True)
+
+
+def _from_ctypes_scalar(t):
+ """
+ Return the dtype type with endianness included if it's the case
+ """
+ if getattr(t, '__ctype_be__', None) is t:
+ return np.dtype('>' + t._type_)
+ elif getattr(t, '__ctype_le__', None) is t:
+ return np.dtype('<' + t._type_)
+ else:
+ return np.dtype(t._type_)
+
+
+def _from_ctypes_union(t):
+ formats = []
+ offsets = []
+ names = []
+ for fname, ftyp in t._fields_:
+ names.append(fname)
+ formats.append(dtype_from_ctypes_type(ftyp))
+ offsets.append(0) # Union fields are offset to 0
+
+ return np.dtype(dict(
+ formats=formats,
+ offsets=offsets,
+ names=names,
+ itemsize=ctypes.sizeof(t)))
+
+
+def dtype_from_ctypes_type(t):
+ """
+ Construct a dtype object from a ctypes type
+ """
+ if issubclass(t, _ctypes.Array):
+ return _from_ctypes_array(t)
+ elif issubclass(t, _ctypes._Pointer):
+ raise TypeError("ctypes pointers have no dtype equivalent")
+ elif issubclass(t, _ctypes.Structure):
+ return _from_ctypes_structure(t)
+ elif issubclass(t, _ctypes.Union):
+ return _from_ctypes_union(t)
+ elif isinstance(getattr(t, '_type_', None), str):
+ return _from_ctypes_scalar(t)
+ else:
+ raise NotImplementedError(
+ "Unknown ctypes type {}".format(t.__name__))
diff --git a/numpy/core/_exceptions.py b/numpy/core/_exceptions.py
new file mode 100644
index 000000000..88a45561f
--- /dev/null
+++ b/numpy/core/_exceptions.py
@@ -0,0 +1,200 @@
+"""
+Various richly-typed exceptions, that also help us deal with string formatting
+in python where it's easier.
+
+By putting the formatting in `__str__`, we also avoid paying the cost for
+users who silence the exceptions.
+"""
+from numpy.core.overrides import set_module
+
+def _unpack_tuple(tup):
+ if len(tup) == 1:
+ return tup[0]
+ else:
+ return tup
+
+
+def _display_as_base(cls):
+ """
+ A decorator that makes an exception class look like its base.
+
+ We use this to hide subclasses that are implementation details - the user
+ should catch the base type, which is what the traceback will show them.
+
+ Classes decorated with this decorator are subject to removal without a
+ deprecation warning.
+ """
+ assert issubclass(cls, Exception)
+ cls.__name__ = cls.__base__.__name__
+ cls.__qualname__ = cls.__base__.__qualname__
+ set_module(cls.__base__.__module__)(cls)
+ return cls
+
+
+class UFuncTypeError(TypeError):
+ """ Base class for all ufunc exceptions """
+ def __init__(self, ufunc):
+ self.ufunc = ufunc
+
+
+@_display_as_base
+class _UFuncBinaryResolutionError(UFuncTypeError):
+ """ Thrown when a binary resolution fails """
+ def __init__(self, ufunc, dtypes):
+ super().__init__(ufunc)
+ self.dtypes = tuple(dtypes)
+ assert len(self.dtypes) == 2
+
+ def __str__(self):
+ return (
+ "ufunc {!r} cannot use operands with types {!r} and {!r}"
+ ).format(
+ self.ufunc.__name__, *self.dtypes
+ )
+
+
+@_display_as_base
+class _UFuncNoLoopError(UFuncTypeError):
+ """ Thrown when a ufunc loop cannot be found """
+ def __init__(self, ufunc, dtypes):
+ super().__init__(ufunc)
+ self.dtypes = tuple(dtypes)
+
+ def __str__(self):
+ return (
+ "ufunc {!r} did not contain a loop with signature matching types "
+ "{!r} -> {!r}"
+ ).format(
+ self.ufunc.__name__,
+ _unpack_tuple(self.dtypes[:self.ufunc.nin]),
+ _unpack_tuple(self.dtypes[self.ufunc.nin:])
+ )
+
+
+@_display_as_base
+class _UFuncCastingError(UFuncTypeError):
+ def __init__(self, ufunc, casting, from_, to):
+ super().__init__(ufunc)
+ self.casting = casting
+ self.from_ = from_
+ self.to = to
+
+
+@_display_as_base
+class _UFuncInputCastingError(_UFuncCastingError):
+ """ Thrown when a ufunc input cannot be casted """
+ def __init__(self, ufunc, casting, from_, to, i):
+ super().__init__(ufunc, casting, from_, to)
+ self.in_i = i
+
+ def __str__(self):
+ # only show the number if more than one input exists
+ i_str = "{} ".format(self.in_i) if self.ufunc.nin != 1 else ""
+ return (
+ "Cannot cast ufunc {!r} input {}from {!r} to {!r} with casting "
+ "rule {!r}"
+ ).format(
+ self.ufunc.__name__, i_str, self.from_, self.to, self.casting
+ )
+
+
+@_display_as_base
+class _UFuncOutputCastingError(_UFuncCastingError):
+ """ Thrown when a ufunc output cannot be casted """
+ def __init__(self, ufunc, casting, from_, to, i):
+ super().__init__(ufunc, casting, from_, to)
+ self.out_i = i
+
+ def __str__(self):
+ # only show the number if more than one output exists
+ i_str = "{} ".format(self.out_i) if self.ufunc.nout != 1 else ""
+ return (
+ "Cannot cast ufunc {!r} output {}from {!r} to {!r} with casting "
+ "rule {!r}"
+ ).format(
+ self.ufunc.__name__, i_str, self.from_, self.to, self.casting
+ )
+
+
+# Exception used in shares_memory()
+@set_module('numpy')
+class TooHardError(RuntimeError):
+ pass
+
+
+@set_module('numpy')
+class AxisError(ValueError, IndexError):
+ """ Axis supplied was invalid. """
+ def __init__(self, axis, ndim=None, msg_prefix=None):
+ # single-argument form just delegates to base class
+ if ndim is None and msg_prefix is None:
+ msg = axis
+
+ # do the string formatting here, to save work in the C code
+ else:
+ msg = ("axis {} is out of bounds for array of dimension {}"
+ .format(axis, ndim))
+ if msg_prefix is not None:
+ msg = "{}: {}".format(msg_prefix, msg)
+
+ super(AxisError, self).__init__(msg)
+
+
+@_display_as_base
+class _ArrayMemoryError(MemoryError):
+ """ Thrown when an array cannot be allocated"""
+ def __init__(self, shape, dtype):
+ self.shape = shape
+ self.dtype = dtype
+
+ @property
+ def _total_size(self):
+ num_bytes = self.dtype.itemsize
+ for dim in self.shape:
+ num_bytes *= dim
+ return num_bytes
+
+ @staticmethod
+ def _size_to_string(num_bytes):
+ """ Convert a number of bytes into a binary size string """
+ import math
+
+ # https://en.wikipedia.org/wiki/Binary_prefix
+ LOG2_STEP = 10
+ STEP = 1024
+ units = ['bytes', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB']
+
+ unit_i = max(num_bytes.bit_length() - 1, 1) // LOG2_STEP
+ unit_val = 1 << (unit_i * LOG2_STEP)
+ n_units = num_bytes / unit_val
+ del unit_val
+
+ # ensure we pick a unit that is correct after rounding
+ if round(n_units) == STEP:
+ unit_i += 1
+ n_units /= STEP
+
+ # deal with sizes so large that we don't have units for them
+ if unit_i >= len(units):
+ new_unit_i = len(units) - 1
+ n_units *= 1 << ((unit_i - new_unit_i) * LOG2_STEP)
+ unit_i = new_unit_i
+
+ unit_name = units[unit_i]
+ # format with a sensible number of digits
+ if unit_i == 0:
+ # no decimal point on bytes
+ return '{:.0f} {}'.format(n_units, unit_name)
+ elif round(n_units) < 1000:
+ # 3 significant figures, if none are dropped to the left of the .
+ return '{:#.3g} {}'.format(n_units, unit_name)
+ else:
+ # just give all the digits otherwise
+ return '{:#.0f} {}'.format(n_units, unit_name)
+
+ def __str__(self):
+ size_str = self._size_to_string(self._total_size)
+ return (
+ "Unable to allocate {} for an array with shape {} and data type {}"
+ .format(size_str, self.shape, self.dtype)
+ )
diff --git a/numpy/core/_internal.py b/numpy/core/_internal.py
index c4d967dc2..b0ea603e1 100644
--- a/numpy/core/_internal.py
+++ b/numpy/core/_internal.py
@@ -8,6 +8,7 @@ from __future__ import division, absolute_import, print_function
import re
import sys
+import platform
from numpy.compat import unicode
from .multiarray import dtype, array, ndarray
@@ -16,6 +17,8 @@ try:
except ImportError:
ctypes = None
+IS_PYPY = platform.python_implementation() == 'PyPy'
+
if (sys.byteorder == 'little'):
_nbo = b'<'
else:
@@ -143,7 +146,7 @@ def _reconstruct(subtype, shape, dtype):
# format_re was originally from numarray by J. Todd Miller
format_re = re.compile(br'(?P<order1>[<>|=]?)'
- br'(?P<repeats> *[(]?[ ,0-9L]*[)]? *)'
+ br'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
br'(?P<order2>[<>|=]?)'
br'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(br'\s*,\s*')
@@ -237,19 +240,68 @@ _getintp_ctype.cache = None
class _missing_ctypes(object):
def cast(self, num, obj):
- return num
+ return num.value
+
+ class c_void_p(object):
+ def __init__(self, ptr):
+ self.value = ptr
+
+
+class _unsafe_first_element_pointer(object):
+ """
+ Helper to allow viewing an array as a ctypes pointer to the first element
+
+ This avoids:
+ * dealing with strides
+ * `.view` rejecting object-containing arrays
+ * `memoryview` not supporting overlapping fields
+ """
+ def __init__(self, arr):
+ self.base = arr
+
+ @property
+ def __array_interface__(self):
+ i = dict(
+ shape=(),
+ typestr='|V0',
+ data=(self.base.__array_interface__['data'][0], False),
+ strides=(),
+ version=3,
+ )
+ return i
+
+
+def _get_void_ptr(arr):
+ """
+ Get a `ctypes.c_void_p` to arr.data, that keeps a reference to the array
+ """
+ import numpy as np
+    # convert to a 0d array that has a data pointer referring to the start
+ # of arr. This holds a reference to arr.
+ simple_arr = np.asarray(_unsafe_first_element_pointer(arr))
+
+ # create a `char[0]` using the same memory.
+ c_arr = (ctypes.c_char * 0).from_buffer(simple_arr)
+
+ # finally cast to void*
+ return ctypes.cast(ctypes.pointer(c_arr), ctypes.c_void_p)
- def c_void_p(self, num):
- return num
class _ctypes(object):
def __init__(self, array, ptr=None):
+ self._arr = array
+
if ctypes:
self._ctypes = ctypes
+ # get a void pointer to the buffer, which keeps the array alive
+ self._data = _get_void_ptr(array)
+ assert self._data.value == ptr
else:
+ # fake a pointer-like object that holds onto the reference
self._ctypes = _missing_ctypes()
- self._arr = array
- self._data = ptr
+ self._data = self._ctypes.c_void_p(ptr)
+ self._data._objects = array
+
if self._arr.ndim == 0:
self._zerod = True
else:
@@ -262,6 +314,8 @@ class _ctypes(object):
``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
pointer to a ctypes array of floating-point data:
``self.data_as(ctypes.POINTER(ctypes.c_double))``.
+
+ The returned pointer will keep a reference to the array.
"""
return self._ctypes.cast(self._data, obj)
@@ -283,7 +337,8 @@ class _ctypes(object):
return None
return (obj*self._arr.ndim)(*self._arr.strides)
- def get_data(self):
+ @property
+ def data(self):
"""
A pointer to the memory area of the array as a Python integer.
This memory area may contain data that is not aligned, or not in correct
@@ -292,10 +347,16 @@ class _ctypes(object):
attribute to arbitrary C-code to avoid trouble that can include Python
crashing. User Beware! The value of this attribute is exactly the same
as ``self._array_interface_['data'][0]``.
+
+ Note that unlike `data_as`, a reference will not be kept to the array:
+ code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
+ pointer to a deallocated array, and should be spelt
+ ``(a + b).ctypes.data_as(ctypes.c_void_p)``
"""
- return self._data
+ return self._data.value
- def get_shape(self):
+ @property
+ def shape(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the C-integer corresponding to ``dtype('p')`` on this
@@ -306,7 +367,8 @@ class _ctypes(object):
"""
return self.shape_as(_getintp_ctype())
- def get_strides(self):
+ @property
+ def strides(self):
"""
(c_intp*self.ndim): A ctypes array of length self.ndim where
the basetype is the same as for the shape attribute. This ctypes array
@@ -316,13 +378,20 @@ class _ctypes(object):
"""
return self.strides_as(_getintp_ctype())
- def get_as_parameter(self):
- return self._ctypes.c_void_p(self._data)
+ @property
+ def _as_parameter_(self):
+ """
+ Overrides the ctypes semi-magic method
- data = property(get_data)
- shape = property(get_shape)
- strides = property(get_strides)
- _as_parameter_ = property(get_as_parameter, None, doc="_as parameter_")
+ Enables `c_func(some_array.ctypes)`
+ """
+ return self._data
+
+ # kept for compatibility
+ get_data = data.fget
+ get_shape = shape.fget
+ get_strides = strides.fget
+ get_as_parameter = _as_parameter_.fget
def _newnames(datatype, order):
@@ -390,7 +459,7 @@ def _getfield_is_safe(oldtype, newtype, offset):
if newtype.hasobject or oldtype.hasobject:
if offset == 0 and newtype == oldtype:
return
- if oldtype.names:
+ if oldtype.names is not None:
for name in oldtype.names:
if (oldtype.fields[name][1] == offset and
oldtype.fields[name][0] == newtype):
@@ -482,6 +551,12 @@ _pep3118_standard_map = {
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())
+_pep3118_unsupported_map = {
+ 'u': 'UCS-2 strings',
+ '&': 'pointers',
+ 't': 'bitfields',
+ 'X': 'function pointers',
+}
class _Stream(object):
def __init__(self, s):
@@ -593,6 +668,11 @@ def __dtype_from_pep3118(stream, is_subdtype):
stream.byteorder, stream.byteorder)
value = dtype(numpy_byteorder + dtypechar)
align = value.alignment
+ elif stream.next in _pep3118_unsupported_map:
+ desc = _pep3118_unsupported_map[stream.next]
+ raise NotImplementedError(
+ "Unrepresentable PEP 3118 data type {!r} ({})"
+ .format(stream.next, desc))
else:
raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)
@@ -717,27 +797,6 @@ def _gcd(a, b):
def _lcm(a, b):
return a // _gcd(a, b) * b
-# Exception used in shares_memory()
-class TooHardError(RuntimeError):
- pass
-
-class AxisError(ValueError, IndexError):
- """ Axis supplied was invalid. """
- def __init__(self, axis, ndim=None, msg_prefix=None):
- # single-argument form just delegates to base class
- if ndim is None and msg_prefix is None:
- msg = axis
-
- # do the string formatting here, to save work in the C code
- else:
- msg = ("axis {} is out of bounds for array of dimension {}"
- .format(axis, ndim))
- if msg_prefix is not None:
- msg = "{}: {}".format(msg_prefix, msg)
-
- super(AxisError, self).__init__(msg)
-
-
def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
""" Format the error message for when __array_ufunc__ gives up. """
args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
@@ -750,6 +809,13 @@ def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
.format(ufunc, method, args_string, types_string))
+def array_function_errmsg_formatter(public_api, types):
+    """ Format the error message for when __array_function__ gives up. """
+ func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
+ return ("no implementation found for '{}' on types that implement "
+ '__array_function__: {}'.format(func_name, list(types)))
+
+
def _ufunc_doc_signature_formatter(ufunc):
"""
Builds a signature string which resembles PEP 457
@@ -796,13 +862,18 @@ def _ufunc_doc_signature_formatter(ufunc):
)
-def _is_from_ctypes(obj):
- # determine if an object comes from ctypes, in order to work around
+def npy_ctypes_check(cls):
+ # determine if a class comes from ctypes, in order to work around
# a bug in the buffer protocol for those objects, bpo-10746
try:
# ctypes class are new-style, so have an __mro__. This probably fails
# for ctypes classes with multiple inheritance.
- ctype_base = type(obj).__mro__[-2]
+ if IS_PYPY:
+ # (..., _ctypes.basics._CData, Bufferable, object)
+ ctype_base = cls.__mro__[-3]
+ else:
+            # (..., _ctypes._CData, object)
+ ctype_base = cls.__mro__[-2]
# right now, they're part of the _ctypes module
return 'ctypes' in ctype_base.__module__
except Exception:
diff --git a/numpy/core/_methods.py b/numpy/core/_methods.py
index 8974f0ce1..269e509b8 100644
--- a/numpy/core/_methods.py
+++ b/numpy/core/_methods.py
@@ -9,9 +9,11 @@ import warnings
from numpy.core import multiarray as mu
from numpy.core import umath as um
-from numpy.core.numeric import asanyarray
+from numpy.core._asarray import asanyarray
from numpy.core import numerictypes as nt
+from numpy.core import _exceptions
from numpy._globals import _NoValue
+from numpy.compat import pickle, os_fspath, contextlib_nullcontext
# save those O(100) nanoseconds!
umr_maximum = um.maximum.reduce
@@ -24,20 +26,20 @@ umr_all = um.logical_and.reduce
# avoid keyword arguments to speed up parsing, saves about 15%-20% for very
# small reductions
def _amax(a, axis=None, out=None, keepdims=False,
- initial=_NoValue):
- return umr_maximum(a, axis, None, out, keepdims, initial)
+ initial=_NoValue, where=True):
+ return umr_maximum(a, axis, None, out, keepdims, initial, where)
def _amin(a, axis=None, out=None, keepdims=False,
- initial=_NoValue):
- return umr_minimum(a, axis, None, out, keepdims, initial)
+ initial=_NoValue, where=True):
+ return umr_minimum(a, axis, None, out, keepdims, initial, where)
def _sum(a, axis=None, dtype=None, out=None, keepdims=False,
- initial=_NoValue):
- return umr_sum(a, axis, dtype, out, keepdims, initial)
+ initial=_NoValue, where=True):
+ return umr_sum(a, axis, dtype, out, keepdims, initial, where)
def _prod(a, axis=None, dtype=None, out=None, keepdims=False,
- initial=_NoValue):
- return umr_prod(a, axis, dtype, out, keepdims, initial)
+ initial=_NoValue, where=True):
+ return umr_prod(a, axis, dtype, out, keepdims, initial, where)
def _any(a, axis=None, dtype=None, out=None, keepdims=False):
return umr_any(a, axis, dtype, out, keepdims)
@@ -55,6 +57,80 @@ def _count_reduce_items(arr, axis):
items *= arr.shape[ax]
return items
+# Numpy 1.17.0, 2019-02-24
+# Various clip behavior deprecations, marked with _clip_dep as a prefix.
+
+def _clip_dep_is_scalar_nan(a):
+ # guarded to protect circular imports
+ from numpy.core.fromnumeric import ndim
+ if ndim(a) != 0:
+ return False
+ try:
+ return um.isnan(a)
+ except TypeError:
+ return False
+
+def _clip_dep_is_byte_swapped(a):
+ if isinstance(a, mu.ndarray):
+ return not a.dtype.isnative
+ return False
+
+def _clip_dep_invoke_with_casting(ufunc, *args, out=None, casting=None, **kwargs):
+ # normal path
+ if casting is not None:
+ return ufunc(*args, out=out, casting=casting, **kwargs)
+
+ # try to deal with broken casting rules
+ try:
+ return ufunc(*args, out=out, **kwargs)
+ except _exceptions._UFuncOutputCastingError as e:
+ # Numpy 1.17.0, 2019-02-24
+ warnings.warn(
+ "Converting the output of clip from {!r} to {!r} is deprecated. "
+ "Pass `casting=\"unsafe\"` explicitly to silence this warning, or "
+ "correct the type of the variables.".format(e.from_, e.to),
+ DeprecationWarning,
+ stacklevel=2
+ )
+ return ufunc(*args, out=out, casting="unsafe", **kwargs)
+
+def _clip(a, min=None, max=None, out=None, *, casting=None, **kwargs):
+ if min is None and max is None:
+ raise ValueError("One of max or min must be given")
+
+ # Numpy 1.17.0, 2019-02-24
+ # This deprecation probably incurs a substantial slowdown for small arrays,
+ # it will be good to get rid of it.
+ if not _clip_dep_is_byte_swapped(a) and not _clip_dep_is_byte_swapped(out):
+ using_deprecated_nan = False
+ if _clip_dep_is_scalar_nan(min):
+ min = -float('inf')
+ using_deprecated_nan = True
+ if _clip_dep_is_scalar_nan(max):
+ max = float('inf')
+ using_deprecated_nan = True
+ if using_deprecated_nan:
+ warnings.warn(
+ "Passing `np.nan` to mean no clipping in np.clip has always "
+ "been unreliable, and is now deprecated. "
+ "In future, this will always return nan, like it already does "
+ "when min or max are arrays that contain nan. "
+ "To skip a bound, pass either None or an np.inf of an "
+ "appropriate sign.",
+ DeprecationWarning,
+ stacklevel=2
+ )
+
+ if min is None:
+ return _clip_dep_invoke_with_casting(
+ um.minimum, a, max, out=out, casting=casting, **kwargs)
+ elif max is None:
+ return _clip_dep_invoke_with_casting(
+ um.maximum, a, min, out=out, casting=casting, **kwargs)
+ else:
+ return _clip_dep_invoke_with_casting(
+ um.clip, a, min, max, out=out, casting=casting, **kwargs)
+
def _mean(a, axis=None, dtype=None, out=None, keepdims=False):
arr = asanyarray(a)
@@ -115,10 +191,11 @@ def _var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
# Note that x may not be inexact and that we need it to be an array,
# not a scalar.
x = asanyarray(arr - arrmean)
- if issubclass(arr.dtype.type, nt.complexfloating):
- x = um.multiply(x, um.conjugate(x), out=x).real
- else:
+ if issubclass(arr.dtype.type, (nt.floating, nt.integer)):
x = um.multiply(x, x, out=x)
+ else:
+ x = um.multiply(x, um.conjugate(x), out=x).real
+
ret = umr_sum(x, axis, dtype, out, keepdims)
# Compute degrees of freedom and make sure it is not negative.
@@ -155,17 +232,13 @@ def _ptp(a, axis=None, out=None, keepdims=False):
out
)
-_NDARRAY_ARRAY_FUNCTION = mu.ndarray.__array_function__
-
-def _array_function(self, func, types, args, kwargs):
- # TODO: rewrite this in C
- # Cannot handle items that have __array_function__ other than our own.
- for t in types:
- if t is not mu.ndarray:
- method = getattr(t, '__array_function__', _NDARRAY_ARRAY_FUNCTION)
- if method is not _NDARRAY_ARRAY_FUNCTION:
- return NotImplemented
+def _dump(self, file, protocol=2):
+ if hasattr(file, 'write'):
+ ctx = contextlib_nullcontext(file)
+ else:
+ ctx = open(os_fspath(file), "wb")
+ with ctx as f:
+ pickle.dump(self, f, protocol=protocol)
- # Arguments contain no overrides, so we can safely call the
- # overloaded function again.
- return func(*args, **kwargs)
+def _dumps(self, protocol=2):
+ return pickle.dumps(self, protocol=protocol)
diff --git a/numpy/core/_type_aliases.py b/numpy/core/_type_aliases.py
index 8d629aa07..d6e1a1fb7 100644
--- a/numpy/core/_type_aliases.py
+++ b/numpy/core/_type_aliases.py
@@ -29,6 +29,7 @@ from numpy.compat import unicode
from numpy._globals import VisibleDeprecationWarning
from numpy.core._string_helpers import english_lower, english_capitalize
from numpy.core.multiarray import typeinfo, dtype
+from numpy.core._dtype import _kind_name
sctypeDict = {} # Contains all leaf-node scalar types with aliases
@@ -59,29 +60,7 @@ for k, v in typeinfo.items():
else:
_concrete_typeinfo[k] = v
-_concrete_types = set(v.type for k, v in _concrete_typeinfo.items())
-
-_kind_to_stem = {
- 'u': 'uint',
- 'i': 'int',
- 'c': 'complex',
- 'f': 'float',
- 'b': 'bool',
- 'V': 'void',
- 'O': 'object',
- 'M': 'datetime',
- 'm': 'timedelta'
-}
-if sys.version_info[0] >= 3:
- _kind_to_stem.update({
- 'S': 'bytes',
- 'U': 'str'
- })
-else:
- _kind_to_stem.update({
- 'S': 'string',
- 'U': 'unicode'
- })
+_concrete_types = {v.type for k, v in _concrete_typeinfo.items()}
def _bits_of(obj):
@@ -100,8 +79,9 @@ def _bits_of(obj):
def bitname(obj):
"""Return a bit-width name for a given type object"""
bits = _bits_of(obj)
- char = dtype(obj).kind
- base = _kind_to_stem[char]
+ dt = dtype(obj)
+ char = dt.kind
+ base = _kind_name(dt)
if base == 'object':
bits = 0
diff --git a/numpy/core/_ufunc_config.py b/numpy/core/_ufunc_config.py
new file mode 100644
index 000000000..c3951cc09
--- /dev/null
+++ b/numpy/core/_ufunc_config.py
@@ -0,0 +1,458 @@
+"""
+Functions for changing global ufunc configuration
+
+This provides helpers which wrap `umath.geterrobj` and `umath.seterrobj`
+"""
+from __future__ import division, absolute_import, print_function
+
+try:
+ # Accessing collections abstract classes from collections
+ # has been deprecated since Python 3.3
+ import collections.abc as collections_abc
+except ImportError:
+ import collections as collections_abc
+import contextlib
+
+from .overrides import set_module
+from .umath import (
+ UFUNC_BUFSIZE_DEFAULT,
+ ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT, ERR_LOG, ERR_DEFAULT,
+ SHIFT_DIVIDEBYZERO, SHIFT_OVERFLOW, SHIFT_UNDERFLOW, SHIFT_INVALID,
+)
+from . import umath
+
+__all__ = [
+ "seterr", "geterr", "setbufsize", "getbufsize", "seterrcall", "geterrcall",
+ "errstate",
+]
+
+_errdict = {"ignore": ERR_IGNORE,
+ "warn": ERR_WARN,
+ "raise": ERR_RAISE,
+ "call": ERR_CALL,
+ "print": ERR_PRINT,
+ "log": ERR_LOG}
+
+_errdict_rev = {value: key for key, value in _errdict.items()}
+
+
+@set_module('numpy')
+def seterr(all=None, divide=None, over=None, under=None, invalid=None):
+ """
+ Set how floating-point errors are handled.
+
+ Note that operations on integer scalar types (such as `int16`) are
+ handled like floating point, and are affected by these settings.
+
+ Parameters
+ ----------
+ all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Set treatment for all types of floating-point errors at once:
+
+ - ignore: Take no action when the exception occurs.
+ - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
+ - raise: Raise a `FloatingPointError`.
+ - call: Call a function specified using the `seterrcall` function.
+ - print: Print a warning directly to ``stdout``.
+ - log: Record error in a Log object specified by `seterrcall`.
+
+ The default is not to change the current behavior.
+ divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for division by zero.
+ over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point overflow.
+ under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for floating-point underflow.
+ invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
+ Treatment for invalid floating-point operation.
+
+ Returns
+ -------
+ old_settings : dict
+ Dictionary containing the old settings.
+
+ See also
+ --------
+ seterrcall : Set a callback function for the 'call' mode.
+ geterr, geterrcall, errstate
+
+ Notes
+ -----
+ The floating-point exceptions are defined in the IEEE 754 standard [1]_:
+
+ - Division by zero: infinite result obtained from finite numbers.
+ - Overflow: result too large to be expressed.
+ - Underflow: result so close to zero that some precision
+ was lost.
+ - Invalid operation: result is not an expressible number, typically
+ indicates that a NaN was produced.
+
+ .. [1] https://en.wikipedia.org/wiki/IEEE_754
+
+ Examples
+ --------
+ >>> old_settings = np.seterr(all='ignore') #seterr to known value
+ >>> np.seterr(over='raise')
+ {'divide': 'ignore', 'over': 'ignore', 'under': 'ignore', 'invalid': 'ignore'}
+ >>> np.seterr(**old_settings) # reset to default
+ {'divide': 'ignore', 'over': 'raise', 'under': 'ignore', 'invalid': 'ignore'}
+
+ >>> np.int16(32000) * np.int16(3)
+ 30464
+ >>> old_settings = np.seterr(all='warn', over='raise')
+ >>> np.int16(32000) * np.int16(3)
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ FloatingPointError: overflow encountered in short_scalars
+
+ >>> from collections import OrderedDict
+ >>> old_settings = np.seterr(all='print')
+ >>> OrderedDict(np.geterr())
+ OrderedDict([('divide', 'print'), ('over', 'print'), ('under', 'print'), ('invalid', 'print')])
+ >>> np.int16(32000) * np.int16(3)
+ 30464
+
+ """
+
+ pyvals = umath.geterrobj()
+ old = geterr()
+
+ if divide is None:
+ divide = all or old['divide']
+ if over is None:
+ over = all or old['over']
+ if under is None:
+ under = all or old['under']
+ if invalid is None:
+ invalid = all or old['invalid']
+
+ maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
+ (_errdict[over] << SHIFT_OVERFLOW) +
+ (_errdict[under] << SHIFT_UNDERFLOW) +
+ (_errdict[invalid] << SHIFT_INVALID))
+
+ pyvals[1] = maskvalue
+ umath.seterrobj(pyvals)
+ return old
+
+
+@set_module('numpy')
+def geterr():
+ """
+ Get the current way of handling floating-point errors.
+
+ Returns
+ -------
+ res : dict
+ A dictionary with keys "divide", "over", "under", and "invalid",
+ whose values are from the strings "ignore", "print", "log", "warn",
+ "raise", and "call". The keys represent possible floating-point
+ exceptions, and the values define how these exceptions are handled.
+
+ See Also
+ --------
+ geterrcall, seterr, seterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> from collections import OrderedDict
+ >>> sorted(np.geterr().items())
+ [('divide', 'warn'), ('invalid', 'warn'), ('over', 'warn'), ('under', 'ignore')]
+ >>> np.arange(3.) / np.arange(3.)
+ array([nan, 1., 1.])
+
+ >>> oldsettings = np.seterr(all='warn', over='raise')
+ >>> OrderedDict(sorted(np.geterr().items()))
+ OrderedDict([('divide', 'warn'), ('invalid', 'warn'), ('over', 'raise'), ('under', 'warn')])
+ >>> np.arange(3.) / np.arange(3.)
+ array([nan, 1., 1.])
+
+ """
+ maskvalue = umath.geterrobj()[1]
+ mask = 7
+ res = {}
+ val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
+ res['divide'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_OVERFLOW) & mask
+ res['over'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_UNDERFLOW) & mask
+ res['under'] = _errdict_rev[val]
+ val = (maskvalue >> SHIFT_INVALID) & mask
+ res['invalid'] = _errdict_rev[val]
+ return res
+
+
+@set_module('numpy')
+def setbufsize(size):
+ """
+ Set the size of the buffer used in ufuncs.
+
+ Parameters
+ ----------
+ size : int
+ Size of buffer.
+
+ """
+ if size > 10e6:
+ raise ValueError("Buffer size, %s, is too big." % size)
+ if size < 5:
+ raise ValueError("Buffer size, %s, is too small." % size)
+ if size % 16 != 0:
+ raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
+
+ pyvals = umath.geterrobj()
+ old = getbufsize()
+ pyvals[0] = size
+ umath.seterrobj(pyvals)
+ return old
+
+
+@set_module('numpy')
+def getbufsize():
+ """
+ Return the size of the buffer used in ufuncs.
+
+ Returns
+ -------
+ getbufsize : int
+ Size of ufunc buffer in bytes.
+
+ """
+ return umath.geterrobj()[0]
+
+
+@set_module('numpy')
+def seterrcall(func):
+ """
+ Set the floating-point error callback function or log object.
+
+ There are two ways to capture floating-point error messages. The first
+ is to set the error-handler to 'call', using `seterr`. Then, set
+ the function to call using this function.
+
+ The second is to set the error-handler to 'log', using `seterr`.
+ Floating-point errors then trigger a call to the 'write' method of
+ the provided object.
+
+ Parameters
+ ----------
+ func : callable f(err, flag) or object with write method
+ Function to call upon floating-point errors ('call'-mode) or
+        object whose 'write' method is used to log such messages ('log'-mode).
+
+ The call function takes two arguments. The first is a string describing
+ the type of error (such as "divide by zero", "overflow", "underflow",
+ or "invalid value"), and the second is the status flag. The flag is a
+ byte, whose four least-significant bits indicate the type of error, one
+ of "divide", "over", "under", "invalid"::
+
+ [0 0 0 0 divide over under invalid]
+
+ In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
+
+ If an object is provided, its write method should take one argument,
+ a string.
+
+ Returns
+ -------
+ h : callable, log instance or None
+ The old error handler.
+
+ See Also
+ --------
+ seterr, geterr, geterrcall
+
+ Examples
+ --------
+ Callback upon error:
+
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ ...
+
+ >>> saved_handler = np.seterrcall(err_handler)
+ >>> save_err = np.seterr(all='call')
+ >>> from collections import OrderedDict
+
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([inf, inf, inf])
+
+ >>> np.seterrcall(saved_handler)
+ <function err_handler at 0x...>
+ >>> OrderedDict(sorted(np.seterr(**save_err).items()))
+ OrderedDict([('divide', 'call'), ('invalid', 'call'), ('over', 'call'), ('under', 'call')])
+
+ Log error message:
+
+ >>> class Log(object):
+ ... def write(self, msg):
+ ... print("LOG: %s" % msg)
+ ...
+
+ >>> log = Log()
+ >>> saved_handler = np.seterrcall(log)
+ >>> save_err = np.seterr(all='log')
+
+ >>> np.array([1, 2, 3]) / 0.0
+ LOG: Warning: divide by zero encountered in true_divide
+ array([inf, inf, inf])
+
+ >>> np.seterrcall(saved_handler)
+ <numpy.core.numeric.Log object at 0x...>
+ >>> OrderedDict(sorted(np.seterr(**save_err).items()))
+ OrderedDict([('divide', 'log'), ('invalid', 'log'), ('over', 'log'), ('under', 'log')])
+
+ """
+ if func is not None and not isinstance(func, collections_abc.Callable):
+ if not hasattr(func, 'write') or not isinstance(func.write, collections_abc.Callable):
+ raise ValueError("Only callable can be used as callback")
+ pyvals = umath.geterrobj()
+ old = geterrcall()
+ pyvals[2] = func
+ umath.seterrobj(pyvals)
+ return old
+
+
+@set_module('numpy')
+def geterrcall():
+ """
+ Return the current callback function used on floating-point errors.
+
+ When the error handling for a floating-point error (one of "divide",
+ "over", "under", or "invalid") is set to 'call' or 'log', the function
+ that is called or the log instance that is written to is returned by
+ `geterrcall`. This function or log instance has been set with
+ `seterrcall`.
+
+ Returns
+ -------
+ errobj : callable, log instance or None
+ The current error handler. If no handler was set through `seterrcall`,
+ ``None`` is returned.
+
+ See Also
+ --------
+ seterrcall, seterr, geterr
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> np.geterrcall() # we did not yet set a handler, returns None
+
+ >>> oldsettings = np.seterr(all='call')
+ >>> def err_handler(type, flag):
+ ... print("Floating point error (%s), with flag %s" % (type, flag))
+ >>> oldhandler = np.seterrcall(err_handler)
+ >>> np.array([1, 2, 3]) / 0.0
+ Floating point error (divide by zero), with flag 1
+ array([inf, inf, inf])
+
+ >>> cur_handler = np.geterrcall()
+ >>> cur_handler is err_handler
+ True
+
+ """
+ return umath.geterrobj()[2]
+
+
+class _unspecified(object):
+ pass
+
+
+_Unspecified = _unspecified()
+
+
+@set_module('numpy')
+class errstate(contextlib.ContextDecorator):
+ """
+ errstate(**kwargs)
+
+ Context manager for floating-point error handling.
+
+ Using an instance of `errstate` as a context manager allows statements in
+ that context to execute with a known error handling behavior. Upon entering
+ the context the error handling is set with `seterr` and `seterrcall`, and
+ upon exiting it is reset to what it was before.
+
+ .. versionchanged:: 1.17.0
+ `errstate` is also usable as a function decorator, saving
+ a level of indentation if an entire function is wrapped.
+ See :py:class:`contextlib.ContextDecorator` for more information.
+
+ Parameters
+ ----------
+ kwargs : {divide, over, under, invalid}
+ Keyword arguments. The valid keywords are the possible floating-point
+ exceptions. Each keyword should have a string value that defines the
+ treatment for the particular error. Possible values are
+ {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
+
+ See Also
+ --------
+ seterr, geterr, seterrcall, geterrcall
+
+ Notes
+ -----
+ For complete documentation of the types of floating-point exceptions and
+ treatment options, see `seterr`.
+
+ Examples
+ --------
+ >>> from collections import OrderedDict
+ >>> olderr = np.seterr(all='ignore') # Set error handling to known state.
+
+ >>> np.arange(3) / 0.
+ array([nan, inf, inf])
+ >>> with np.errstate(divide='warn'):
+ ... np.arange(3) / 0.
+ array([nan, inf, inf])
+
+ >>> np.sqrt(-1)
+ nan
+ >>> with np.errstate(invalid='raise'):
+ ... np.sqrt(-1)
+ Traceback (most recent call last):
+ File "<stdin>", line 2, in <module>
+ FloatingPointError: invalid value encountered in sqrt
+
+ Outside the context the error handling behavior has not changed:
+
+ >>> OrderedDict(sorted(np.geterr().items()))
+ OrderedDict([('divide', 'ignore'), ('invalid', 'ignore'), ('over', 'ignore'), ('under', 'ignore')])
+
+ """
+ # Note that we don't want to run the above doctests because they will fail
+    # without a ``from __future__ import with_statement``
+
+ def __init__(self, **kwargs):
+ self.call = kwargs.pop('call', _Unspecified)
+ self.kwargs = kwargs
+
+ def __enter__(self):
+ self.oldstate = seterr(**self.kwargs)
+ if self.call is not _Unspecified:
+ self.oldcall = seterrcall(self.call)
+
+ def __exit__(self, *exc_info):
+ seterr(**self.oldstate)
+ if self.call is not _Unspecified:
+ seterrcall(self.oldcall)
+
+
+def _setdef():
+ defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
+ umath.seterrobj(defval)
+
+
+# set the default values
+_setdef()
diff --git a/numpy/core/arrayprint.py b/numpy/core/arrayprint.py
index 960e64ca3..8a7626d9d 100644
--- a/numpy/core/arrayprint.py
+++ b/numpy/core/arrayprint.py
@@ -26,6 +26,7 @@ __docformat__ = 'restructuredtext'
import sys
import functools
+import numbers
if sys.version_info[0] >= 3:
try:
from _thread import get_ident
@@ -48,6 +49,7 @@ from .fromnumeric import ravel, any
from .numeric import concatenate, asarray, errstate
from .numerictypes import (longlong, intc, int_, float_, complex_, bool_,
flexible)
+from .overrides import array_function_dispatch, set_module
import warnings
import contextlib
@@ -85,9 +87,17 @@ def _make_options_dict(precision=None, threshold=None, edgeitems=None,
if legacy not in [None, False, '1.13']:
warnings.warn("legacy printing option can currently only be '1.13' or "
"`False`", stacklevel=3)
-
+ if threshold is not None:
+ # forbid the bad threshold arg suggested by stack overflow, gh-12351
+ if not isinstance(threshold, numbers.Number):
+ raise TypeError("threshold must be numeric")
+ if np.isnan(threshold):
+ raise ValueError("threshold must be non-NAN, try "
+ "sys.maxsize for untruncated representation")
return options
+
+@set_module('numpy')
def set_printoptions(precision=None, threshold=None, edgeitems=None,
linewidth=None, suppress=None, nanstr=None, infstr=None,
formatter=None, sign=None, floatmode=None, **kwarg):
@@ -106,6 +116,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr (default 1000).
+ To always use the full repr without summarization, pass `sys.maxsize`.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension (default 3).
@@ -155,7 +166,8 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
- 'str_kind' : sets 'str' and 'numpystr'
floatmode : str, optional
Controls the interpretation of the `precision` option for
- floating-point types. Can take the following values:
+ floating-point types. Can take the following values
+ (default maxprec_equal):
* 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
@@ -182,32 +194,34 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
See Also
--------
- get_printoptions, set_string_function, array2string
+ get_printoptions, printoptions, set_string_function, array2string
Notes
-----
`formatter` is always reset with a call to `set_printoptions`.
+ Use `printoptions` as a context manager to set the values temporarily.
+
Examples
--------
Floating point precision can be set:
>>> np.set_printoptions(precision=4)
- >>> print(np.array([1.123456789]))
- [ 1.1235]
+ >>> np.array([1.123456789])
+ [1.1235]
Long arrays can be summarised:
>>> np.set_printoptions(threshold=5)
- >>> print(np.arange(10))
- [0 1 2 ..., 7 8 9]
+ >>> np.arange(10)
+ array([0, 1, 2, ..., 7, 8, 9])
Small results can be suppressed:
>>> eps = np.finfo(float).eps
>>> x = np.arange(4.)
>>> x**2 - (x + eps)**2
- array([ -4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
+ array([-4.9304e-32, -4.4409e-16, 0.0000e+00, 0.0000e+00])
>>> np.set_printoptions(suppress=True)
>>> x**2 - (x + eps)**2
array([-0., -0., 0., 0.])
@@ -224,9 +238,16 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
To put back the default options, you can use:
- >>> np.set_printoptions(edgeitems=3,infstr='inf',
+ >>> np.set_printoptions(edgeitems=3, infstr='inf',
... linewidth=75, nanstr='nan', precision=8,
... suppress=False, threshold=1000, formatter=None)
+
+ Also to temporarily override options, use `printoptions` as a context manager:
+
+ >>> with np.printoptions(precision=2, suppress=True, threshold=5):
+ ... np.linspace(0, 10, 10)
+ array([ 0. , 1.11, 2.22, ..., 7.78, 8.89, 10. ])
+
"""
legacy = kwarg.pop('legacy', None)
if kwarg:
@@ -249,6 +270,7 @@ def set_printoptions(precision=None, threshold=None, edgeitems=None,
set_legacy_print_mode(0)
+@set_module('numpy')
def get_printoptions():
"""
Return the current print options.
@@ -272,12 +294,13 @@ def get_printoptions():
See Also
--------
- set_printoptions, set_string_function
+ set_printoptions, printoptions, set_string_function
"""
return _format_options.copy()
+@set_module('numpy')
@contextlib.contextmanager
def printoptions(*args, **kwargs):
"""Context manager for setting print options.
@@ -289,9 +312,10 @@ def printoptions(*args, **kwargs):
Examples
--------
+ >>> from numpy.testing import assert_equal
>>> with np.printoptions(precision=2):
- ... print(np.array([2.0])) / 3
- [0.67]
+ ... np.array([2.0]) / 3
+ array([0.67])
The `as`-clause of the `with`-statement gives the current print options:
@@ -496,6 +520,16 @@ def _array2string(a, options, separator=' ', prefix=""):
return lst
+def _array2string_dispatcher(
+ a, max_line_width=None, precision=None,
+ suppress_small=None, separator=None, prefix=None,
+ style=None, formatter=None, threshold=None,
+ edgeitems=None, sign=None, floatmode=None, suffix=None,
+ **kwarg):
+ return (a,)
+
+
+@array_function_dispatch(_array2string_dispatcher, module='numpy')
def array2string(a, max_line_width=None, precision=None,
suppress_small=None, separator=' ', prefix="",
style=np._NoValue, formatter=None, threshold=None,
@@ -509,14 +543,17 @@ def array2string(a, max_line_width=None, precision=None,
a : array_like
Input array.
max_line_width : int, optional
- The maximum number of columns the string should span. Newline
- characters splits the string appropriately after array elements.
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int or None, optional
- Floating point precision. Default is the current printing
- precision (usually 8), which can be altered using `set_printoptions`.
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
- Represent very small numbers as zero. A number is "very small" if it
- is smaller than the current printing precision.
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
separator : str, optional
Inserted between elements.
prefix : str, optional
@@ -563,17 +600,22 @@ def array2string(a, max_line_width=None, precision=None,
threshold : int, optional
Total number of array elements which trigger summarization
rather than full repr.
+ Defaults to ``numpy.get_printoptions()['threshold']``.
edgeitems : int, optional
Number of array items in summary at beginning and end of
each dimension.
+ Defaults to ``numpy.get_printoptions()['edgeitems']``.
sign : string, either '-', '+', or ' ', optional
Controls printing of the sign of floating-point types. If '+', always
print the sign of positive values. If ' ', always prints a space
(whitespace character) in the sign position of positive values. If
'-', omit the sign character of positive values.
+ Defaults to ``numpy.get_printoptions()['sign']``.
floatmode : str, optional
Controls the interpretation of the `precision` option for
- floating-point types. Can take the following values:
+ floating-point types.
+ Defaults to ``numpy.get_printoptions()['floatmode']``.
+ Can take the following values:
- 'fixed': Always print exactly `precision` fractional digits,
even if this would print more or fewer digits than
@@ -624,9 +666,9 @@ def array2string(a, max_line_width=None, precision=None,
Examples
--------
>>> x = np.array([1e-16,1,2,3])
- >>> print(np.array2string(x, precision=2, separator=',',
- ... suppress_small=True))
- [ 0., 1., 2., 3.]
+ >>> np.array2string(x, precision=2, separator=',',
+ ... suppress_small=True)
+ '[0.,1.,2.,3.]'
>>> x = np.arange(3.)
>>> np.array2string(x, formatter={'float_kind':lambda x: "%.2f" % x})
@@ -634,7 +676,7 @@ def array2string(a, max_line_width=None, precision=None,
>>> x = np.arange(3)
>>> np.array2string(x, formatter={'int':lambda x: hex(x)})
- '[0x0L 0x1L 0x2L]'
+ '[0x0 0x1 0x2]'
"""
legacy = kwarg.pop('legacy', None)
@@ -652,7 +694,7 @@ def array2string(a, max_line_width=None, precision=None,
if style is np._NoValue:
style = repr
- if a.shape == () and not a.dtype.names:
+ if a.shape == () and a.dtype.names is None:
return style(a.item())
elif style is not np._NoValue:
# Deprecation 11-9-2017 v1.14
@@ -951,20 +993,8 @@ class FloatingFormat(object):
pad_left=self.pad_left,
pad_right=self.pad_right)
-# for back-compatibility, we keep the classes for each float type too
-class FloatFormat(FloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn("FloatFormat has been replaced by FloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(FloatFormat, self).__init__(*args, **kwargs)
-
-
-class LongFloatFormat(FloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn("LongFloatFormat has been replaced by FloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(LongFloatFormat, self).__init__(*args, **kwargs)
+@set_module('numpy')
def format_float_scientific(x, precision=None, unique=True, trim='k',
sign=False, pad_left=None, exp_digits=None):
"""
@@ -1032,6 +1062,8 @@ def format_float_scientific(x, precision=None, unique=True, trim='k',
trim=trim, sign=sign, pad_left=pad_left,
exp_digits=exp_digits)
+
+@set_module('numpy')
def format_float_positional(x, precision=None, unique=True,
fractional=True, trim='k', sign=False,
pad_left=None, pad_right=None):
@@ -1159,21 +1191,6 @@ class ComplexFloatingFormat(object):
return r + i
-# for back-compatibility, we keep the classes for each complex type too
-class ComplexFormat(ComplexFloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn(
- "ComplexFormat has been replaced by ComplexFloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(ComplexFormat, self).__init__(*args, **kwargs)
-
-class LongComplexFormat(ComplexFloatingFormat):
- def __init__(self, *args, **kwargs):
- warnings.warn(
- "LongComplexFormat has been replaced by ComplexFloatingFormat",
- DeprecationWarning, stacklevel=2)
- super(LongComplexFormat, self).__init__(*args, **kwargs)
-
class _TimelikeFormat(object):
def __init__(self, data):
@@ -1284,16 +1301,6 @@ class StructuredVoidFormat(object):
return "({})".format(", ".join(str_fields))
-# for backwards compatibility
-class StructureFormat(StructuredVoidFormat):
- def __init__(self, *args, **kwargs):
- # NumPy 1.14, 2018-02-14
- warnings.warn(
- "StructureFormat has been replaced by StructuredVoidFormat",
- DeprecationWarning, stacklevel=2)
- super(StructureFormat, self).__init__(*args, **kwargs)
-
-
def _void_scalar_repr(x):
"""
Implements the repr for structured-void scalars. It is called from the
@@ -1333,7 +1340,7 @@ def dtype_is_implied(dtype):
>>> np.core.arrayprint.dtype_is_implied(np.int8)
False
>>> np.array([1, 2, 3], np.int8)
- array([1, 2, 3], dtype=np.int8)
+ array([1, 2, 3], dtype=int8)
"""
dtype = np.dtype(dtype)
if _format_options['legacy'] == '1.13' and dtype.type == bool_:
@@ -1353,6 +1360,7 @@ def dtype_short_repr(dtype):
The intent is roughly that the following holds
>>> from numpy import *
+ >>> dt = np.int64([1, 2]).dtype
>>> assert eval(dtype_short_repr(dt)) == dt
"""
if dtype.names is not None:
@@ -1370,48 +1378,10 @@ def dtype_short_repr(dtype):
return typename
-def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
- """
- Return the string representation of an array.
-
- Parameters
- ----------
- arr : ndarray
- Input array.
- max_line_width : int, optional
- The maximum number of columns the string should span. Newline
- characters split the string appropriately after array elements.
- precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
- suppress_small : bool, optional
- Represent very small numbers as zero, default is False. Very small
- is defined by `precision`, if the precision is 8 then
- numbers smaller than 5e-9 are represented as zero.
-
- Returns
- -------
- string : str
- The string representation of an array.
-
- See Also
- --------
- array_str, array2string, set_printoptions
-
- Examples
- --------
- >>> np.array_repr(np.array([1,2]))
- 'array([1, 2])'
- >>> np.array_repr(np.ma.array([0.]))
- 'MaskedArray([ 0.])'
- >>> np.array_repr(np.array([], np.int32))
- 'array([], dtype=int32)'
-
- >>> x = np.array([1e-6, 4e-7, 2, 3])
- >>> np.array_repr(x, precision=6, suppress_small=True)
- 'array([ 0.000001, 0. , 2. , 3. ])'
-
- """
+def _array_repr_implementation(
+ arr, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_repr() that allows overriding array2string."""
if max_line_width is None:
max_line_width = _format_options['linewidth']
@@ -1454,42 +1424,72 @@ def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
return arr_str + spacer + dtype_str
-_guarded_str = _recursive_guard()(str)
-def array_str(a, max_line_width=None, precision=None, suppress_small=None):
- """
- Return a string representation of the data in an array.
+def _array_repr_dispatcher(
+ arr, max_line_width=None, precision=None, suppress_small=None):
+ return (arr,)
- The data in the array is returned as a single string. This function is
- similar to `array_repr`, the difference being that `array_repr` also
- returns information on the kind of array and its data type.
+
+@array_function_dispatch(_array_repr_dispatcher, module='numpy')
+def array_repr(arr, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return the string representation of an array.
Parameters
----------
- a : ndarray
+ arr : ndarray
Input array.
max_line_width : int, optional
- Inserts newlines if text is longer than `max_line_width`. The
- default is, indirectly, 75.
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
precision : int, optional
- Floating point precision. Default is the current printing precision
- (usually 8), which can be altered using `set_printoptions`.
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
suppress_small : bool, optional
Represent numbers "very close" to zero as zero; default is False.
Very close is defined by precision: if the precision is 8, e.g.,
numbers smaller (in absolute value) than 5e-9 are represented as
zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+
+ Returns
+ -------
+ string : str
+ The string representation of an array.
See Also
--------
- array2string, array_repr, set_printoptions
+ array_str, array2string, set_printoptions
Examples
--------
- >>> np.array_str(np.arange(3))
- '[0 1 2]'
+ >>> np.array_repr(np.array([1,2]))
+ 'array([1, 2])'
+ >>> np.array_repr(np.ma.array([0.]))
+ 'MaskedArray([0.])'
+ >>> np.array_repr(np.array([], np.int32))
+ 'array([], dtype=int32)'
+
+ >>> x = np.array([1e-6, 4e-7, 2, 3])
+ >>> np.array_repr(x, precision=6, suppress_small=True)
+ 'array([0.000001, 0. , 2. , 3. ])'
"""
+ return _array_repr_implementation(
+ arr, max_line_width, precision, suppress_small)
+
+
+@_recursive_guard()
+def _guarded_repr_or_str(v):
+ if isinstance(v, bytes):
+ return repr(v)
+ return str(v)
+
+
+def _array_str_implementation(
+ a, max_line_width=None, precision=None, suppress_small=None,
+ array2string=array2string):
+ """Internal version of array_str() that allows overriding array2string."""
if (_format_options['legacy'] == '1.13' and
a.shape == () and not a.dtype.names):
return str(a.item())
@@ -1501,10 +1501,64 @@ def array_str(a, max_line_width=None, precision=None, suppress_small=None):
# obtain a scalar and call str on it, avoiding problems for subclasses
# for which indexing with () returns a 0d instead of a scalar by using
# ndarray's getindex. Also guard against recursive 0d object arrays.
- return _guarded_str(np.ndarray.__getitem__(a, ()))
+ return _guarded_repr_or_str(np.ndarray.__getitem__(a, ()))
return array2string(a, max_line_width, precision, suppress_small, ' ', "")
+
+def _array_str_dispatcher(
+ a, max_line_width=None, precision=None, suppress_small=None):
+ return (a,)
+
+
+@array_function_dispatch(_array_str_dispatcher, module='numpy')
+def array_str(a, max_line_width=None, precision=None, suppress_small=None):
+ """
+ Return a string representation of the data in an array.
+
+ The data in the array is returned as a single string. This function is
+ similar to `array_repr`, the difference being that `array_repr` also
+ returns information on the kind of array and its data type.
+
+ Parameters
+ ----------
+ a : ndarray
+ Input array.
+ max_line_width : int, optional
+ Inserts newlines if text is longer than `max_line_width`.
+ Defaults to ``numpy.get_printoptions()['linewidth']``.
+ precision : int, optional
+ Floating point precision.
+ Defaults to ``numpy.get_printoptions()['precision']``.
+ suppress_small : bool, optional
+ Represent numbers "very close" to zero as zero; default is False.
+ Very close is defined by precision: if the precision is 8, e.g.,
+ numbers smaller (in absolute value) than 5e-9 are represented as
+ zero.
+ Defaults to ``numpy.get_printoptions()['suppress']``.
+
+ See Also
+ --------
+ array2string, array_repr, set_printoptions
+
+ Examples
+ --------
+ >>> np.array_str(np.arange(3))
+ '[0 1 2]'
+
+ """
+ return _array_str_implementation(
+ a, max_line_width, precision, suppress_small)
+
+
+# needed if __array_function__ is disabled
+_array2string_impl = getattr(array2string, '__wrapped__', array2string)
+_default_array_str = functools.partial(_array_str_implementation,
+ array2string=_array2string_impl)
+_default_array_repr = functools.partial(_array_repr_implementation,
+ array2string=_array2string_impl)
+
+
def set_string_function(f, repr=True):
"""
Set a Python function to be used when pretty printing arrays.
@@ -1534,8 +1588,8 @@ def set_string_function(f, repr=True):
>>> a = np.arange(10)
>>> a
HA! - What are you going to do now?
- >>> print(a)
- [0 1 2 3 4 5 6 7 8 9]
+ >>> _ = a
+ >>> # [0 1 2 3 4 5 6 7 8 9]
We can reset the function to the default:
@@ -1553,16 +1607,16 @@ def set_string_function(f, repr=True):
>>> x.__str__()
'random'
>>> x.__repr__()
- 'array([ 0, 1, 2, 3])'
+ 'array([0, 1, 2, 3])'
"""
if f is None:
if repr:
- return multiarray.set_string_function(array_repr, 1)
+ return multiarray.set_string_function(_default_array_repr, 1)
else:
- return multiarray.set_string_function(array_str, 0)
+ return multiarray.set_string_function(_default_array_str, 0)
else:
return multiarray.set_string_function(f, repr)
-set_string_function(array_str, 0)
-set_string_function(array_repr, 1)
+set_string_function(_default_array_str, False)
+set_string_function(_default_array_repr, True)
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 43c32eac6..00f10df57 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -39,7 +39,12 @@
0x0000000b = edb1ba83730c650fd9bc5772a919cda7
# Version 12 (NumPy 1.14) Added PyArray_ResolveWritebackIfCopy,
-# Version 12 (NumPy 1.15) No change.
# PyArray_SetWritebackIfCopyBase and deprecated PyArray_SetUpdateIfCopyBase.
+# Version 12 (NumPy 1.15) No change.
0x0000000c = a1bc756c5782853ec2e3616cf66869d8
+# Version 13 (NumPy 1.16)
+# Deprecate PyArray_SetNumericOps and PyArray_GetNumericOps,
+# Add fields core_dim_flags and core_dim_sizes to PyUFuncObject.
+# Add PyUFunc_FromFuncAndDataAndSignatureAndIdentity to ufunc_funcs_api.
+0x0000000d = 5b0e8bbded00b166125974fc71e80a33
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 42c564a97..7336e5e13 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -19,6 +19,7 @@ __docformat__ = 'restructuredtext'
# The files under src/ that are scanned for API functions
API_FILES = [join('multiarray', 'alloc.c'),
+ join('multiarray', 'arrayfunction_override.c'),
join('multiarray', 'array_assign_array.c'),
join('multiarray', 'array_assign_scalar.c'),
join('multiarray', 'arrayobject.c'),
@@ -163,9 +164,7 @@ def skip_brackets(s, lbrac, rbrac):
def split_arguments(argstr):
arguments = []
- bracket_counts = {'(': 0, '[': 0}
current_argument = []
- state = 0
i = 0
def finish_arg():
if current_argument:
@@ -260,7 +259,8 @@ def find_functions(filename, tag='API'):
elif state == STATE_ARGS:
if line.startswith('{'):
# finished
- fargs_str = ' '.join(function_args).rstrip(' )')
+ # remove any white space and the closing bracket:
+ fargs_str = ' '.join(function_args).rstrip()[:-1].rstrip()
fargs = split_arguments(fargs_str)
f = Function(function_name, return_type, fargs,
'\n'.join(doclist))
@@ -400,9 +400,7 @@ class FunctionApi(object):
return " (void *) %s" % self.name
def internal_define(self):
- annstr = []
- for a in self.annotations:
- annstr.append(str(a))
+ annstr = [str(a) for a in self.annotations]
annstr = ' '.join(annstr)
astr = """\
NPY_NO_EXPORT %s %s %s \\\n (%s);""" % (annstr, self.return_type,
@@ -463,10 +461,7 @@ def get_api_functions(tagname, api_dict):
functions = []
for f in API_FILES:
functions.extend(find_functions(f, tagname))
- dfunctions = []
- for func in functions:
- o = api_dict[func.name][0]
- dfunctions.append( (o, func) )
+ dfunctions = [(api_dict[func.name][0], func) for func in functions]
dfunctions.sort()
return [a[1] for a in dfunctions]
@@ -489,14 +484,11 @@ def get_versions_hash():
d = []
file = os.path.join(os.path.dirname(__file__), 'cversions.txt')
- fid = open(file, 'r')
- try:
+ with open(file, 'r') as fid:
for line in fid:
m = VERRE.match(line)
if m:
d.append((int(m.group(1), 16), m.group(2)))
- finally:
- fid.close()
return dict(d)
diff --git a/numpy/core/code_generators/generate_numpy_api.py b/numpy/core/code_generators/generate_numpy_api.py
index 7f2541667..5e04fb86d 100644
--- a/numpy/core/code_generators/generate_numpy_api.py
+++ b/numpy/core/code_generators/generate_numpy_api.py
@@ -50,7 +50,6 @@ _import_array(void)
PyObject *c_api = NULL;
if (numpy == NULL) {
- PyErr_SetString(PyExc_ImportError, "numpy.core._multiarray_umath failed to import");
return -1;
}
c_api = PyObject_GetAttrString(numpy, "_ARRAY_API");
@@ -193,7 +192,9 @@ def do_generate_api(targets, sources):
genapi.check_api_dict(multiarray_api_index)
numpyapi_list = genapi.get_api_functions('NUMPY_API',
- multiarray_funcs)
+ multiarray_funcs)
+
+ # FIXME: ordered_funcs_api is unused
ordered_funcs_api = genapi.order_dict(multiarray_funcs)
# Create dict name -> *Api instance
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index 6dc01877b..0d3bbffe9 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -10,11 +10,14 @@ sys.path.insert(0, os.path.dirname(__file__))
import ufunc_docstrings as docstrings
sys.path.pop(0)
-Zero = "PyUFunc_Zero"
-One = "PyUFunc_One"
-None_ = "PyUFunc_None"
-AllOnes = "PyUFunc_MinusOne"
-ReorderableNone = "PyUFunc_ReorderableNone"
+Zero = "PyInt_FromLong(0)"
+One = "PyInt_FromLong(1)"
+True_ = "(Py_INCREF(Py_True), Py_True)"
+False_ = "(Py_INCREF(Py_False), Py_False)"
+None_ = object()
+AllOnes = "PyInt_FromLong(-1)"
+MinusInfinity = 'PyFloat_FromDouble(-NPY_INFINITY)'
+ReorderableNone = "(Py_INCREF(Py_None), Py_None)"
# Sentinel value to specify using the full type description in the
# function name
@@ -71,13 +74,18 @@ class TypeDescription(object):
assert len(self.out) == nout
self.astype = self.astype_dict.get(self.type, None)
-_fdata_map = dict(e='npy_%sf', f='npy_%sf', d='npy_%s', g='npy_%sl',
- F='nc_%sf', D='nc_%s', G='nc_%sl')
+_fdata_map = dict(
+ e='npy_%sf',
+ f='npy_%sf',
+ d='npy_%s',
+ g='npy_%sl',
+ F='nc_%sf',
+ D='nc_%s',
+ G='nc_%sl'
+)
+
def build_func_data(types, f):
- func_data = []
- for t in types:
- d = _fdata_map.get(t, '%s') % (f,)
- func_data.append(d)
+ func_data = [_fdata_map.get(t, '%s') % (f,) for t in types]
return func_data
def TD(types, f=None, astype=None, in_=None, out=None, simd=None):
@@ -124,7 +132,7 @@ class Ufunc(object):
type_descriptions : list of TypeDescription objects
"""
def __init__(self, nin, nout, identity, docstring, typereso,
- *type_descriptions):
+ *type_descriptions, **kwargs):
self.nin = nin
self.nout = nout
if identity is None:
@@ -133,10 +141,13 @@ class Ufunc(object):
self.docstring = docstring
self.typereso = typereso
self.type_descriptions = []
+ self.signature = kwargs.pop('signature', None)
for td in type_descriptions:
self.type_descriptions.extend(td)
for td in self.type_descriptions:
td.finish_signature(self.nin, self.nout)
+ if kwargs:
+ raise ValueError('unknown kwargs %r' % str(kwargs))
# String-handling utilities to avoid locale-dependence.
@@ -188,31 +199,32 @@ def english_upper(s):
# output specification (optional)
# ]
-chartoname = {'?': 'bool',
- 'b': 'byte',
- 'B': 'ubyte',
- 'h': 'short',
- 'H': 'ushort',
- 'i': 'int',
- 'I': 'uint',
- 'l': 'long',
- 'L': 'ulong',
- 'q': 'longlong',
- 'Q': 'ulonglong',
- 'e': 'half',
- 'f': 'float',
- 'd': 'double',
- 'g': 'longdouble',
- 'F': 'cfloat',
- 'D': 'cdouble',
- 'G': 'clongdouble',
- 'M': 'datetime',
- 'm': 'timedelta',
- 'O': 'OBJECT',
- # '.' is like 'O', but calls a method of the object instead
- # of a function
- 'P': 'OBJECT',
- }
+chartoname = {
+ '?': 'bool',
+ 'b': 'byte',
+ 'B': 'ubyte',
+ 'h': 'short',
+ 'H': 'ushort',
+ 'i': 'int',
+ 'I': 'uint',
+ 'l': 'long',
+ 'L': 'ulong',
+ 'q': 'longlong',
+ 'Q': 'ulonglong',
+ 'e': 'half',
+ 'f': 'float',
+ 'd': 'double',
+ 'g': 'longdouble',
+ 'F': 'cfloat',
+ 'D': 'cdouble',
+ 'G': 'clongdouble',
+ 'M': 'datetime',
+ 'm': 'timedelta',
+ 'O': 'OBJECT',
+ # '.' is like 'O', but calls a method of the object instead
+ # of a function
+ 'P': 'OBJECT',
+}
all = '?bBhHiIlLqQefdgFDGOMm'
O = 'O'
@@ -312,7 +324,7 @@ defdict = {
TD(intfltcmplx),
[TypeDescription('m', FullTypeDescr, 'mq', 'm'),
TypeDescription('m', FullTypeDescr, 'md', 'm'),
- #TypeDescription('m', FullTypeDescr, 'mm', 'd'),
+ TypeDescription('m', FullTypeDescr, 'mm', 'q'),
],
TD(O, f='PyNumber_FloorDivide'),
),
@@ -346,14 +358,14 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.square'),
None,
- TD(ints+inexact, simd=[('avx2', ints)]),
+ TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f', 'fd')]),
TD(O, f='Py_square'),
),
'reciprocal':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.reciprocal'),
None,
- TD(ints+inexact, simd=[('avx2', ints)]),
+ TD(ints+inexact, simd=[('avx2', ints), ('fma', 'fd'), ('avx512f','fd')]),
TD(O, f='Py_reciprocal'),
),
# This is no longer used as numpy.ones_like, however it is
@@ -383,7 +395,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.absolute'),
'PyUFunc_AbsoluteTypeResolver',
- TD(bints+flts+timedeltaonly),
+ TD(bints+flts+timedeltaonly, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
TD(cmplx, out=('f', 'd', 'g')),
TD(O, f='PyNumber_Absolute'),
),
@@ -404,7 +416,7 @@ defdict = {
'positive':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.positive'),
- 'PyUFunc_SimpleUnaryOperationTypeResolver',
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
TD(ints+flts+timedeltaonly),
TD(cmplx, f='pos'),
TD(O, f='PyNumber_Positive'),
@@ -412,7 +424,7 @@ defdict = {
'sign':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sign'),
- 'PyUFunc_SimpleUnaryOperationTypeResolver',
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
TD(nobool_or_datetime),
),
'greater':
@@ -458,7 +470,7 @@ defdict = {
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
),
'logical_and':
- Ufunc(2, 1, One,
+ Ufunc(2, 1, True_,
docstrings.get('numpy.core.umath.logical_and'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
@@ -472,14 +484,14 @@ defdict = {
TD(O, f='npy_ObjectLogicalNot'),
),
'logical_or':
- Ufunc(2, 1, Zero,
+ Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_or'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
),
'logical_xor':
- Ufunc(2, 1, Zero,
+ Ufunc(2, 1, False_,
docstrings.get('numpy.core.umath.logical_xor'),
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?'),
@@ -488,33 +500,40 @@ defdict = {
'maximum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.maximum'),
- 'PyUFunc_SimpleBinaryOperationTypeResolver',
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'minimum':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.minimum'),
- 'PyUFunc_SimpleBinaryOperationTypeResolver',
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMin')
),
+'clip':
+ Ufunc(3, 1, ReorderableNone,
+ docstrings.get('numpy.core.umath.clip'),
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
+ TD(noobj),
+ [TypeDescription('O', 'npy_ObjectClip', 'OOO', 'O')]
+ ),
'fmax':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmax'),
- 'PyUFunc_SimpleBinaryOperationTypeResolver',
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMax')
),
'fmin':
Ufunc(2, 1, ReorderableNone,
docstrings.get('numpy.core.umath.fmin'),
- 'PyUFunc_SimpleBinaryOperationTypeResolver',
+ 'PyUFunc_SimpleUniformOperationTypeResolver',
TD(noobj),
TD(O, f='npy_ObjectMin')
),
'logaddexp':
- Ufunc(2, 1, None,
+ Ufunc(2, 1, MinusInfinity,
docstrings.get('numpy.core.umath.logaddexp'),
None,
TD(flts, f="logaddexp", astype={'e':'f'})
@@ -643,14 +662,18 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.cos'),
None,
- TD(inexact, f='cos', astype={'e':'f'}),
+ TD('e', f='cos', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='cos'),
TD(P, f='cos'),
),
'sin':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.sin'),
None,
- TD(inexact, f='sin', astype={'e':'f'}),
+ TD('e', f='sin', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='sin'),
TD(P, f='sin'),
),
'tan':
@@ -685,7 +708,9 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.exp'),
None,
- TD(inexact, f='exp', astype={'e':'f'}),
+ TD('e', f='exp', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='exp'),
TD(P, f='exp'),
),
'exp2':
@@ -706,7 +731,9 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.log'),
None,
- TD(inexact, f='log', astype={'e':'f'}),
+ TD('e', f='log', astype={'e':'f'}),
+ TD('f', simd=[('fma', 'f'), ('avx512f', 'f')]),
+ TD('fdg' + cmplx, f='log'),
TD(P, f='log'),
),
'log2':
@@ -735,8 +762,8 @@ defdict = {
docstrings.get('numpy.core.umath.sqrt'),
None,
TD('e', f='sqrt', astype={'e':'f'}),
- TD(inexactvec),
- TD(inexact, f='sqrt', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg' + cmplx, f='sqrt'),
TD(P, f='sqrt'),
),
'cbrt':
@@ -750,15 +777,19 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.ceil'),
None,
- TD(flts, f='ceil', astype={'e':'f'}),
- TD(P, f='ceil'),
+ TD('e', f='ceil', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg', f='ceil'),
+ TD(O, f='npy_ObjectCeil'),
),
'trunc':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.trunc'),
None,
- TD(flts, f='trunc', astype={'e':'f'}),
- TD(P, f='trunc'),
+ TD('e', f='trunc', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg', f='trunc'),
+ TD(O, f='npy_ObjectTrunc'),
),
'fabs':
Ufunc(1, 1, None,
@@ -771,14 +802,18 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.floor'),
None,
- TD(flts, f='floor', astype={'e':'f'}),
- TD(P, f='floor'),
+ TD('e', f='floor', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg', f='floor'),
+ TD(O, f='npy_ObjectFloor'),
),
'rint':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.rint'),
None,
- TD(inexact, f='rint', astype={'e':'f'}),
+ TD('e', f='rint', astype={'e':'f'}),
+ TD(inexactvec, simd=[('fma', 'fd'), ('avx512f', 'fd')]),
+ TD('fdg' + cmplx, f='rint'),
TD(P, f='rint'),
),
'arctan2':
@@ -791,15 +826,17 @@ defdict = {
'remainder':
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.remainder'),
- None,
+ 'PyUFunc_RemainderTypeResolver',
TD(intflt),
+ [TypeDescription('m', FullTypeDescr, 'mm', 'm')],
TD(O, f='PyNumber_Remainder'),
),
'divmod':
Ufunc(2, 2, None,
docstrings.get('numpy.core.umath.divmod'),
- None,
+ 'PyUFunc_DivmodTypeResolver',
TD(intflt),
+ [TypeDescription('m', FullTypeDescr, 'mm', 'qm')],
# TD(O, f='PyNumber_Divmod'), # gh-9730
),
'hypot':
@@ -813,7 +850,7 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
None,
- TD(inexact, out='?'),
+ TD(nodatetime_or_obj, out='?'),
),
'isnat':
Ufunc(1, 1, None,
@@ -825,13 +862,13 @@ defdict = {
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
None,
- TD(inexact, out='?'),
+ TD(nodatetime_or_obj, out='?'),
),
'isfinite':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isfinite'),
- None,
- TD(inexact, out='?'),
+ 'PyUFunc_IsFiniteTypeResolver',
+ TD(noobj, out='?'),
),
'signbit':
Ufunc(1, 1, None,
@@ -890,17 +927,25 @@ defdict = {
'gcd' :
Ufunc(2, 1, Zero,
docstrings.get('numpy.core.umath.gcd'),
- "PyUFunc_SimpleBinaryOperationTypeResolver",
+ "PyUFunc_SimpleUniformOperationTypeResolver",
TD(ints),
TD('O', f='npy_ObjectGCD'),
),
'lcm' :
Ufunc(2, 1, None,
docstrings.get('numpy.core.umath.lcm'),
- "PyUFunc_SimpleBinaryOperationTypeResolver",
+ "PyUFunc_SimpleUniformOperationTypeResolver",
TD(ints),
TD('O', f='npy_ObjectLCM'),
- )
+ ),
+'matmul' :
+ Ufunc(2, 1, None,
+ docstrings.get('numpy.core.umath.matmul'),
+ "PyUFunc_SimpleUniformOperationTypeResolver",
+ TD(notimes_or_obj),
+ TD(O),
+ signature='(n?,k),(k,m?)->(n?,m?)',
+ ),
}
if sys.version_info[0] >= 3:
@@ -914,25 +959,35 @@ def indent(st, spaces):
indented = re.sub(r' +$', r'', indented)
return indented
-chartotype1 = {'e': 'e_e',
- 'f': 'f_f',
- 'd': 'd_d',
- 'g': 'g_g',
- 'F': 'F_F',
- 'D': 'D_D',
- 'G': 'G_G',
- 'O': 'O_O',
- 'P': 'O_O_method'}
+# maps [nin, nout][type] to a suffix
+arity_lookup = {
+ (1, 1): {
+ 'e': 'e_e',
+ 'f': 'f_f',
+ 'd': 'd_d',
+ 'g': 'g_g',
+ 'F': 'F_F',
+ 'D': 'D_D',
+ 'G': 'G_G',
+ 'O': 'O_O',
+ 'P': 'O_O_method',
+ },
+ (2, 1): {
+ 'e': 'ee_e',
+ 'f': 'ff_f',
+ 'd': 'dd_d',
+ 'g': 'gg_g',
+ 'F': 'FF_F',
+ 'D': 'DD_D',
+ 'G': 'GG_G',
+ 'O': 'OO_O',
+ 'P': 'OO_O_method',
+ },
+ (3, 1): {
+ 'O': 'OOO_O',
+ }
+}
-chartotype2 = {'e': 'ee_e',
- 'f': 'ff_f',
- 'd': 'dd_d',
- 'g': 'gg_g',
- 'F': 'FF_F',
- 'D': 'DD_D',
- 'G': 'GG_G',
- 'O': 'OO_O',
- 'P': 'OO_O_method'}
#for each name
# 1) create functions, data, and signature
# 2) fill in functions and data in InitOperators
@@ -982,11 +1037,9 @@ def make_arrays(funcdict):
))
else:
funclist.append('NULL')
- if (uf.nin, uf.nout) == (2, 1):
- thedict = chartotype2
- elif (uf.nin, uf.nout) == (1, 1):
- thedict = chartotype1
- else:
+ try:
+ thedict = arity_lookup[uf.nin, uf.nout]
+ except KeyError:
raise ValueError("Could not handle {}[{}]".format(name, t.type))
astype = ''
@@ -1046,19 +1099,44 @@ def make_ufuncs(funcdict):
# string literal in C code. We split at endlines because textwrap.wrap
# do not play well with \n
docstring = '\\n\"\"'.join(docstring.split(r"\n"))
+ if uf.signature is None:
+ sig = "NULL"
+ else:
+ sig = '"{}"'.format(uf.signature)
fmt = textwrap.dedent("""\
- f = PyUFunc_FromFuncAndData(
+ identity = {identity_expr};
+ if ({has_identity} && identity == NULL) {{
+ return -1;
+ }}
+ f = PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
{name}_functions, {name}_data, {name}_signatures, {nloops},
{nin}, {nout}, {identity}, "{name}",
- "{doc}", 0
+ "{doc}", 0, {sig}, identity
);
+ if ({has_identity}) {{
+ Py_DECREF(identity);
+ }}
if (f == NULL) {{
return -1;
- }}""")
- mlist.append(fmt.format(
+ }}
+ """)
+ args = dict(
name=name, nloops=len(uf.type_descriptions),
- nin=uf.nin, nout=uf.nout, identity=uf.identity, doc=docstring
- ))
+ nin=uf.nin, nout=uf.nout,
+ has_identity='0' if uf.identity is None_ else '1',
+ identity='PyUFunc_IdentityValue',
+ identity_expr=uf.identity,
+ doc=docstring,
+ sig=sig,
+ )
+
+ # Only PyUFunc_None means don't reorder - we pass this using the old
+ # argument
+ if uf.identity is None_:
+ args['identity'] = 'PyUFunc_None'
+ args['identity_expr'] = 'NULL'
+
+ mlist.append(fmt.format(**args))
if uf.typereso is not None:
mlist.append(
r"((PyUFuncObject *)f)->type_resolver = &%s;" % uf.typereso)
@@ -1082,11 +1160,14 @@ def make_code(funcdict, filename):
#include "cpuid.h"
#include "ufunc_object.h"
#include "ufunc_type_resolution.h"
+ #include "loops.h"
+ #include "matmul.h"
+ #include "clip.h"
%s
static int
InitOperators(PyObject *dictionary) {
- PyObject *f;
+ PyObject *f, *identity;
%s
%s
@@ -1099,7 +1180,6 @@ def make_code(funcdict, filename):
if __name__ == "__main__":
filename = __file__
- fid = open('__umath_generated.c', 'w')
code = make_code(defdict, filename)
- fid.write(code)
- fid.close()
+ with open('__umath_generated.c', 'w') as fid:
+ fid.write(code)
diff --git a/numpy/core/code_generators/numpy_api.py b/numpy/core/code_generators/numpy_api.py
index d8a9ee6b4..a71c236fd 100644
--- a/numpy/core/code_generators/numpy_api.py
+++ b/numpy/core/code_generators/numpy_api.py
@@ -402,6 +402,8 @@ ufunc_funcs_api = {
# End 1.7 API
'PyUFunc_RegisterLoopForDescr': (41,),
# End 1.8 API
+ 'PyUFunc_FromFuncAndDataAndSignatureAndIdentity': (42,),
+ # End 1.16 API
}
# List of all the dicts which define the C API
diff --git a/numpy/core/code_generators/ufunc_docstrings.py b/numpy/core/code_generators/ufunc_docstrings.py
index 6e5cb25af..1ac477b54 100644
--- a/numpy/core/code_generators/ufunc_docstrings.py
+++ b/numpy/core/code_generators/ufunc_docstrings.py
@@ -26,12 +26,19 @@ subst = {
a freshly-allocated array is returned. A tuple (possible only as a
keyword argument) must have length equal to the number of outputs.
where : array_like, optional
- Values of True indicate to calculate the ufunc at that position, values
- of False indicate to leave the value in the output alone.
+ This condition is broadcast over the input. At locations where the
+ condition is True, the `out` array will be set to the ufunc result.
+ Elsewhere, the `out` array will retain its original value.
+ Note that if an uninitialized `out` array is created via the default
+ ``out=None``, locations within it where the condition is False will
+ remain uninitialized.
**kwargs
For other keyword-only arguments, see the
:ref:`ufunc docs <ufuncs.kwargs>`.
""").strip(),
+ 'BROADCASTABLE_2': ("If ``x1.shape != x2.shape``, they must be "
+ "broadcastable to a common shape (which becomes the "
+ "shape of the output)."),
'OUT_SCALAR_1': "This is a scalar if `x` is a scalar.",
'OUT_SCALAR_2': "This is a scalar if both `x1` and `x2` are scalars.",
}
@@ -39,13 +46,20 @@ subst = {
def add_newdoc(place, name, doc):
doc = textwrap.dedent(doc).strip()
- if name[0] != '_':
+ skip = (
+ # gufuncs do not use the OUT_SCALAR replacement strings
+ 'matmul',
+ # clip has 3 inputs, which is not handled by this
+ 'clip',
+ )
+ if name[0] != '_' and name not in skip:
if '\nx :' in doc:
assert '$OUT_SCALAR_1' in doc, "in {}".format(name)
elif '\nx2 :' in doc or '\nx1, x2 :' in doc:
assert '$OUT_SCALAR_2' in doc, "in {}".format(name)
else:
assert False, "Could not detect number of inputs in {}".format(name)
+
for k, v in subst.items():
doc = doc.replace('$' + k, v)
@@ -103,9 +117,7 @@ add_newdoc('numpy.core.umath', 'add',
Parameters
----------
x1, x2 : array_like
- The arrays to be added. If ``x1.shape != x2.shape``, they must be
- broadcastable to a common shape (which may be the shape of one or
- the other).
+ The arrays to be added. $BROADCASTABLE_2
$PARAMS
Returns
@@ -431,8 +443,7 @@ add_newdoc('numpy.core.umath', 'arctan2',
x1 : array_like, real-valued
`y`-coordinates.
x2 : array_like, real-valued
- `x`-coordinates. `x2` must be broadcastable to match the shape of
- `x1` or vice versa.
+ `x`-coordinates. $BROADCASTABLE_2
$PARAMS
Returns
@@ -555,7 +566,7 @@ add_newdoc('numpy.core.umath', 'bitwise_and',
Parameters
----------
x1, x2 : array_like
- Only integer and boolean types are handled.
+ Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
@@ -608,7 +619,7 @@ add_newdoc('numpy.core.umath', 'bitwise_or',
Parameters
----------
x1, x2 : array_like
- Only integer and boolean types are handled.
+ Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
@@ -647,8 +658,8 @@ add_newdoc('numpy.core.umath', 'bitwise_or',
array([ 6, 5, 255])
>>> np.array([2, 5, 255]) | np.array([4, 4, 4])
array([ 6, 5, 255])
- >>> np.bitwise_or(np.array([2, 5, 255, 2147483647L], dtype=np.int32),
- ... np.array([4, 4, 4, 2147483647L], dtype=np.int32))
+ >>> np.bitwise_or(np.array([2, 5, 255, 2147483647], dtype=np.int32),
+ ... np.array([4, 4, 4, 2147483647], dtype=np.int32))
array([ 6, 5, 255, 2147483647])
>>> np.bitwise_or([True, True], [False, True])
array([ True, True])
@@ -666,7 +677,7 @@ add_newdoc('numpy.core.umath', 'bitwise_xor',
Parameters
----------
x1, x2 : array_like
- Only integer and boolean types are handled.
+ Only integer and boolean types are handled. $BROADCASTABLE_2
$PARAMS
Returns
@@ -792,6 +803,13 @@ add_newdoc('numpy.core.umath', 'conjugate',
The complex conjugate of `x`, with same dtype as `y`.
$OUT_SCALAR_1
+ Notes
+ -----
+ `conj` is an alias for `conjugate`:
+
+ >>> np.conj is np.conjugate
+ True
+
Examples
--------
>>> np.conjugate(1+2j)
@@ -836,6 +854,7 @@ add_newdoc('numpy.core.umath', 'cos',
array([ 1.00000000e+00, 6.12303177e-17, -1.00000000e+00])
>>>
>>> # Example of providing the optional output parameter
+ >>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
@@ -844,7 +863,7 @@ add_newdoc('numpy.core.umath', 'cos',
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
@@ -911,7 +930,7 @@ add_newdoc('numpy.core.umath', 'degrees',
270., 300., 330.])
>>> out = np.zeros((rad.shape))
- >>> r = degrees(rad, out)
+ >>> r = np.degrees(rad, out)
>>> np.all(r == out)
True
@@ -968,7 +987,7 @@ add_newdoc('numpy.core.umath', 'heaviside',
x1 : array_like
Input values.
x2 : array_like
- The value of the function when x1 is 0.
+ The value of the function when x1 is 0. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1003,7 +1022,7 @@ add_newdoc('numpy.core.umath', 'divide',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array.
+ Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1072,7 +1091,7 @@ add_newdoc('numpy.core.umath', 'equal',
Parameters
----------
x1, x2 : array_like
- Input arrays of the same shape.
+ Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1311,7 +1330,7 @@ add_newdoc('numpy.core.umath', 'floor_divide',
"""
Return the largest integer smaller or equal to the division of the inputs.
It is equivalent to the Python ``//`` operator and pairs with the
- Python ``%`` (`remainder`), function so that ``b = a % b + b * (a // b)``
+ Python ``%`` (`remainder`), function so that ``a = a % b + b * (a // b)``
up to roundoff.
Parameters
@@ -1319,7 +1338,7 @@ add_newdoc('numpy.core.umath', 'floor_divide',
x1 : array_like
Numerator.
x2 : array_like
- Denominator.
+ Denominator. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1359,7 +1378,7 @@ add_newdoc('numpy.core.umath', 'fmod',
x1 : array_like
Dividend.
x2 : array_like
- Divisor.
+ Divisor. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1409,9 +1428,7 @@ add_newdoc('numpy.core.umath', 'greater',
Parameters
----------
x1, x2 : array_like
- Input arrays. If ``x1.shape != x2.shape``, they must be
- broadcastable to a common shape (which may be the shape of one or
- the other).
+ Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1447,9 +1464,7 @@ add_newdoc('numpy.core.umath', 'greater_equal',
Parameters
----------
x1, x2 : array_like
- Input arrays. If ``x1.shape != x2.shape``, they must be
- broadcastable to a common shape (which may be the shape of one or
- the other).
+ Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1482,7 +1497,7 @@ add_newdoc('numpy.core.umath', 'hypot',
Parameters
----------
x1, x2 : array_like
- Leg of the triangle(s).
+ Leg of the triangle(s). $BROADCASTABLE_2
$PARAMS
Returns
@@ -1558,33 +1573,31 @@ add_newdoc('numpy.core.umath', 'invert',
We've seen that 13 is represented by ``00001101``.
The invert or bit-wise NOT of 13 is then:
- >>> np.invert(np.array([13], dtype=uint8))
- array([242], dtype=uint8)
+ >>> x = np.invert(np.array(13, dtype=np.uint8))
+ >>> x
+ 242
>>> np.binary_repr(x, width=8)
- '00001101'
- >>> np.binary_repr(242, width=8)
'11110010'
The result depends on the bit-width:
- >>> np.invert(np.array([13], dtype=uint16))
- array([65522], dtype=uint16)
+ >>> x = np.invert(np.array(13, dtype=np.uint16))
+ >>> x
+ 65522
>>> np.binary_repr(x, width=16)
- '0000000000001101'
- >>> np.binary_repr(65522, width=16)
'1111111111110010'
When using signed integer types the result is the two's complement of
the result for the unsigned type:
- >>> np.invert(np.array([13], dtype=int8))
+ >>> np.invert(np.array([13], dtype=np.int8))
array([-14], dtype=int8)
>>> np.binary_repr(-14, width=8)
'11110010'
Booleans are accepted as well:
- >>> np.invert(array([True, False]))
+ >>> np.invert(np.array([True, False]))
array([False, True])
""")
@@ -1783,6 +1796,7 @@ add_newdoc('numpy.core.umath', 'left_shift',
Input values.
x2 : array_like of integer type
Number of zeros to append to `x1`. Has to be non-negative.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -1818,9 +1832,7 @@ add_newdoc('numpy.core.umath', 'less',
Parameters
----------
x1, x2 : array_like
- Input arrays. If ``x1.shape != x2.shape``, they must be
- broadcastable to a common shape (which may be the shape of one or
- the other).
+ Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1848,9 +1860,7 @@ add_newdoc('numpy.core.umath', 'less_equal',
Parameters
----------
x1, x2 : array_like
- Input arrays. If ``x1.shape != x2.shape``, they must be
- broadcastable to a common shape (which may be the shape of one or
- the other).
+ Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
@@ -1968,7 +1978,7 @@ add_newdoc('numpy.core.umath', 'log10',
Examples
--------
>>> np.log10([1e-15, -3.])
- array([-15., NaN])
+ array([-15., nan])
""")
@@ -2034,7 +2044,7 @@ add_newdoc('numpy.core.umath', 'logaddexp',
Parameters
----------
x1, x2 : array_like
- Input values.
+ Input values. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2076,7 +2086,7 @@ add_newdoc('numpy.core.umath', 'logaddexp2',
Parameters
----------
x1, x2 : array_like
- Input values.
+ Input values. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2167,14 +2177,14 @@ add_newdoc('numpy.core.umath', 'logical_and',
Parameters
----------
x1, x2 : array_like
- Input arrays. `x1` and `x2` must be of the same shape.
+ Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
- Boolean result with the same shape as `x1` and `x2` of the logical
- AND operation on corresponding elements of `x1` and `x2`.
+ Boolean result of the logical AND operation applied to the elements
+ of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
@@ -2237,14 +2247,14 @@ add_newdoc('numpy.core.umath', 'logical_or',
----------
x1, x2 : array_like
Logical OR is applied to the elements of `x1` and `x2`.
- They have to be of the same shape.
+ $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray or bool
- Boolean result with the same shape as `x1` and `x2` of the logical
- OR operation on elements of `x1` and `x2`.
+ Boolean result of the logical OR operation applied to the elements
+ of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
@@ -2272,16 +2282,14 @@ add_newdoc('numpy.core.umath', 'logical_xor',
Parameters
----------
x1, x2 : array_like
- Logical XOR is applied to the elements of `x1` and `x2`. They must
- be broadcastable to the same shape.
+ Logical XOR is applied to the elements of `x1` and `x2`. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : bool or ndarray of bool
Boolean result of the logical XOR operation applied to the elements
- of `x1` and `x2`; the shape is determined by whether or not
- broadcasting of one or both arrays was required.
+ of `x1` and `x2`; the shape is determined by broadcasting.
$OUT_SCALAR_2
See Also
@@ -2321,8 +2329,7 @@ add_newdoc('numpy.core.umath', 'maximum',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. They must have
- the same shape, or shapes that can be broadcast to a single shape.
+ The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2360,7 +2367,7 @@ add_newdoc('numpy.core.umath', 'maximum',
[ 0.5, 2. ]])
>>> np.maximum([np.nan, 0, np.nan], [0, np.nan, np.nan])
- array([ NaN, NaN, NaN])
+ array([nan, nan, nan])
>>> np.maximum(np.Inf, 1)
inf
@@ -2380,8 +2387,7 @@ add_newdoc('numpy.core.umath', 'minimum',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. They must have
- the same shape, or shapes that can be broadcast to a single shape.
+ The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2419,7 +2425,7 @@ add_newdoc('numpy.core.umath', 'minimum',
[ 0. , 1. ]])
>>> np.minimum([np.nan, 0, np.nan],[0, np.nan, np.nan])
- array([ NaN, NaN, NaN])
+ array([nan, nan, nan])
>>> np.minimum(-np.Inf, 1)
-inf
@@ -2439,8 +2445,7 @@ add_newdoc('numpy.core.umath', 'fmax',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. They must have
- the same shape.
+ The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2479,7 +2484,7 @@ add_newdoc('numpy.core.umath', 'fmax',
[ 0.5, 2. ]])
>>> np.fmax([np.nan, 0, np.nan],[0, np.nan, np.nan])
- array([ 0., 0., NaN])
+ array([ 0., 0., nan])
""")
@@ -2497,8 +2502,7 @@ add_newdoc('numpy.core.umath', 'fmin',
Parameters
----------
x1, x2 : array_like
- The arrays holding the elements to be compared. They must have
- the same shape.
+ The arrays holding the elements to be compared. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2537,8 +2541,171 @@ add_newdoc('numpy.core.umath', 'fmin',
[ 0. , 1. ]])
>>> np.fmin([np.nan, 0, np.nan],[0, np.nan, np.nan])
- array([ 0., 0., NaN])
+ array([ 0., 0., nan])
+
+ """)
+
+add_newdoc('numpy.core.umath', 'clip',
+ """
+ Clip (limit) the values in an array.
+
+ Given an interval, values outside the interval are clipped to
+ the interval edges. For example, if an interval of ``[0, 1]``
+ is specified, values smaller than 0 become 0, and values larger
+ than 1 become 1.
+
+ Equivalent to but faster than ``np.minimum(np.maximum(a, a_min), a_max)``.
+
+ Parameters
+ ----------
+ a : array_like
+ Array containing elements to clip.
+ a_min : array_like
+ Minimum value.
+ a_max : array_like
+ Maximum value.
+ out : ndarray, optional
+ The results will be placed in this array. It may be the input
+ array for in-place clipping. `out` must be of the right shape
+ to hold the output. Its type is preserved.
+ $PARAMS
+
+ See Also
+ --------
+ numpy.clip :
+ Wrapper that makes the ``a_min`` and ``a_max`` arguments optional,
+ dispatching to one of `~numpy.core.umath.clip`,
+ `~numpy.core.umath.minimum`, and `~numpy.core.umath.maximum`.
+
+ Returns
+ -------
+ clipped_array : ndarray
+ An array with the elements of `a`, but where values
+ < `a_min` are replaced with `a_min`, and those > `a_max`
+ with `a_max`.
+ """)
+
+add_newdoc('numpy.core.umath', 'matmul',
+ """
+ Matrix product of two arrays.
+
+ Parameters
+ ----------
+ x1, x2 : array_like
+ Input arrays, scalars not allowed.
+ out : ndarray, optional
+ A location into which the result is stored. If provided, it must have
+ a shape that matches the signature `(n,k),(k,m)->(n,m)`. If not
+ provided or `None`, a freshly-allocated array is returned.
+ **kwargs
+ For other keyword-only arguments, see the
+ :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ .. versionadded:: 1.16
+ Now handles ufunc kwargs
+ Returns
+ -------
+ y : ndarray
+ The matrix product of the inputs.
+ This is a scalar only when both x1, x2 are 1-d vectors.
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` is not the same size as
+ the second-to-last dimension of `b`.
+
+ If a scalar value is passed in.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+ dot : alternative matrix product with different broadcasting rules.
+
+ Notes
+ -----
+
+ The behavior depends on the arguments in the following way.
+
+ - If both arguments are 2-D they are multiplied like conventional
+ matrices.
+ - If either argument is N-D, N > 2, it is treated as a stack of
+ matrices residing in the last two indexes and broadcast accordingly.
+ - If the first argument is 1-D, it is promoted to a matrix by
+ prepending a 1 to its dimensions. After matrix multiplication
+ the prepended 1 is removed.
+ - If the second argument is 1-D, it is promoted to a matrix by
+ appending a 1 to its dimensions. After matrix multiplication
+ the appended 1 is removed.
+
+ ``matmul`` differs from ``dot`` in two important ways:
+
+ - Multiplication by scalars is not allowed, use ``*`` instead.
+ - Stacks of matrices are broadcast together as if the matrices
+ were elements, respecting the signature ``(n,k),(k,m)->(n,m)``:
+
+ >>> a = np.ones([9, 5, 7, 4])
+ >>> c = np.ones([9, 5, 4, 3])
+ >>> np.dot(a, c).shape
+ (9, 5, 7, 9, 5, 3)
+ >>> np.matmul(a, c).shape
+ (9, 5, 7, 3)
+ >>> # n is 7, k is 4, m is 3
+
+ The matmul function implements the semantics of the `@` operator introduced
+ in Python 3.5 following PEP465.
+
+ Examples
+ --------
+ For 2-D arrays it is the matrix product:
+
+ >>> a = np.array([[1, 0],
+ ... [0, 1]])
+ >>> b = np.array([[4, 1],
+ ... [2, 2]])
+ >>> np.matmul(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+ For 2-D mixed with 1-D, the result is the usual.
+
+ >>> a = np.array([[1, 0],
+ ... [0, 1]])
+ >>> b = np.array([1, 2])
+ >>> np.matmul(a, b)
+ array([1, 2])
+ >>> np.matmul(b, a)
+ array([1, 2])
+
+
+ Broadcasting is conventional for stacks of arrays
+
+ >>> a = np.arange(2 * 2 * 4).reshape((2, 2, 4))
+ >>> b = np.arange(2 * 2 * 4).reshape((2, 4, 2))
+ >>> np.matmul(a,b).shape
+ (2, 2, 2)
+ >>> np.matmul(a, b)[0, 1, 1]
+ 98
+ >>> sum(a[0, 1, :] * b[0 , :, 1])
+ 98
+
+ Vector, vector returns the scalar inner product, but neither argument
+ is complex-conjugated:
+
+ >>> np.matmul([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+ Scalar multiplication raises an error.
+
+ >>> np.matmul([1,2], 3)
+ Traceback (most recent call last):
+ ...
+ ValueError: matmul: Input operand 1 does not have enough dimensions ...
+
+ .. versionadded:: 1.10.0
""")
add_newdoc('numpy.core.umath', 'modf',
@@ -2588,14 +2755,13 @@ add_newdoc('numpy.core.umath', 'multiply',
Parameters
----------
x1, x2 : array_like
- Input arrays to be multiplied.
+ Input arrays to be multiplied. $BROADCASTABLE_2
$PARAMS
Returns
-------
y : ndarray
- The product of `x1` and `x2`, element-wise. Returns a scalar if
- both `x1` and `x2` are scalars.
+ The product of `x1` and `x2`, element-wise.
$OUT_SCALAR_2
Notes
@@ -2670,7 +2836,7 @@ add_newdoc('numpy.core.umath', 'not_equal',
Parameters
----------
x1, x2 : array_like
- Input arrays.
+ Input arrays. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2719,7 +2885,7 @@ add_newdoc('numpy.core.umath', 'power',
x1 : array_like
The bases.
x2 : array_like
- The exponents.
+ The exponents. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2778,7 +2944,7 @@ add_newdoc('numpy.core.umath', 'float_power',
x1 : array_like
The bases.
x2 : array_like
- The exponents.
+ The exponents. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2950,7 +3116,7 @@ add_newdoc('numpy.core.umath', 'remainder',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array.
+ Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
@@ -2970,6 +3136,7 @@ add_newdoc('numpy.core.umath', 'remainder',
-----
Returns 0 when `x2` is 0 and both `x1` and `x2` are (arrays of)
integers.
+ ``mod`` is an alias of ``remainder``.
Examples
--------
@@ -2995,7 +3162,7 @@ add_newdoc('numpy.core.umath', 'divmod',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array.
+ Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
@@ -3034,7 +3201,7 @@ add_newdoc('numpy.core.umath', 'right_shift',
x1 : array_like, int
Input values.
x2 : array_like, int
- Number of bits to remove at the right of `x1`.
+ Number of bits to remove at the right of `x1`. $BROADCASTABLE_2
$PARAMS
Returns
@@ -3161,16 +3328,14 @@ add_newdoc('numpy.core.umath', 'copysign',
"""
Change the sign of x1 to that of x2, element-wise.
- If both arguments are arrays or sequences, they have to be of the same
- length. If `x2` is a scalar, its sign will be copied to all elements of
- `x1`.
+ If `x2` is a scalar, its sign will be copied to all elements of `x1`.
Parameters
----------
x1 : array_like
Values to change the sign of.
x2 : array_like
- The sign of `x2` is copied to `x1`.
+ The sign of `x2` is copied to `x1`. $BROADCASTABLE_2
$PARAMS
Returns
@@ -3205,6 +3370,7 @@ add_newdoc('numpy.core.umath', 'nextafter',
Values to find the next representable value of.
x2 : array_like
The direction where to look for the next representable value of `x1`.
+ $BROADCASTABLE_2
$PARAMS
Returns
@@ -3353,6 +3519,7 @@ add_newdoc('numpy.core.umath', 'sinh',
>>> # Discrepancy due to vagaries of floating point arithmetic.
>>> # Example of providing the optional output parameter
+ >>> out1 = np.array([0], dtype='d')
>>> out2 = np.sinh([0.1], out1)
>>> out2 is out1
True
@@ -3361,7 +3528,7 @@ add_newdoc('numpy.core.umath', 'sinh',
>>> np.sinh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
@@ -3406,8 +3573,8 @@ add_newdoc('numpy.core.umath', 'sqrt',
>>> np.sqrt([4, -1, -3+4J])
array([ 2.+0.j, 0.+1.j, 1.+2.j])
- >>> np.sqrt([4, -1, numpy.inf])
- array([ 2., NaN, Inf])
+ >>> np.sqrt([4, -1, np.inf])
+ array([ 2., nan, inf])
""")
@@ -3475,7 +3642,7 @@ add_newdoc('numpy.core.umath', 'subtract',
Parameters
----------
x1, x2 : array_like
- The arrays to be subtracted from each other.
+ The arrays to be subtracted from each other. $BROADCASTABLE_2
$PARAMS
Returns
@@ -3538,6 +3705,7 @@ add_newdoc('numpy.core.umath', 'tan',
>>>
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
+ >>> out1 = np.array([0], dtype='d')
>>> out2 = np.cos([0.1], out1)
>>> out2 is out1
True
@@ -3546,7 +3714,7 @@ add_newdoc('numpy.core.umath', 'tan',
>>> np.cos(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
@@ -3589,6 +3757,7 @@ add_newdoc('numpy.core.umath', 'tanh',
>>> # Example of providing the optional output parameter illustrating
>>> # that what is returned is a reference to said parameter
+ >>> out1 = np.array([0], dtype='d')
>>> out2 = np.tanh([0.1], out1)
>>> out2 is out1
True
@@ -3597,7 +3766,7 @@ add_newdoc('numpy.core.umath', 'tanh',
>>> np.tanh(np.zeros((3,3)),np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
- ValueError: invalid return array shape
+ ValueError: operands could not be broadcast together with shapes (3,3) (2,2)
""")
@@ -3614,7 +3783,7 @@ add_newdoc('numpy.core.umath', 'true_divide',
x1 : array_like
Dividend array.
x2 : array_like
- Divisor array.
+ Divisor array. $BROADCASTABLE_2
$PARAMS
Returns
@@ -3639,8 +3808,6 @@ add_newdoc('numpy.core.umath', 'true_divide',
>>> np.true_divide(x, 4)
array([ 0. , 0.25, 0.5 , 0.75, 1. ])
- >>> x/4
- array([0, 0, 0, 0, 1])
>>> x//4
array([0, 0, 0, 0, 1])
@@ -3713,7 +3880,7 @@ add_newdoc('numpy.core.umath', 'ldexp',
x1 : array_like
Array of multipliers.
x2 : array_like, int
- Array of twos exponents.
+ Array of twos exponents. $BROADCASTABLE_2
$PARAMS
Returns
@@ -3736,7 +3903,7 @@ add_newdoc('numpy.core.umath', 'ldexp',
Examples
--------
>>> np.ldexp(5, np.arange(4))
- array([ 5., 10., 20., 40.], dtype=float32)
+ array([ 5., 10., 20., 40.], dtype=float16)
>>> x = np.arange(6)
>>> np.ldexp(*np.frexp(x))
@@ -3751,7 +3918,7 @@ add_newdoc('numpy.core.umath', 'gcd',
Parameters
----------
x1, x2 : array_like, int
- Arrays of values
+ Arrays of values. $BROADCASTABLE_2
Returns
-------
@@ -3781,7 +3948,7 @@ add_newdoc('numpy.core.umath', 'lcm',
Parameters
----------
x1, x2 : array_like, int
- Arrays of values
+ Arrays of values. $BROADCASTABLE_2
Returns
-------
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index 6d0a0add5..a941c5b81 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -17,16 +17,19 @@ The preferred alias for `defchararray` is `numpy.char`.
"""
from __future__ import division, absolute_import, print_function
+import functools
import sys
from .numerictypes import string_, unicode_, integer, object_, bool_, character
from .numeric import ndarray, compare_chararrays
from .numeric import array as narray
from numpy.core.multiarray import _vec_string
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from numpy.compat import asbytes, long
import numpy
__all__ = [
- 'chararray', 'equal', 'not_equal', 'greater_equal', 'less_equal',
+ 'equal', 'not_equal', 'greater_equal', 'less_equal',
'greater', 'less', 'str_len', 'add', 'multiply', 'mod', 'capitalize',
'center', 'count', 'decode', 'encode', 'endswith', 'expandtabs',
'find', 'index', 'isalnum', 'isalpha', 'isdigit', 'islower', 'isspace',
@@ -47,6 +50,10 @@ else:
_bytes = str
_len = len
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.char')
+
+
def _use_unicode(*args):
"""
Helper function for determining the output type of some string
@@ -95,6 +102,11 @@ def _get_num_chars(a):
return a.itemsize
+def _binary_op_dispatcher(x1, x2):
+ return (x1, x2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
def equal(x1, x2):
"""
Return (x1 == x2) element-wise.
@@ -119,6 +131,8 @@ def equal(x1, x2):
"""
return compare_chararrays(x1, x2, '==', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def not_equal(x1, x2):
"""
Return (x1 != x2) element-wise.
@@ -143,6 +157,8 @@ def not_equal(x1, x2):
"""
return compare_chararrays(x1, x2, '!=', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def greater_equal(x1, x2):
"""
Return (x1 >= x2) element-wise.
@@ -168,6 +184,8 @@ def greater_equal(x1, x2):
"""
return compare_chararrays(x1, x2, '>=', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def less_equal(x1, x2):
"""
Return (x1 <= x2) element-wise.
@@ -192,6 +210,8 @@ def less_equal(x1, x2):
"""
return compare_chararrays(x1, x2, '<=', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def greater(x1, x2):
"""
Return (x1 > x2) element-wise.
@@ -216,6 +236,8 @@ def greater(x1, x2):
"""
return compare_chararrays(x1, x2, '>', True)
+
+@array_function_dispatch(_binary_op_dispatcher)
def less(x1, x2):
"""
Return (x1 < x2) element-wise.
@@ -240,6 +262,12 @@ def less(x1, x2):
"""
return compare_chararrays(x1, x2, '<', True)
+
+def _unary_op_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_op_dispatcher)
def str_len(a):
"""
Return len(a) element-wise.
@@ -259,6 +287,8 @@ def str_len(a):
"""
return _vec_string(a, integer, '__len__')
+
+@array_function_dispatch(_binary_op_dispatcher)
def add(x1, x2):
"""
Return element-wise string concatenation for two arrays of str or unicode.
@@ -285,6 +315,12 @@ def add(x1, x2):
dtype = _use_unicode(arr1, arr2)
return _vec_string(arr1, (dtype, out_size), '__add__', (arr2,))
+
+def _multiply_dispatcher(a, i):
+ return (a,)
+
+
+@array_function_dispatch(_multiply_dispatcher)
def multiply(a, i):
"""
Return (a * i), that is string multiple concatenation,
@@ -313,6 +349,12 @@ def multiply(a, i):
return _vec_string(
a_arr, (a_arr.dtype.type, out_size), '__mul__', (i_arr,))
+
+def _mod_dispatcher(a, values):
+ return (a, values)
+
+
+@array_function_dispatch(_mod_dispatcher)
def mod(a, values):
"""
Return (a % i), that is pre-Python 2.6 string formatting
@@ -339,6 +381,8 @@ def mod(a, values):
return _to_string_or_unicode_array(
_vec_string(a, object_, '__mod__', (values,)))
+
+@array_function_dispatch(_unary_op_dispatcher)
def capitalize(a):
"""
Return a copy of `a` with only the first character of each element
@@ -377,6 +421,11 @@ def capitalize(a):
return _vec_string(a_arr, a_arr.dtype, 'capitalize')
+def _center_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_center_dispatcher)
def center(a, width, fillchar=' '):
"""
Return a copy of `a` with its elements centered in a string of
@@ -413,6 +462,11 @@ def center(a, width, fillchar=' '):
a_arr, (a_arr.dtype.type, size), 'center', (width_arr, fillchar))
+def _count_dispatcher(a, sub, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_dispatcher)
def count(a, sub, start=0, end=None):
"""
Returns an array with the number of non-overlapping occurrences of
@@ -444,8 +498,7 @@ def count(a, sub, start=0, end=None):
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
- array(['aAaAaA', ' aA ', 'abBABba'],
- dtype='|S7')
+ array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.char.count(c, 'A')
array([3, 1, 1])
>>> np.char.count(c, 'aA')
@@ -459,6 +512,11 @@ def count(a, sub, start=0, end=None):
return _vec_string(a, integer, 'count', [sub, start] + _clean_args(end))
+def _code_dispatcher(a, encoding=None, errors=None):
+ return (a,)
+
+
+@array_function_dispatch(_code_dispatcher)
def decode(a, encoding=None, errors=None):
"""
Calls `str.decode` element-wise.
@@ -493,8 +551,7 @@ def decode(a, encoding=None, errors=None):
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
- array(['aAaAaA', ' aA ', 'abBABba'],
- dtype='|S7')
+ array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.char.encode(c, encoding='cp037')
array(['\\x81\\xc1\\x81\\xc1\\x81\\xc1', '@@\\x81\\xc1@@',
'\\x81\\x82\\xc2\\xc1\\xc2\\x82\\x81'],
@@ -505,6 +562,7 @@ def decode(a, encoding=None, errors=None):
_vec_string(a, object_, 'decode', _clean_args(encoding, errors)))
+@array_function_dispatch(_code_dispatcher)
def encode(a, encoding=None, errors=None):
"""
Calls `str.encode` element-wise.
@@ -540,6 +598,11 @@ def encode(a, encoding=None, errors=None):
_vec_string(a, object_, 'encode', _clean_args(encoding, errors)))
+def _endswith_dispatcher(a, suffix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_endswith_dispatcher)
def endswith(a, suffix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
@@ -572,8 +635,7 @@ def endswith(a, suffix, start=0, end=None):
>>> s[0] = 'foo'
>>> s[1] = 'bar'
>>> s
- array(['foo', 'bar'],
- dtype='|S3')
+ array(['foo', 'bar'], dtype='<U3')
>>> np.char.endswith(s, 'ar')
array([False, True])
>>> np.char.endswith(s, 'a', start=1, end=2)
@@ -584,6 +646,11 @@ def endswith(a, suffix, start=0, end=None):
a, bool_, 'endswith', [suffix, start] + _clean_args(end))
+def _expandtabs_dispatcher(a, tabsize=None):
+ return (a,)
+
+
+@array_function_dispatch(_expandtabs_dispatcher)
def expandtabs(a, tabsize=8):
"""
Return a copy of each string element where all tab characters are
@@ -619,6 +686,7 @@ def expandtabs(a, tabsize=8):
_vec_string(a, object_, 'expandtabs', (tabsize,)))
+@array_function_dispatch(_count_dispatcher)
def find(a, sub, start=0, end=None):
"""
For each element, return the lowest index in the string where
@@ -654,6 +722,7 @@ def find(a, sub, start=0, end=None):
a, integer, 'find', [sub, start] + _clean_args(end))
+@array_function_dispatch(_count_dispatcher)
def index(a, sub, start=0, end=None):
"""
Like `find`, but raises `ValueError` when the substring is not found.
@@ -681,6 +750,8 @@ def index(a, sub, start=0, end=None):
return _vec_string(
a, integer, 'index', [sub, start] + _clean_args(end))
+
+@array_function_dispatch(_unary_op_dispatcher)
def isalnum(a):
"""
Returns true for each element if all characters in the string are
@@ -705,6 +776,8 @@ def isalnum(a):
"""
return _vec_string(a, bool_, 'isalnum')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isalpha(a):
"""
Returns true for each element if all characters in the string are
@@ -729,6 +802,8 @@ def isalpha(a):
"""
return _vec_string(a, bool_, 'isalpha')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isdigit(a):
"""
Returns true for each element if all characters in the string are
@@ -753,6 +828,8 @@ def isdigit(a):
"""
return _vec_string(a, bool_, 'isdigit')
+
+@array_function_dispatch(_unary_op_dispatcher)
def islower(a):
"""
Returns true for each element if all cased characters in the
@@ -778,6 +855,8 @@ def islower(a):
"""
return _vec_string(a, bool_, 'islower')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isspace(a):
"""
Returns true for each element if there are only whitespace
@@ -803,6 +882,8 @@ def isspace(a):
"""
return _vec_string(a, bool_, 'isspace')
+
+@array_function_dispatch(_unary_op_dispatcher)
def istitle(a):
"""
Returns true for each element if the element is a titlecased
@@ -827,6 +908,8 @@ def istitle(a):
"""
return _vec_string(a, bool_, 'istitle')
+
+@array_function_dispatch(_unary_op_dispatcher)
def isupper(a):
"""
Returns true for each element if all cased characters in the
@@ -852,6 +935,12 @@ def isupper(a):
"""
return _vec_string(a, bool_, 'isupper')
+
+def _join_dispatcher(sep, seq):
+ return (sep, seq)
+
+
+@array_function_dispatch(_join_dispatcher)
def join(sep, seq):
"""
Return a string which is the concatenation of the strings in the
@@ -877,6 +966,12 @@ def join(sep, seq):
_vec_string(sep, object_, 'join', (seq,)))
+
+def _just_dispatcher(a, width, fillchar=None):
+ return (a,)
+
+
+@array_function_dispatch(_just_dispatcher)
def ljust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` left-justified in a
@@ -912,6 +1007,7 @@ def ljust(a, width, fillchar=' '):
a_arr, (a_arr.dtype.type, size), 'ljust', (width_arr, fillchar))
+@array_function_dispatch(_unary_op_dispatcher)
def lower(a):
"""
Return an array with the elements converted to lowercase.
@@ -937,17 +1033,20 @@ def lower(a):
Examples
--------
>>> c = np.array(['A1B C', '1BCA', 'BCA1']); c
- array(['A1B C', '1BCA', 'BCA1'],
- dtype='|S5')
+ array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
>>> np.char.lower(c)
- array(['a1b c', '1bca', 'bca1'],
- dtype='|S5')
+ array(['a1b c', '1bca', 'bca1'], dtype='<U5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'lower')
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
def lstrip(a, chars=None):
"""
For each element in `a`, return a copy with the leading characters
@@ -980,23 +1079,20 @@ def lstrip(a, chars=None):
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
- array(['aAaAaA', ' aA ', 'abBABba'],
- dtype='|S7')
+ array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
The 'a' variable is unstripped from c[1] because whitespace leading.
>>> np.char.lstrip(c, 'a')
- array(['AaAaA', ' aA ', 'bBABba'],
- dtype='|S7')
+ array(['AaAaA', ' aA ', 'bBABba'], dtype='<U7')
>>> np.char.lstrip(c, 'A') # leaves c unchanged
- array(['aAaAaA', ' aA ', 'abBABba'],
- dtype='|S7')
+ array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, '')).all()
- ... # XXX: is this a regression? this line now returns False
+ ... # XXX: is this a regression? This used to return True
... # np.char.lstrip(c,'') does not modify c at all.
- True
+ False
>>> (np.char.lstrip(c, ' ') == np.char.lstrip(c, None)).all()
True
@@ -1005,6 +1101,11 @@ def lstrip(a, chars=None):
return _vec_string(a_arr, a_arr.dtype, 'lstrip', (chars,))
+def _partition_dispatcher(a, sep):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
def partition(a, sep):
"""
Partition each element in `a` around `sep`.
@@ -1040,6 +1141,11 @@ def partition(a, sep):
_vec_string(a, object_, 'partition', (sep,)))
+def _replace_dispatcher(a, old, new, count=None):
+ return (a,)
+
+
+@array_function_dispatch(_replace_dispatcher)
def replace(a, old, new, count=None):
"""
For each element in `a`, return a copy of the string with all
@@ -1072,6 +1178,7 @@ def replace(a, old, new, count=None):
a, object_, 'replace', [old, new] + _clean_args(count)))
+@array_function_dispatch(_count_dispatcher)
def rfind(a, sub, start=0, end=None):
"""
For each element in `a`, return the highest index in the string
@@ -1104,6 +1211,7 @@ def rfind(a, sub, start=0, end=None):
a, integer, 'rfind', [sub, start] + _clean_args(end))
+@array_function_dispatch(_count_dispatcher)
def rindex(a, sub, start=0, end=None):
"""
Like `rfind`, but raises `ValueError` when the substring `sub` is
@@ -1133,6 +1241,7 @@ def rindex(a, sub, start=0, end=None):
a, integer, 'rindex', [sub, start] + _clean_args(end))
+@array_function_dispatch(_just_dispatcher)
def rjust(a, width, fillchar=' '):
"""
Return an array with the elements of `a` right-justified in a
@@ -1168,6 +1277,7 @@ def rjust(a, width, fillchar=' '):
a_arr, (a_arr.dtype.type, size), 'rjust', (width_arr, fillchar))
+@array_function_dispatch(_partition_dispatcher)
def rpartition(a, sep):
"""
Partition (split) each element around the right-most separator.
@@ -1203,6 +1313,11 @@ def rpartition(a, sep):
_vec_string(a, object_, 'rpartition', (sep,)))
+def _split_dispatcher(a, sep=None, maxsplit=None):
+ return (a,)
+
+
+@array_function_dispatch(_split_dispatcher)
def rsplit(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
@@ -1240,6 +1355,11 @@ def rsplit(a, sep=None, maxsplit=None):
a, object_, 'rsplit', [sep] + _clean_args(maxsplit))
+def _strip_dispatcher(a, chars=None):
+ return (a,)
+
+
+@array_function_dispatch(_strip_dispatcher)
def rstrip(a, chars=None):
"""
For each element in `a`, return a copy with the trailing
@@ -1272,10 +1392,10 @@ def rstrip(a, chars=None):
>>> c = np.array(['aAaAaA', 'abBABba'], dtype='S7'); c
array(['aAaAaA', 'abBABba'],
dtype='|S7')
- >>> np.char.rstrip(c, 'a')
+ >>> np.char.rstrip(c, b'a')
array(['aAaAaA', 'abBABb'],
dtype='|S7')
- >>> np.char.rstrip(c, 'A')
+ >>> np.char.rstrip(c, b'A')
array(['aAaAa', 'abBABba'],
dtype='|S7')
@@ -1284,6 +1404,7 @@ def rstrip(a, chars=None):
return _vec_string(a_arr, a_arr.dtype, 'rstrip', (chars,))
+@array_function_dispatch(_split_dispatcher)
def split(a, sep=None, maxsplit=None):
"""
For each element in `a`, return a list of the words in the
@@ -1318,6 +1439,11 @@ def split(a, sep=None, maxsplit=None):
a, object_, 'split', [sep] + _clean_args(maxsplit))
+def _splitlines_dispatcher(a, keepends=None):
+ return (a,)
+
+
+@array_function_dispatch(_splitlines_dispatcher)
def splitlines(a, keepends=None):
"""
For each element in `a`, return a list of the lines in the
@@ -1347,6 +1473,11 @@ def splitlines(a, keepends=None):
a, object_, 'splitlines', _clean_args(keepends))
+def _startswith_dispatcher(a, prefix, start=None, end=None):
+ return (a,)
+
+
+@array_function_dispatch(_startswith_dispatcher)
def startswith(a, prefix, start=0, end=None):
"""
Returns a boolean array which is `True` where the string element
@@ -1378,6 +1509,7 @@ def startswith(a, prefix, start=0, end=None):
a, bool_, 'startswith', [prefix, start] + _clean_args(end))
+@array_function_dispatch(_strip_dispatcher)
def strip(a, chars=None):
"""
For each element in `a`, return a copy with the leading and
@@ -1409,23 +1541,20 @@ def strip(a, chars=None):
--------
>>> c = np.array(['aAaAaA', ' aA ', 'abBABba'])
>>> c
- array(['aAaAaA', ' aA ', 'abBABba'],
- dtype='|S7')
+ array(['aAaAaA', ' aA ', 'abBABba'], dtype='<U7')
>>> np.char.strip(c)
- array(['aAaAaA', 'aA', 'abBABba'],
- dtype='|S7')
+ array(['aAaAaA', 'aA', 'abBABba'], dtype='<U7')
>>> np.char.strip(c, 'a') # 'a' unstripped from c[1] because whitespace leads
- array(['AaAaA', ' aA ', 'bBABb'],
- dtype='|S7')
+ array(['AaAaA', ' aA ', 'bBABb'], dtype='<U7')
>>> np.char.strip(c, 'A') # 'A' unstripped from c[1] because (unprinted) ws trails
- array(['aAaAa', ' aA ', 'abBABba'],
- dtype='|S7')
+ array(['aAaAa', ' aA ', 'abBABba'], dtype='<U7')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'strip', _clean_args(chars))
+@array_function_dispatch(_unary_op_dispatcher)
def swapcase(a):
"""
Return element-wise a copy of the string with
@@ -1463,6 +1592,7 @@ def swapcase(a):
return _vec_string(a_arr, a_arr.dtype, 'swapcase')
+@array_function_dispatch(_unary_op_dispatcher)
def title(a):
"""
Return element-wise title cased version of string or unicode.
@@ -1502,6 +1632,11 @@ def title(a):
return _vec_string(a_arr, a_arr.dtype, 'title')
+def _translate_dispatcher(a, table, deletechars=None):
+ return (a,)
+
+
+@array_function_dispatch(_translate_dispatcher)
def translate(a, table, deletechars=None):
"""
For each element in `a`, return a copy of the string where all
@@ -1538,6 +1673,7 @@ def translate(a, table, deletechars=None):
a_arr, a_arr.dtype, 'translate', [table] + _clean_args(deletechars))
+@array_function_dispatch(_unary_op_dispatcher)
def upper(a):
"""
Return an array with the elements converted to uppercase.
@@ -1563,17 +1699,20 @@ def upper(a):
Examples
--------
>>> c = np.array(['a1b c', '1bca', 'bca1']); c
- array(['a1b c', '1bca', 'bca1'],
- dtype='|S5')
+ array(['a1b c', '1bca', 'bca1'], dtype='<U5')
>>> np.char.upper(c)
- array(['A1B C', '1BCA', 'BCA1'],
- dtype='|S5')
+ array(['A1B C', '1BCA', 'BCA1'], dtype='<U5')
"""
a_arr = numpy.asarray(a)
return _vec_string(a_arr, a_arr.dtype, 'upper')
+def _zfill_dispatcher(a, width):
+ return (a,)
+
+
+@array_function_dispatch(_zfill_dispatcher)
def zfill(a, width):
"""
Return the numeric string left-filled with zeros
@@ -1604,6 +1743,7 @@ def zfill(a, width):
a_arr, (a_arr.dtype.type, size), 'zfill', (width_arr,))
+@array_function_dispatch(_unary_op_dispatcher)
def isnumeric(a):
"""
For each element, return True if there are only numeric
@@ -1635,6 +1775,7 @@ def isnumeric(a):
return _vec_string(a, bool_, 'isnumeric')
+@array_function_dispatch(_unary_op_dispatcher)
def isdecimal(a):
"""
For each element, return True if there are only decimal
@@ -1666,6 +1807,7 @@ def isdecimal(a):
return _vec_string(a, bool_, 'isdecimal')
+@set_module('numpy')
class chararray(ndarray):
"""
chararray(shape, itemsize=1, unicode=False, buffer=None, offset=0,
@@ -1698,7 +1840,7 @@ class chararray(ndarray):
This constructor creates the array, using `buffer` (with `offset`
and `strides`) if it is not ``None``. If `buffer` is ``None``, then
constructs a new array with `strides` in "C order", unless both
- ``len(shape) >= 2`` and ``order='Fortran'``, in which case `strides`
+ ``len(shape) >= 2`` and ``order='F'``, in which case `strides`
is in "Fortran order".
Methods
@@ -1794,18 +1936,16 @@ class chararray(ndarray):
>>> charar = np.chararray((3, 3))
>>> charar[:] = 'a'
>>> charar
- chararray([['a', 'a', 'a'],
- ['a', 'a', 'a'],
- ['a', 'a', 'a']],
- dtype='|S1')
+ chararray([[b'a', b'a', b'a'],
+ [b'a', b'a', b'a'],
+ [b'a', b'a', b'a']], dtype='|S1')
>>> charar = np.chararray(charar.shape, itemsize=5)
>>> charar[:] = 'abc'
>>> charar
- chararray([['abc', 'abc', 'abc'],
- ['abc', 'abc', 'abc'],
- ['abc', 'abc', 'abc']],
- dtype='|S5')
+ chararray([[b'abc', b'abc', b'abc'],
+ [b'abc', b'abc', b'abc'],
+ [b'abc', b'abc', b'abc']], dtype='|S5')
"""
def __new__(subtype, shape, itemsize=1, unicode=False, buffer=None,
@@ -1984,7 +2124,7 @@ class chararray(ndarray):
def __rmod__(self, other):
return NotImplemented
- def argsort(self, axis=-1, kind='quicksort', order=None):
+ def argsort(self, axis=-1, kind=None, order=None):
"""
Return the indices that sort the array lexicographically.
diff --git a/numpy/core/einsumfunc.py b/numpy/core/einsumfunc.py
index 1281b3c98..3412c3fd5 100644
--- a/numpy/core/einsumfunc.py
+++ b/numpy/core/einsumfunc.py
@@ -9,6 +9,7 @@ import itertools
from numpy.compat import basestring
from numpy.core.multiarray import c_einsum
from numpy.core.numeric import asanyarray, tensordot
+from numpy.core.overrides import array_function_dispatch
__all__ = ['einsum', 'einsum_path']
@@ -40,10 +41,10 @@ def _flop_count(idx_contraction, inner, num_terms, size_dictionary):
--------
>>> _flop_count('abc', False, 1, {'a': 2, 'b':3, 'c':5})
- 90
+ 30
>>> _flop_count('abc', True, 2, {'a': 2, 'b':3, 'c':5})
- 270
+ 60
"""
@@ -168,9 +169,9 @@ def _optimal_path(input_sets, output_set, idx_dict, memory_limit):
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
- >>> oset = set('')
+ >>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
- >>> _path__optimal_path(isets, oset, idx_sizes, 5000)
+ >>> _optimal_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
@@ -286,7 +287,7 @@ def _update_other_results(results, best):
Returns
-------
mod_results : list
- The list of modifed results, updated with outcome of ``best`` contraction.
+ The list of modified results, updated with outcome of ``best`` contraction.
"""
best_con = best[1]
@@ -339,9 +340,9 @@ def _greedy_path(input_sets, output_set, idx_dict, memory_limit):
Examples
--------
>>> isets = [set('abd'), set('ac'), set('bdc')]
- >>> oset = set('')
+ >>> oset = set()
>>> idx_sizes = {'a': 1, 'b':2, 'c':3, 'd':4}
- >>> _path__greedy_path(isets, oset, idx_sizes, 5000)
+ >>> _greedy_path(isets, oset, idx_sizes, 5000)
[(0, 2), (0, 1)]
"""
@@ -538,13 +539,14 @@ def _parse_einsum_input(operands):
--------
The operand list is simplified to reduce printing:
+ >>> np.random.seed(123)
>>> a = np.random.rand(4, 4)
>>> b = np.random.rand(4, 4, 4)
- >>> __parse_einsum_input(('...a,...a->...', a, b))
- ('za,xza', 'xz', [a, b])
+ >>> _parse_einsum_input(('...a,...a->...', a, b))
+ ('za,xza', 'xz', [a, b]) # may vary
- >>> __parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
- ('za,xza', 'xz', [a, b])
+ >>> _parse_einsum_input((a, [Ellipsis, 0], b, [Ellipsis, 0]))
+ ('za,xza', 'xz', [a, b]) # may vary
"""
if len(operands) == 0:
@@ -689,6 +691,17 @@ def _parse_einsum_input(operands):
return (input_subscripts, output_subscript, operands)
+def _einsum_path_dispatcher(*operands, **kwargs):
+ # NOTE: technically, we should only dispatch on array-like arguments, not
+ # subscripts (given as strings). But separating operands into
+ # arrays/subscripts is a little tricky/slow (given einsum's two supported
+ # signatures), so as a practical shortcut we dispatch on everything.
+ # Strings will be ignored for dispatching since they don't define
+ # __array_function__.
+ return operands
+
+
+@array_function_dispatch(_einsum_path_dispatcher, module='numpy')
def einsum_path(*operands, **kwargs):
"""
einsum_path(subscripts, *operands, optimize='greedy')
@@ -751,6 +764,7 @@ def einsum_path(*operands, **kwargs):
of the contraction and the remaining contraction ``(0, 1)`` is then
completed.
+ >>> np.random.seed(123)
>>> a = np.random.rand(2, 2)
>>> b = np.random.rand(2, 5)
>>> c = np.random.rand(5, 2)
@@ -758,7 +772,7 @@ def einsum_path(*operands, **kwargs):
>>> print(path_info[0])
['einsum_path', (1, 2), (0, 1)]
>>> print(path_info[1])
- Complete contraction: ij,jk,kl->il
+ Complete contraction: ij,jk,kl->il # may vary
Naive scaling: 4
Optimized scaling: 3
Naive FLOP count: 1.600e+02
@@ -777,12 +791,12 @@ def einsum_path(*operands, **kwargs):
>>> I = np.random.rand(10, 10, 10, 10)
>>> C = np.random.rand(10, 10)
>>> path_info = np.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C,
- optimize='greedy')
+ ... optimize='greedy')
>>> print(path_info[0])
['einsum_path', (0, 2), (0, 3), (0, 2), (0, 1)]
- >>> print(path_info[1])
- Complete contraction: ea,fb,abcd,gc,hd->efgh
+ >>> print(path_info[1])
+ Complete contraction: ea,fb,abcd,gc,hd->efgh # may vary
Naive scaling: 8
Optimized scaling: 5
Naive FLOP count: 8.000e+08
@@ -837,7 +851,6 @@ def einsum_path(*operands, **kwargs):
# Python side parsing
input_subscripts, output_subscript, operands = _parse_einsum_input(operands)
- subscripts = input_subscripts + '->' + output_subscript
# Build a few useful list and sets
input_list = input_subscripts.split(',')
@@ -876,9 +889,8 @@ def einsum_path(*operands, **kwargs):
broadcast_indices = [set(x) for x in broadcast_indices]
# Compute size of each input array plus the output array
- size_list = []
- for term in input_list + [output_subscript]:
- size_list.append(_compute_size_by_dict(term, dimension_dict))
+ size_list = [_compute_size_by_dict(term, dimension_dict)
+ for term in input_list + [output_subscript]]
max_size = max(size_list)
if memory_limit is None:
@@ -980,7 +992,16 @@ def einsum_path(*operands, **kwargs):
return (path, path_print)
+def _einsum_dispatcher(*operands, **kwargs):
+ # Arguably we dispatch on more arguments than we really should; see note in
+ # _einsum_path_dispatcher for why.
+ for op in operands:
+ yield op
+ yield kwargs.get('out')
+
+
# Rewrite einsum to handle different cases
+@array_function_dispatch(_einsum_dispatcher, module='numpy')
def einsum(*operands, **kwargs):
"""
einsum(subscripts, *operands, out=None, dtype=None, order='K',
@@ -1255,32 +1276,32 @@ def einsum(*operands, **kwargs):
>>> a = np.arange(60.).reshape(3,4,5)
>>> b = np.arange(24.).reshape(4,3,2)
>>> np.einsum('ijk,jil->kl', a, b)
- array([[ 4400., 4730.],
- [ 4532., 4874.],
- [ 4664., 5018.],
- [ 4796., 5162.],
- [ 4928., 5306.]])
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
>>> np.einsum(a, [0,1,2], b, [1,0,3], [2,3])
- array([[ 4400., 4730.],
- [ 4532., 4874.],
- [ 4664., 5018.],
- [ 4796., 5162.],
- [ 4928., 5306.]])
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
>>> np.tensordot(a,b, axes=([1,0],[0,1]))
- array([[ 4400., 4730.],
- [ 4532., 4874.],
- [ 4664., 5018.],
- [ 4796., 5162.],
- [ 4928., 5306.]])
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
Writeable returned arrays (since version 1.10.0):
>>> a = np.zeros((3, 3))
>>> np.einsum('ii->i', a)[:] = 1
>>> a
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
Example of ellipsis use:
@@ -1303,19 +1324,27 @@ def einsum(*operands, **kwargs):
particularly significant with larger arrays:
>>> a = np.ones(64).reshape(2,4,8)
- # Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
+
+ Basic `einsum`: ~1520ms (benchmarked on 3.1GHz Intel i5.)
+
>>> for iteration in range(500):
- ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
- # Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a)
+
+ Sub-optimal `einsum` (due to repeated path calculation time): ~330ms
+
>>> for iteration in range(500):
- ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
- # Greedy `einsum` (faster optimal path approximation): ~160ms
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')
+
+ Greedy `einsum` (faster optimal path approximation): ~160ms
+
>>> for iteration in range(500):
- ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
- # Optimal `einsum` (best usage pattern in some use cases): ~110ms
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='greedy')
+
+ Optimal `einsum` (best usage pattern in some use cases): ~110ms
+
>>> path = np.einsum_path('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize='optimal')[0]
>>> for iteration in range(500):
- ... np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
+ ... _ = np.einsum('ijk,ilm,njm,nlk,abc->',a,a,a,a,a, optimize=path)
"""
@@ -1354,9 +1383,7 @@ def einsum(*operands, **kwargs):
# Start contraction loop
for num, contraction in enumerate(contraction_list):
inds, idx_rm, einsum_str, remaining, blas = contraction
- tmp_operands = []
- for x in inds:
- tmp_operands.append(operands.pop(x))
+ tmp_operands = [operands.pop(x) for x in inds]
# Do we need to deal with the output?
handle_out = specified_out and ((num + 1) == len(contraction_list))
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index b9cc98cae..6c0b9cde9 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -3,15 +3,18 @@
"""
from __future__ import division, absolute_import, print_function
+import functools
import types
import warnings
import numpy as np
from .. import VisibleDeprecationWarning
from . import multiarray as mu
+from . import overrides
from . import umath as um
from . import numerictypes as nt
-from .numeric import asarray, array, asanyarray, concatenate
+from ._asarray import asarray, array, asanyarray
+from .multiarray import concatenate
from . import _methods
_dt_ = nt.sctype2char
@@ -22,7 +25,7 @@ __all__ = [
'argmin', 'argpartition', 'argsort', 'around', 'choose', 'clip',
'compress', 'cumprod', 'cumproduct', 'cumsum', 'diagonal', 'mean',
'ndim', 'nonzero', 'partition', 'prod', 'product', 'ptp', 'put',
- 'rank', 'ravel', 'repeat', 'reshape', 'resize', 'round_',
+ 'ravel', 'repeat', 'reshape', 'resize', 'round_',
'searchsorted', 'shape', 'size', 'sometrue', 'sort', 'squeeze',
'std', 'sum', 'swapaxes', 'take', 'trace', 'transpose', 'var',
]
@@ -31,6 +34,9 @@ _gentype = types.GeneratorType
# save away Python sum
_sum_ = sum
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
# functions that are now methods
def _wrapit(obj, method, *args, **kwds):
@@ -47,25 +53,26 @@ def _wrapit(obj, method, *args, **kwds):
def _wrapfunc(obj, method, *args, **kwds):
- try:
- return getattr(obj, method)(*args, **kwds)
-
- # An AttributeError occurs if the object does not have
- # such a method in its class.
+ bound = getattr(obj, method, None)
+ if bound is None:
+ return _wrapit(obj, method, *args, **kwds)
- # A TypeError occurs if the object does have such a method
- # in its class, but its signature is not identical to that
- # of NumPy's. This situation has occurred in the case of
- # a downstream library like 'pandas'.
- except (AttributeError, TypeError):
+ try:
+ return bound(*args, **kwds)
+ except TypeError:
+ # A TypeError occurs if the object does have such a method in its
+ # class, but its signature is not identical to that of NumPy's. This
+ # situation has occurred in the case of a downstream library like
+ # 'pandas'.
+ #
+ # Call _wrapit from within the except clause to ensure a potential
+ # exception has a traceback chain.
return _wrapit(obj, method, *args, **kwds)
def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
- passkwargs = {}
- for k, v in kwargs.items():
- if v is not np._NoValue:
- passkwargs[k] = v
+ passkwargs = {k: v for k, v in kwargs.items()
+ if v is not np._NoValue}
if type(obj) is not mu.ndarray:
try:
@@ -83,6 +90,11 @@ def _wrapreduction(obj, ufunc, method, axis, dtype, out, **kwargs):
return ufunc.reduce(obj, axis, dtype, out, **passkwargs)
+def _take_dispatcher(a, indices, axis=None, out=None, mode=None):
+ return (a, out)
+
+
+@array_function_dispatch(_take_dispatcher)
def take(a, indices, axis=None, out=None, mode='raise'):
"""
Take elements from an array along an axis.
@@ -119,7 +131,8 @@ def take(a, indices, axis=None, out=None, mode='raise'):
input array is used.
out : ndarray, optional (Ni..., Nj..., Nk...)
If provided, the result will be placed in this array. It should
- be of the appropriate shape and dtype.
+ be of the appropriate shape and dtype. Note that `out` is always
+ buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise', 'wrap', 'clip'}, optional
Specifies how out-of-bounds indices will behave.
@@ -181,7 +194,12 @@ def take(a, indices, axis=None, out=None, mode='raise'):
return _wrapfunc(a, 'take', indices, axis=axis, out=out, mode=mode)
+def _reshape_dispatcher(a, newshape, order=None):
+ return (a,)
+
+
# not deprecated --- copy if necessary, view otherwise
+@array_function_dispatch(_reshape_dispatcher)
def reshape(a, newshape, order='C'):
"""
Gives a new shape to an array without changing its data.
@@ -227,12 +245,16 @@ def reshape(a, newshape, order='C'):
you should assign the new shape to the shape attribute of the array::
>>> a = np.zeros((10, 2))
+
# A transpose makes the array non-contiguous
>>> b = a.T
+
# Taking a view makes it possible to modify the shape without modifying
# the initial object.
>>> c = b.view()
>>> c.shape = (20)
+ Traceback (most recent call last):
+ ...
AttributeError: incompatible shape for a non-contiguous array
The `order` keyword gives the index ordering both for *fetching* the values
@@ -279,6 +301,14 @@ def reshape(a, newshape, order='C'):
return _wrapfunc(a, 'reshape', newshape, order=order)
+def _choose_dispatcher(a, choices, out=None, mode=None):
+ yield a
+ for c in choices:
+ yield c
+ yield out
+
+
+@array_function_dispatch(_choose_dispatcher)
def choose(a, choices, out=None, mode='raise'):
"""
Construct an array from an index array and a set of arrays to choose from.
@@ -327,7 +357,8 @@ def choose(a, choices, out=None, mode='raise'):
``choices.shape[0]``) is taken as defining the "sequence".
out : array, optional
If provided, the result will be inserted into this array. It should
- be of the appropriate shape and dtype.
+ be of the appropriate shape and dtype. Note that `out` is always
+ buffered if `mode='raise'`; use other modes for better performance.
mode : {'raise' (default), 'wrap', 'clip'}, optional
Specifies how indices outside `[0, n-1]` will be treated:
@@ -349,6 +380,7 @@ def choose(a, choices, out=None, mode='raise'):
See Also
--------
ndarray.choose : equivalent method
+ numpy.take_along_axis : Preferable if `choices` is an array
Notes
-----
@@ -401,6 +433,11 @@ def choose(a, choices, out=None, mode='raise'):
return _wrapfunc(a, 'choose', choices, out=out, mode=mode)
+def _repeat_dispatcher(a, repeats, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_repeat_dispatcher)
def repeat(a, repeats, axis=None):
"""
Repeat elements of an array.
@@ -445,6 +482,11 @@ def repeat(a, repeats, axis=None):
return _wrapfunc(a, 'repeat', repeats, axis=axis)
+def _put_dispatcher(a, ind, v, mode=None):
+ return (a, ind, v)
+
+
+@array_function_dispatch(_put_dispatcher)
def put(a, ind, v, mode='raise'):
"""
Replaces specified elements of an array with given values.
@@ -474,7 +516,8 @@ def put(a, ind, v, mode='raise'):
'clip' mode means that all indices that are too large are replaced
by the index that addresses the last element along that axis. Note
- that this disables indexing with negative numbers.
+ that this disables indexing with negative numbers. In 'raise' mode,
+ if an exception occurs the target array may still be modified.
See Also
--------
@@ -503,6 +546,11 @@ def put(a, ind, v, mode='raise'):
return put(ind, v, mode=mode)
+def _swapaxes_dispatcher(a, axis1, axis2):
+ return (a,)
+
+
+@array_function_dispatch(_swapaxes_dispatcher)
def swapaxes(a, axis1, axis2):
"""
Interchange two axes of an array.
@@ -549,6 +597,11 @@ def swapaxes(a, axis1, axis2):
return _wrapfunc(a, 'swapaxes', axis1, axis2)
+def _transpose_dispatcher(a, axes=None):
+ return (a,)
+
+
+@array_function_dispatch(_transpose_dispatcher)
def transpose(a, axes=None):
"""
Permute the dimensions of an array.
@@ -598,6 +651,11 @@ def transpose(a, axes=None):
return _wrapfunc(a, 'transpose', axes)
+def _partition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_partition_dispatcher)
def partition(a, kth, axis=-1, kind='introselect', order=None):
"""
Return a partitioned copy of an array.
@@ -689,6 +747,11 @@ def partition(a, kth, axis=-1, kind='introselect', order=None):
return a
+def _argpartition_dispatcher(a, kth, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argpartition_dispatcher)
def argpartition(a, kth, axis=-1, kind='introselect', order=None):
"""
Perform an indirect partition along the given axis using the
@@ -757,7 +820,12 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
-def sort(a, axis=-1, kind='quicksort', order=None):
+def _sort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_sort_dispatcher)
+def sort(a, axis=-1, kind=None, order=None):
"""
Return a sorted copy of an array.
@@ -769,7 +837,14 @@ def sort(a, axis=-1, kind='quicksort', order=None):
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm. Default is 'quicksort'.
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort or radix sort under the covers and, in general,
+ the actual implementation will vary with data type. The 'mergesort' option
+ is retained for backwards compatibility.
+
+ .. versionchanged:: 1.15.0
+ The 'stable' option was added.
+
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
@@ -795,17 +870,22 @@ def sort(a, axis=-1, kind='quicksort', order=None):
The various sorting algorithms are characterized by their average speed,
worst case performance, work space size, and whether they are stable. A
stable sort keeps items with the same key in the same relative
- order. The three available algorithms have the following
+ order. The four algorithms implemented in NumPy have the following
properties:
=========== ======= ============= ============ ========
kind speed worst case work space stable
=========== ======= ============= ============ ========
'quicksort' 1 O(n^2) 0 no
- 'mergesort' 2 O(n*log(n)) ~n/2 yes
'heapsort' 3 O(n*log(n)) 0 no
+ 'mergesort' 2 O(n*log(n)) ~n/2 yes
+ 'timsort' 2 O(n*log(n)) ~n/2 yes
=========== ======= ============= ============ ========
+ .. note:: The datatype determines which of 'mergesort' or 'timsort'
+ is actually used, even if 'mergesort' is specified. User selection
+ at a finer scale is not currently available.
+
All the sort algorithms make temporary copies of the data when
sorting along any but the last axis. Consequently, sorting along
the last axis is faster and uses less space than sorting along
@@ -829,13 +909,30 @@ def sort(a, axis=-1, kind='quicksort', order=None):
.. versionadded:: 1.12.0
- quicksort has been changed to an introsort which will switch
- heapsort when it does not make enough progress. This makes its
- worst case O(n*log(n)).
-
- 'stable' automatically choses the best stable sorting algorithm
- for the data type being sorted. It is currently mapped to
- merge sort.
+ quicksort has been changed to `introsort <https://en.wikipedia.org/wiki/Introsort>`_.
+ When sorting does not make enough progress it switches to
+ `heapsort <https://en.wikipedia.org/wiki/Heapsort>`_.
+ This implementation makes quicksort O(n*log(n)) in the worst case.
+
+ 'stable' automatically chooses the best stable sorting algorithm
+ for the data type being sorted.
+ It, along with 'mergesort', is currently mapped to
+ `timsort <https://en.wikipedia.org/wiki/Timsort>`_
+ or `radix sort <https://en.wikipedia.org/wiki/Radix_sort>`_
+ depending on the data type.
+ API forward compatibility currently limits the
+ ability to select the implementation and it is hardwired for the different
+ data types.
+
+ .. versionadded:: 1.17.0
+
+ Timsort is added for better performance on already or nearly
+ sorted data. On random data timsort is almost identical to
+ mergesort. It is now used for stable sort while quicksort is still the
+ default sort if none is chosen. For timsort details, refer to
+ `CPython listsort.txt <https://github.com/python/cpython/blob/3.7/Objects/listsort.txt>`_.
+ 'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
+ O(n) sort instead of O(n log n).
Examples
--------
@@ -879,7 +976,12 @@ def sort(a, axis=-1, kind='quicksort', order=None):
return a
-def argsort(a, axis=-1, kind='quicksort', order=None):
+def _argsort_dispatcher(a, axis=None, kind=None, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_argsort_dispatcher)
+def argsort(a, axis=-1, kind=None, order=None):
"""
Returns the indices that would sort an array.
@@ -895,7 +997,13 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
Axis along which to sort. The default is -1 (the last axis). If None,
the flattened array is used.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm.
+ Sorting algorithm. The default is 'quicksort'. Note that both 'stable'
+ and 'mergesort' use timsort under the covers and, in general, the
+ actual implementation will vary with data type. The 'mergesort' option
+ is retained for backwards compatibility.
+
+ .. versionchanged:: 1.15.0
+ The 'stable' option was added.
order : str or list of str, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
@@ -906,10 +1014,10 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
Returns
-------
index_array : ndarray, int
- Array of indices that sort `a` along the specified axis.
+ Array of indices that sort `a` along the specified `axis`.
If `a` is one-dimensional, ``a[index_array]`` yields a sorted `a`.
- More generally, ``np.take_along_axis(a, index_array, axis=a)`` always
- yields the sorted `a`, irrespective of dimensionality.
+ More generally, ``np.take_along_axis(a, index_array, axis=axis)``
+ always yields the sorted `a`, irrespective of dimensionality.
See Also
--------
@@ -940,13 +1048,21 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
array([[0, 3],
[2, 2]])
- >>> np.argsort(x, axis=0) # sorts along first axis (down)
+ >>> ind = np.argsort(x, axis=0) # sorts along first axis (down)
+ >>> ind
array([[0, 1],
[1, 0]])
+ >>> np.take_along_axis(x, ind, axis=0) # same as np.sort(x, axis=0)
+ array([[0, 2],
+ [2, 3]])
- >>> np.argsort(x, axis=1) # sorts along last axis (across)
+ >>> ind = np.argsort(x, axis=1) # sorts along last axis (across)
+ >>> ind
array([[0, 1],
[0, 1]])
+ >>> np.take_along_axis(x, ind, axis=1) # same as np.sort(x, axis=1)
+ array([[0, 3],
+ [2, 2]])
Indices of the sorted elements of a N-dimensional array:
@@ -973,6 +1089,11 @@ def argsort(a, axis=-1, kind='quicksort', order=None):
return _wrapfunc(a, 'argsort', axis=axis, kind=kind, order=order)
+def _argmax_dispatcher(a, axis=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_argmax_dispatcher)
def argmax(a, axis=None, out=None):
"""
Returns the indices of the maximum values along an axis.
@@ -1007,10 +1128,10 @@ def argmax(a, axis=None, out=None):
Examples
--------
- >>> a = np.arange(6).reshape(2,3)
+ >>> a = np.arange(6).reshape(2,3) + 10
>>> a
- array([[0, 1, 2],
- [3, 4, 5]])
+ array([[10, 11, 12],
+ [13, 14, 15]])
>>> np.argmax(a)
5
>>> np.argmax(a, axis=0)
@@ -1024,7 +1145,7 @@ def argmax(a, axis=None, out=None):
>>> ind
(1, 2)
>>> a[ind]
- 5
+ 15
>>> b = np.arange(6)
>>> b[1] = 5
@@ -1037,6 +1158,11 @@ def argmax(a, axis=None, out=None):
return _wrapfunc(a, 'argmax', axis=axis, out=out)
+def _argmin_dispatcher(a, axis=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_argmin_dispatcher)
def argmin(a, axis=None, out=None):
"""
Returns the indices of the minimum values along an axis.
@@ -1071,10 +1197,10 @@ def argmin(a, axis=None, out=None):
Examples
--------
- >>> a = np.arange(6).reshape(2,3)
+ >>> a = np.arange(6).reshape(2,3) + 10
>>> a
- array([[0, 1, 2],
- [3, 4, 5]])
+ array([[10, 11, 12],
+ [13, 14, 15]])
>>> np.argmin(a)
0
>>> np.argmin(a, axis=0)
@@ -1088,12 +1214,12 @@ def argmin(a, axis=None, out=None):
>>> ind
(0, 0)
>>> a[ind]
- 0
+ 10
- >>> b = np.arange(6)
- >>> b[4] = 0
+ >>> b = np.arange(6) + 10
+ >>> b[4] = 10
>>> b
- array([0, 1, 2, 3, 0, 5])
+ array([10, 11, 12, 13, 10, 15])
>>> np.argmin(b) # Only the first occurrence is returned.
0
@@ -1101,6 +1227,11 @@ def argmin(a, axis=None, out=None):
return _wrapfunc(a, 'argmin', axis=axis, out=out)
+def _searchsorted_dispatcher(a, v, side=None, sorter=None):
+ return (a, v, sorter)
+
+
+@array_function_dispatch(_searchsorted_dispatcher)
def searchsorted(a, v, side='left', sorter=None):
"""
Find indices where elements should be inserted to maintain order.
@@ -1153,7 +1284,7 @@ def searchsorted(a, v, side='left', sorter=None):
As of NumPy 1.4.0 `searchsorted` works with real/complex arrays containing
`nan` values. The enhanced sort order is documented in `sort`.
- This function is a faster version of the builtin python `bisect.bisect_left`
+ This function uses the same algorithm as the builtin python `bisect.bisect_left`
(``side='left'``) and `bisect.bisect_right` (``side='right'``) functions,
which is also vectorized in the `v` argument.
@@ -1170,6 +1301,11 @@ def searchsorted(a, v, side='left', sorter=None):
return _wrapfunc(a, 'searchsorted', v, side=side, sorter=sorter)
+def _resize_dispatcher(a, new_shape):
+ return (a,)
+
+
+@array_function_dispatch(_resize_dispatcher)
def resize(a, new_shape):
"""
Return a new array with the specified shape.
@@ -1243,6 +1379,11 @@ def resize(a, new_shape):
return reshape(a, new_shape)
+def _squeeze_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_squeeze_dispatcher)
def squeeze(a, axis=None):
"""
Remove single-dimensional entries from the shape of an array.
@@ -1295,12 +1436,18 @@ def squeeze(a, axis=None):
try:
squeeze = a.squeeze
except AttributeError:
- return _wrapit(a, 'squeeze')
+ return _wrapit(a, 'squeeze', axis=axis)
if axis is None:
return squeeze()
else:
return squeeze(axis=axis)
+
+def _diagonal_dispatcher(a, offset=None, axis1=None, axis2=None):
+ return (a,)
+
+
+@array_function_dispatch(_diagonal_dispatcher)
def diagonal(a, offset=0, axis1=0, axis2=1):
"""
Return specified diagonals.
@@ -1356,7 +1503,7 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
same type as `a` is returned unless `a` is a `matrix`, in which case
a 1-D array rather than a (2-D) `matrix` is returned in order to
maintain backward compatibility.
-
+
If ``a.ndim > 2``, then the dimensions specified by `axis1` and `axis2`
are removed, and a new axis inserted at the end corresponding to the
diagonal.
@@ -1390,9 +1537,9 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
[2, 3]],
[[4, 5],
[6, 7]]])
- >>> a.diagonal(0, # Main diagonals of two arrays created by skipping
- ... 0, # across the outer(left)-most axis last and
- ... 1) # the "middle" (row) axis first.
+ >>> a.diagonal(0, # Main diagonals of two arrays created by skipping
+ ... 0, # across the outer(left)-most axis last and
+ ... 1) # the "middle" (row) axis first.
array([[0, 6],
[1, 7]])
@@ -1400,13 +1547,28 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
corresponds to fixing the right-most (column) axis, and that the
diagonals are "packed" in rows.
- >>> a[:,:,0] # main diagonal is [0 6]
+ >>> a[:,:,0] # main diagonal is [0 6]
array([[0, 2],
[4, 6]])
- >>> a[:,:,1] # main diagonal is [1 7]
+ >>> a[:,:,1] # main diagonal is [1 7]
array([[1, 3],
[5, 7]])
+ The anti-diagonal can be obtained by reversing the order of elements
+ using either `numpy.flipud` or `numpy.fliplr`.
+
+ >>> a = np.arange(9).reshape(3, 3)
+ >>> a
+ array([[0, 1, 2],
+ [3, 4, 5],
+ [6, 7, 8]])
+ >>> np.fliplr(a).diagonal() # Horizontal flip
+ array([2, 4, 6])
+ >>> np.flipud(a).diagonal() # Vertical flip
+ array([6, 4, 2])
+
+ Note that the order in which the diagonal is retrieved varies depending
+ on the flip function.
"""
if isinstance(a, np.matrix):
# Make diagonal of matrix 1-D to preserve backward compatibility.
@@ -1415,6 +1577,12 @@ def diagonal(a, offset=0, axis1=0, axis2=1):
return asanyarray(a).diagonal(offset=offset, axis1=axis1, axis2=axis2)
+def _trace_dispatcher(
+ a, offset=None, axis1=None, axis2=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_trace_dispatcher)
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
"""
Return the sum along diagonals of the array.
@@ -1478,6 +1646,11 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
return asanyarray(a).trace(offset=offset, axis1=axis1, axis2=axis2, dtype=dtype, out=out)
+def _ravel_dispatcher(a, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_ravel_dispatcher)
def ravel(a, order='C'):
"""Return a contiguous flattened array.
@@ -1541,21 +1714,21 @@ def ravel(a, order='C'):
It is equivalent to ``reshape(-1, order=order)``.
>>> x = np.array([[1, 2, 3], [4, 5, 6]])
- >>> print(np.ravel(x))
- [1 2 3 4 5 6]
+ >>> np.ravel(x)
+ array([1, 2, 3, 4, 5, 6])
- >>> print(x.reshape(-1))
- [1 2 3 4 5 6]
+ >>> x.reshape(-1)
+ array([1, 2, 3, 4, 5, 6])
- >>> print(np.ravel(x, order='F'))
- [1 4 2 5 3 6]
+ >>> np.ravel(x, order='F')
+ array([1, 4, 2, 5, 3, 6])
When ``order`` is 'A', it will preserve the array's 'C' or 'F' ordering:
- >>> print(np.ravel(x.T))
- [1 4 2 5 3 6]
- >>> print(np.ravel(x.T, order='A'))
- [1 2 3 4 5 6]
+ >>> np.ravel(x.T)
+ array([1, 4, 2, 5, 3, 6])
+ >>> np.ravel(x.T, order='A')
+ array([1, 2, 3, 4, 5, 6])
When ``order`` is 'K', it will preserve orderings that are neither 'C'
nor 'F', but won't reverse axes:
@@ -1584,6 +1757,11 @@ def ravel(a, order='C'):
return asanyarray(a).ravel(order=order)
+def _nonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_nonzero_dispatcher)
def nonzero(a):
"""
Return the indices of the elements that are non-zero.
@@ -1591,17 +1769,19 @@ def nonzero(a):
Returns a tuple of arrays, one for each dimension of `a`,
containing the indices of the non-zero elements in that
dimension. The values in `a` are always tested and returned in
- row-major, C-style order. The corresponding non-zero
- values can be obtained with::
+ row-major, C-style order.
- a[nonzero(a)]
+ To group the indices by element, rather than dimension, use `argwhere`,
+ which returns a row for each non-zero element.
- To group the indices by element, rather than dimension, use::
+ .. note::
+
+ When called on a zero-d array or scalar, ``nonzero(a)`` is treated
+ as ``nonzero(atleast1d(a))``.
- transpose(nonzero(a))
+ .. deprecated:: 1.17.0
- The result of this is always a 2-D array, with a row for
- each non-zero element.
+ Use `atleast1d` explicitly if this behavior is deliberate.
Parameters
----------
@@ -1623,6 +1803,12 @@ def nonzero(a):
count_nonzero :
Counts the number of non-zero elements in the input array.
+ Notes
+ -----
+ While the nonzero values can be obtained with ``a[nonzero(a)]``, it is
+ recommended to use ``x[x.astype(bool)]`` or ``x[x != 0]`` instead, which
+ will correctly handle 0-d arrays.
+
Examples
--------
>>> x = np.array([[3, 0, 0], [0, 4, 0], [5, 6, 0]])
@@ -1639,7 +1825,7 @@ def nonzero(a):
array([[0, 0],
[1, 1],
[2, 0],
- [2, 1])
+ [2, 1]])
A common use for ``nonzero`` is to find the indices of an array, where
a condition is True. Given an array `a`, the condition `a` > 3 is a
@@ -1670,6 +1856,11 @@ def nonzero(a):
return _wrapfunc(a, 'nonzero')
+def _shape_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_shape_dispatcher)
def shape(a):
"""
Return the shape of an array.
@@ -1715,6 +1906,11 @@ def shape(a):
return result
+def _compress_dispatcher(condition, a, axis=None, out=None):
+ return (condition, a, out)
+
+
+@array_function_dispatch(_compress_dispatcher)
def compress(condition, a, axis=None, out=None):
"""
Return selected slices of an array along given axis.
@@ -1778,7 +1974,12 @@ def compress(condition, a, axis=None, out=None):
return _wrapfunc(a, 'compress', condition, axis=axis, out=out)
-def clip(a, a_min, a_max, out=None):
+def _clip_dispatcher(a, a_min, a_max, out=None, **kwargs):
+ return (a, a_min, a_max)
+
+
+@array_function_dispatch(_clip_dispatcher)
+def clip(a, a_min, a_max, out=None, **kwargs):
"""
Clip (limit) the values in an array.
@@ -1787,6 +1988,9 @@ def clip(a, a_min, a_max, out=None):
is specified, values smaller than 0 become 0, and values larger
than 1 become 1.
+ Equivalent to but faster than ``np.maximum(a_min, np.minimum(a, a_max))``.
+ No check is performed to ensure ``a_min < a_max``.
+
Parameters
----------
a : array_like
@@ -1804,6 +2008,11 @@ def clip(a, a_min, a_max, out=None):
The results will be placed in this array. It may be the input
array for in-place clipping. `out` must be of the right shape
to hold the output. Its type is preserved.
+ **kwargs
+ For other keyword-only arguments, see the
+ :ref:`ufunc docs <ufuncs.kwargs>`.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -1832,10 +2041,17 @@ def clip(a, a_min, a_max, out=None):
array([3, 4, 2, 3, 4, 5, 6, 7, 8, 8])
"""
- return _wrapfunc(a, 'clip', a_min, a_max, out=out)
+ return _wrapfunc(a, 'clip', a_min, a_max, out=out, **kwargs)
+
+def _sum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
-def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+
+@array_function_dispatch(_sum_dispatcher)
+def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
"""
Sum of array elements over a given axis.
@@ -1879,6 +2095,11 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
.. versionadded:: 1.15.0
+ where : array_like of bool, optional
+ Elements to include in the sum. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.17.0
+
Returns
-------
sum_along_axis : ndarray
@@ -1891,6 +2112,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
--------
ndarray.sum : Equivalent method.
+ add.reduce : Equivalent functionality of `add`.
+
cumsum : Cumulative sum of array elements.
trapz : Integration of array values using the composite trapezoidal rule.
@@ -1907,6 +2130,23 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
>>> np.sum([])
0.0
+ For floating point numbers the numerical precision of sum (and
+ ``np.add.reduce``) is in general limited by directly adding each number
+ individually to the result causing rounding errors in every step.
+ However, often numpy will use a numerically better approach (partial
+ pairwise summation) leading to improved precision in many use-cases.
+ This improved precision is always provided when no ``axis`` is given.
+ When ``axis`` is given, it will depend on which axis is summed.
+ Technically, to provide the best speed possible, the improved precision
+ is only used when the summation is along the fast axis in memory.
+ Note that the exact precision may vary depending on other parameters.
+ In contrast to NumPy, Python's ``math.fsum`` function uses a slower but
+ more precise approach to summation.
+ Especially when summing a large number of lower precision floating point
+ numbers, such as ``float32``, numerical errors can become significant.
+ In such cases it can be advisable to use `dtype="float64"` to use a higher
+ precision for the output.
+
Examples
--------
>>> np.sum([0.5, 1.5])
@@ -1919,6 +2159,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
array([0, 6])
>>> np.sum([[0, 1], [0, 5]], axis=1)
array([1, 5])
+ >>> np.sum([[0, 1], [np.nan, 5]], where=[False, True], axis=1)
+ array([1., 5.])
If the accumulator is too small, overflow occurs:
@@ -1934,8 +2176,8 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
# 2018-02-25, 1.15.0
warnings.warn(
"Calling np.sum(generator) is deprecated, and in the future will give a different result. "
- "Use np.sum(np.from_iter(generator)) or the python sum builtin instead.",
- DeprecationWarning, stacklevel=2)
+ "Use np.sum(np.fromiter(generator)) or the python sum builtin instead.",
+ DeprecationWarning, stacklevel=3)
res = _sum_(a)
if out is not None:
@@ -1944,9 +2186,14 @@ def sum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._No
return res
return _wrapreduction(a, np.add, 'sum', axis, dtype, out, keepdims=keepdims,
- initial=initial)
+ initial=initial, where=where)
+
+def _any_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+@array_function_dispatch(_any_dispatcher)
def any(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether any array element along a given axis evaluates to True.
@@ -2016,10 +2263,10 @@ def any(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.any(np.nan)
True
- >>> o=np.array([False])
+ >>> o=np.array(False)
>>> z=np.any([-1, 4, 5], out=o)
>>> z, o
- (array([ True]), array([ True]))
+ (array(True), array(True))
>>> # Check now that z is a reference to o
>>> z is o
True
@@ -2030,6 +2277,11 @@ def any(a, axis=None, out=None, keepdims=np._NoValue):
return _wrapreduction(a, np.logical_or, 'any', axis, None, out, keepdims=keepdims)
+def _all_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_all_dispatcher)
def all(a, axis=None, out=None, keepdims=np._NoValue):
"""
Test whether all array elements along a given axis evaluate to True.
@@ -2097,15 +2349,20 @@ def all(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.all([1.0, np.nan])
True
- >>> o=np.array([False])
+ >>> o=np.array(False)
>>> z=np.all([-1, 4, 5], out=o)
- >>> id(z), id(o), z # doctest: +SKIP
- (28293632, 28293632, array([ True]))
+ >>> id(z), id(o), z
+ (28293632, 28293632, array(True)) # may vary
"""
return _wrapreduction(a, np.logical_and, 'all', axis, None, out, keepdims=keepdims)
+def _cumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumsum_dispatcher)
def cumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of the elements along a given axis.
@@ -2173,6 +2430,11 @@ def cumsum(a, axis=None, dtype=None, out=None):
return _wrapfunc(a, 'cumsum', axis=axis, dtype=dtype, out=out)
+def _ptp_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_ptp_dispatcher)
def ptp(a, axis=None, out=None, keepdims=np._NoValue):
"""
Range of values (maximum - minimum) along an axis.
@@ -2241,7 +2503,14 @@ def ptp(a, axis=None, out=None, keepdims=np._NoValue):
return _methods._ptp(a, axis=axis, out=out, **kwargs)
-def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+def _amax_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+ where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amax_dispatcher)
+def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
"""
Return the maximum of an array or maximum along an axis.
@@ -2279,6 +2548,11 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
.. versionadded:: 1.15.0
+ where : array_like of bool, optional
+ Elements to compare for the maximum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -2324,11 +2598,14 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
array([2, 3])
>>> np.amax(a, axis=1) # Maxima along the second axis
array([1, 3])
-
+ >>> np.amax(a, where=[False, True], initial=-1, axis=0)
+ array([-1, 3])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amax(b)
nan
+ >>> np.amax(b, where=~np.isnan(b), initial=-1)
+ 4.0
>>> np.nanmax(b)
4.0
@@ -2347,11 +2624,18 @@ def amax(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
>>> max([5], default=6)
5
"""
- return _wrapreduction(a, np.maximum, 'max', axis, None, out, keepdims=keepdims,
- initial=initial)
+ return _wrapreduction(a, np.maximum, 'max', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
-def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+def _amin_dispatcher(a, axis=None, out=None, keepdims=None, initial=None,
+ where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_amin_dispatcher)
+def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue,
+ where=np._NoValue):
"""
Return the minimum of an array or minimum along an axis.
@@ -2389,6 +2673,12 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
.. versionadded:: 1.15.0
+ where : array_like of bool, optional
+ Elements to compare for the minimum. See `~numpy.ufunc.reduce`
+ for details.
+
+ .. versionadded:: 1.17.0
+
Returns
-------
amin : ndarray or scalar
@@ -2433,11 +2723,15 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
array([0, 1])
>>> np.amin(a, axis=1) # Minima along the second axis
array([0, 2])
+ >>> np.amin(a, where=[False, True], initial=10, axis=0)
+ array([10, 1])
>>> b = np.arange(5, dtype=float)
>>> b[2] = np.NaN
>>> np.amin(b)
nan
+ >>> np.amin(b, where=~np.isnan(b), initial=10)
+ 0.0
>>> np.nanmin(b)
0.0
@@ -2455,10 +2749,15 @@ def amin(a, axis=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
>>> min([6], default=5)
6
"""
- return _wrapreduction(a, np.minimum, 'min', axis, None, out, keepdims=keepdims,
- initial=initial)
+ return _wrapreduction(a, np.minimum, 'min', axis, None, out,
+ keepdims=keepdims, initial=initial, where=where)
+
+def _alen_dispathcer(a):
+ return (a,)
+
+@array_function_dispatch(_alen_dispathcer)
def alen(a):
"""
Return the length of the first dimension of the input array.
@@ -2486,13 +2785,24 @@ def alen(a):
7
"""
+ # NumPy 1.18.0, 2019-08-02
+ warnings.warn(
+ "`np.alen` is deprecated, use `len` instead",
+ DeprecationWarning, stacklevel=2)
try:
return len(a)
except TypeError:
return len(array(a, ndmin=1))
-def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._NoValue):
+def _prod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None,
+ initial=None, where=None):
+ return (a, out)
+
+
+@array_function_dispatch(_prod_dispatcher)
+def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue,
+ initial=np._NoValue, where=np._NoValue):
"""
Return the product of array elements over a given axis.
@@ -2537,6 +2847,11 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N
.. versionadded:: 1.15.0
+ where : array_like of bool, optional
+ Elements to include in the product. See `~numpy.ufunc.reduce` for details.
+
+ .. versionadded:: 1.17.0
+
Returns
-------
product_along_axis : ndarray, see `dtype` parameter above.
@@ -2554,8 +2869,8 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N
raised on overflow. That means that, on a 32-bit platform:
>>> x = np.array([536870910, 536870910, 536870910, 536870910])
- >>> np.prod(x) # random
- 16
+ >>> np.prod(x)
+ 16 # may vary
The product of an empty array is the neutral element 1:
@@ -2579,6 +2894,11 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N
>>> np.prod([[1.,2.],[3.,4.]], axis=1)
array([ 2., 12.])
+ Or select specific elements to include:
+
+ >>> np.prod([1., np.nan, 3.], where=[True, False, True])
+ 3.0
+
If the type of `x` is unsigned, then the output type is
the unsigned platform integer:
@@ -2598,10 +2918,15 @@ def prod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue, initial=np._N
>>> np.prod([1, 2], initial=5)
10
"""
- return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out, keepdims=keepdims,
- initial=initial)
+ return _wrapreduction(a, np.multiply, 'prod', axis, dtype, out,
+ keepdims=keepdims, initial=initial, where=where)
+def _cumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_cumprod_dispatcher)
def cumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of elements along a given axis.
@@ -2665,6 +2990,11 @@ def cumprod(a, axis=None, dtype=None, out=None):
return _wrapfunc(a, 'cumprod', axis=axis, dtype=dtype, out=out)
+def _ndim_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_ndim_dispatcher)
def ndim(a):
"""
Return the number of dimensions of an array.
@@ -2702,6 +3032,11 @@ def ndim(a):
return asarray(a).ndim
+def _size_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_size_dispatcher)
def size(a, axis=None):
"""
Return the number of elements along a given axis.
@@ -2748,6 +3083,11 @@ def size(a, axis=None):
return asarray(a).shape[axis]
+def _around_dispatcher(a, decimals=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_around_dispatcher)
def around(a, decimals=0, out=None):
"""
Evenly round to the given number of decimals.
@@ -2787,10 +3127,37 @@ def around(a, decimals=0, out=None):
-----
For values exactly halfway between rounded decimal values, NumPy
rounds to the nearest even value. Thus 1.5 and 2.5 round to 2.0,
- -0.5 and 0.5 round to 0.0, etc. Results may also be surprising due
- to the inexact representation of decimal fractions in the IEEE
- floating point standard [1]_ and errors introduced when scaling
- by powers of ten.
+ -0.5 and 0.5 round to 0.0, etc.
+
+ ``np.around`` uses a fast but sometimes inexact algorithm to round
+ floating-point datatypes. For positive `decimals` it is equivalent to
+ ``np.true_divide(np.rint(a * 10**decimals), 10**decimals)``, which has
+ error due to the inexact representation of decimal fractions in the IEEE
+ floating point standard [1]_ and errors introduced when scaling by powers
+ of ten. For instance, note the extra "1" in the following:
+
+ >>> np.round(56294995342131.5, 3)
+ 56294995342131.51
+
+ If your goal is to print such values with a fixed number of decimals, it is
+ preferable to use numpy's float printing routines to limit the number of
+ printed decimals:
+
+ >>> np.format_float_positional(56294995342131.5, precision=3)
+ '56294995342131.5'
+
+ The float printing routines use an accurate but much more computationally
+ demanding algorithm to compute the number of digits after the decimal
+ point.
+
+ Alternatively, Python's builtin `round` function uses a more accurate
+ but slower algorithm for 64-bit floating point values:
+
+ >>> round(56294995342131.5, 3)
+ 56294995342131.5
+ >>> np.round(16.055, 2), round(16.055, 2) # equals 16.0549999999999997
+ (16.06, 16.05)
+
References
----------
@@ -2803,11 +3170,11 @@ def around(a, decimals=0, out=None):
Examples
--------
>>> np.around([0.37, 1.64])
- array([ 0., 2.])
+ array([0., 2.])
>>> np.around([0.37, 1.64], decimals=1)
- array([ 0.4, 1.6])
+ array([0.4, 1.6])
>>> np.around([.5, 1.5, 2.5, 3.5, 4.5]) # rounds to nearest even value
- array([ 0., 2., 2., 4., 4.])
+ array([0., 2., 2., 4., 4.])
>>> np.around([1,2,3,11], decimals=1) # ndarray of ints is returned
array([ 1, 2, 3, 11])
>>> np.around([1,2,3,11], decimals=-1)
@@ -2817,20 +3184,11 @@ def around(a, decimals=0, out=None):
return _wrapfunc(a, 'round', decimals=decimals, out=out)
-def round_(a, decimals=0, out=None):
- """
- Round an array to the given number of decimals.
-
- Refer to `around` for full documentation.
-
- See Also
- --------
- around : equivalent function
-
- """
- return around(a, decimals=decimals, out=out)
+def _mean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+@array_function_dispatch(_mean_dispatcher)
def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis.
@@ -2904,9 +3262,9 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.mean(a)
2.5
>>> np.mean(a, axis=0)
- array([ 2., 3.])
+ array([2., 3.])
>>> np.mean(a, axis=1)
- array([ 1.5, 3.5])
+ array([1.5, 3.5])
In single precision, `mean` can be inaccurate:
@@ -2919,7 +3277,7 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
Computing the mean in float64 is more accurate:
>>> np.mean(a, dtype=np.float64)
- 0.55000000074505806
+ 0.55000000074505806 # may vary
"""
kwargs = {}
@@ -2937,6 +3295,12 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
out=out, **kwargs)
+def _std_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_std_dispatcher)
def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis.
@@ -3019,11 +3383,11 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
--------
>>> a = np.array([[1, 2], [3, 4]])
>>> np.std(a)
- 1.1180339887498949
+ 1.1180339887498949 # may vary
>>> np.std(a, axis=0)
- array([ 1., 1.])
+ array([1., 1.])
>>> np.std(a, axis=1)
- array([ 0.5, 0.5])
+ array([0.5, 0.5])
In single precision, std() can be inaccurate:
@@ -3036,7 +3400,7 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
Computing the standard deviation in float64 is more accurate:
>>> np.std(a, dtype=np.float64)
- 0.44999999925494177
+ 0.44999999925494177 # may vary
"""
kwargs = {}
@@ -3055,6 +3419,12 @@ def std(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
**kwargs)
+def _var_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_var_dispatcher)
def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis.
@@ -3078,7 +3448,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
instead of a single axis or all the axes as before.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
- the default is `float32`; for arrays of float types it is the same as
+ the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
@@ -3107,7 +3477,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
See Also
--------
- std , mean, nanmean, nanstd, nanvar
+ std, mean, nanmean, nanstd, nanvar
numpy.doc.ufuncs : Section "Output arguments"
Notes
@@ -3137,9 +3507,9 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
>>> np.var(a)
1.25
>>> np.var(a, axis=0)
- array([ 1., 1.])
+ array([1., 1.])
>>> np.var(a, axis=1)
- array([ 0.25, 0.25])
+ array([0.25, 0.25])
In single precision, var() can be inaccurate:
@@ -3152,7 +3522,7 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
Computing the variance in float64 is more accurate:
>>> np.var(a, dtype=np.float64)
- 0.20249999932944759
+ 0.20249999932944759 # may vary
>>> ((1-0.55)**2 + (0.1-0.55)**2)/2
0.2025
@@ -3177,6 +3547,19 @@ def var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
# Aliases of other functions. These have their own definitions only so that
# they can have unique docstrings.
+@array_function_dispatch(_around_dispatcher)
+def round_(a, decimals=0, out=None):
+ """
+ Round an array to the given number of decimals.
+
+ See Also
+ --------
+ around : equivalent function; see for details.
+ """
+ return around(a, decimals=decimals, out=out)
+
+
+@array_function_dispatch(_prod_dispatcher, verify=False)
def product(*args, **kwargs):
"""
Return the product of array elements over a given axis.
@@ -3188,6 +3571,7 @@ def product(*args, **kwargs):
return prod(*args, **kwargs)
+@array_function_dispatch(_cumprod_dispatcher, verify=False)
def cumproduct(*args, **kwargs):
"""
Return the cumulative product over the given axis.
@@ -3199,6 +3583,7 @@ def cumproduct(*args, **kwargs):
return cumprod(*args, **kwargs)
+@array_function_dispatch(_any_dispatcher, verify=False)
def sometrue(*args, **kwargs):
"""
Check whether some values are true.
@@ -3212,6 +3597,7 @@ def sometrue(*args, **kwargs):
return any(*args, **kwargs)
+@array_function_dispatch(_all_dispatcher, verify=False)
def alltrue(*args, **kwargs):
"""
Check if all elements of input array are true.
@@ -3221,29 +3607,3 @@ def alltrue(*args, **kwargs):
numpy.all : Equivalent function; see for details.
"""
return all(*args, **kwargs)
-
-
-def rank(a):
- """
- Return the number of dimensions of an array.
-
- .. note::
- This function is deprecated in NumPy 1.9 to avoid confusion with
- `numpy.linalg.matrix_rank`. The ``ndim`` attribute or function
- should be used instead.
-
- See Also
- --------
- ndim : equivalent non-deprecated function
-
- Notes
- -----
- In the old Numeric package, `rank` was the term used for the number of
- dimensions, but in NumPy `ndim` is used instead.
- """
- # 2014-04-12, 1.9
- warnings.warn(
- "`rank` is deprecated; use the `ndim` attribute or function instead. "
- "To find the rank of a matrix see `numpy.linalg.matrix_rank`.",
- VisibleDeprecationWarning, stacklevel=2)
- return ndim(a)
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 799b1418d..42604ec3f 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -1,29 +1,31 @@
from __future__ import division, absolute_import, print_function
+import functools
import warnings
import operator
+import types
from . import numeric as _nx
from .numeric import (result_type, NaN, shares_memory, MAY_SHARE_BOUNDS,
- TooHardError,asanyarray)
+ TooHardError, asanyarray, ndim)
from numpy.core.multiarray import add_docstring
+from numpy.core import overrides
__all__ = ['logspace', 'linspace', 'geomspace']
-def _index_deprecate(i, stacklevel=2):
- try:
- i = operator.index(i)
- except TypeError:
- msg = ("object of type {} cannot be safely interpreted as "
- "an integer.".format(type(i)))
- i = int(i)
- stacklevel += 1
- warnings.warn(msg, DeprecationWarning, stacklevel=stacklevel)
- return i
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _linspace_dispatcher(start, stop, num=None, endpoint=None, retstep=None,
+ dtype=None, axis=None):
+ return (start, stop)
-def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
+@array_function_dispatch(_linspace_dispatcher)
+def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
+ axis=0):
"""
Return evenly spaced numbers over a specified interval.
@@ -32,11 +34,14 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
The endpoint of the interval can optionally be excluded.
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
Parameters
----------
- start : scalar
+ start : array_like
The starting value of the sequence.
- stop : scalar
+ stop : array_like
The end value of the sequence, unless `endpoint` is set to False.
In that case, the sequence consists of all but the last of ``num + 1``
evenly spaced samples, so that `stop` is excluded. Note that the step
@@ -55,6 +60,13 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
.. versionadded:: 1.9.0
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
Returns
-------
samples : ndarray
@@ -79,11 +91,11 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
Examples
--------
>>> np.linspace(2.0, 3.0, num=5)
- array([ 2. , 2.25, 2.5 , 2.75, 3. ])
+ array([2. , 2.25, 2.5 , 2.75, 3. ])
>>> np.linspace(2.0, 3.0, num=5, endpoint=False)
- array([ 2. , 2.2, 2.4, 2.6, 2.8])
+ array([2. , 2.2, 2.4, 2.6, 2.8])
>>> np.linspace(2.0, 3.0, num=5, retstep=True)
- (array([ 2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
+ (array([2. , 2.25, 2.5 , 2.75, 3. ]), 0.25)
Graphical illustration:
@@ -101,8 +113,13 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
>>> plt.show()
"""
- # 2016-02-25, 1.12
- num = _index_deprecate(num)
+ try:
+ num = operator.index(num)
+ except TypeError:
+ raise TypeError(
+ "object of type {} cannot be safely interpreted as an integer."
+ .format(type(num)))
+
if num < 0:
raise ValueError("Number of samples, %s, must be non-negative." % num)
div = (num - 1) if endpoint else num
@@ -116,16 +133,15 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
if dtype is None:
dtype = dt
- y = _nx.arange(0, num, dtype=dt)
-
delta = stop - start
+ y = _nx.arange(0, num, dtype=dt).reshape((-1,) + (1,) * ndim(delta))
# In-place multiplication y *= delta/div is faster, but prevents the multiplicant
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
- _mult_inplace = _nx.isscalar(delta)
+ _mult_inplace = _nx.isscalar(delta)
if num > 1:
step = delta / div
- if step == 0:
+ if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
y /= div
if _mult_inplace:
@@ -148,13 +164,23 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None):
if endpoint and num > 1:
y[-1] = stop
+ if axis != 0:
+ y = _nx.moveaxis(y, 0, axis)
+
if retstep:
return y.astype(dtype, copy=False), step
else:
return y.astype(dtype, copy=False)
-def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
+def _logspace_dispatcher(start, stop, num=None, endpoint=None, base=None,
+ dtype=None, axis=None):
+ return (start, stop)
+
+
+@array_function_dispatch(_logspace_dispatcher)
+def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None,
+ axis=0):
"""
Return numbers spaced evenly on a log scale.
@@ -162,11 +188,14 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
(`base` to the power of `start`) and ends with ``base ** stop``
(see `endpoint` below).
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
Parameters
----------
- start : float
+ start : array_like
``base ** start`` is the starting value of the sequence.
- stop : float
+ stop : array_like
``base ** stop`` is the final value of the sequence, unless `endpoint`
is False. In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
@@ -183,6 +212,13 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
+
Returns
-------
@@ -210,11 +246,11 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
Examples
--------
>>> np.logspace(2.0, 3.0, num=4)
- array([ 100. , 215.443469 , 464.15888336, 1000. ])
+ array([ 100. , 215.443469 , 464.15888336, 1000. ])
>>> np.logspace(2.0, 3.0, num=4, endpoint=False)
- array([ 100. , 177.827941 , 316.22776602, 562.34132519])
+ array([100. , 177.827941 , 316.22776602, 562.34132519])
>>> np.logspace(2.0, 3.0, num=4, base=2.0)
- array([ 4. , 5.0396842 , 6.34960421, 8. ])
+ array([4. , 5.0396842 , 6.34960421, 8. ])
Graphical illustration:
@@ -232,24 +268,33 @@ def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None):
>>> plt.show()
"""
- y = linspace(start, stop, num=num, endpoint=endpoint)
+ y = linspace(start, stop, num=num, endpoint=endpoint, axis=axis)
if dtype is None:
return _nx.power(base, y)
- return _nx.power(base, y).astype(dtype)
+ return _nx.power(base, y).astype(dtype, copy=False)
+
+
+def _geomspace_dispatcher(start, stop, num=None, endpoint=None, dtype=None,
+ axis=None):
+ return (start, stop)
-def geomspace(start, stop, num=50, endpoint=True, dtype=None):
+@array_function_dispatch(_geomspace_dispatcher)
+def geomspace(start, stop, num=50, endpoint=True, dtype=None, axis=0):
"""
Return numbers spaced evenly on a log scale (a geometric progression).
This is similar to `logspace`, but with endpoints specified directly.
Each output sample is a constant multiple of the previous.
+ .. versionchanged:: 1.16.0
+ Non-scalar `start` and `stop` are now supported.
+
Parameters
----------
- start : scalar
+ start : array_like
The starting value of the sequence.
- stop : scalar
+ stop : array_like
The final value of the sequence, unless `endpoint` is False.
In that case, ``num + 1`` values are spaced over the
interval in log-space, of which all but the last (a sequence of
@@ -262,6 +307,12 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
dtype : dtype
The type of the output array. If `dtype` is not given, infer the data
type from the other input arguments.
+ axis : int, optional
+ The axis in the result to store the samples. Relevant only if start
+ or stop are array-like. By default (0), the samples will be along a
+ new axis inserted at the beginning. Use -1 to get an axis at the end.
+
+ .. versionadded:: 1.16.0
Returns
-------
@@ -304,15 +355,15 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
Negative, decreasing, and complex inputs are allowed:
>>> np.geomspace(1000, 1, num=4)
- array([ 1000., 100., 10., 1.])
+ array([1000., 100., 10., 1.])
>>> np.geomspace(-1000, -1, num=4)
array([-1000., -100., -10., -1.])
>>> np.geomspace(1j, 1000j, num=4) # Straight line
- array([ 0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
+ array([0. +1.j, 0. +10.j, 0. +100.j, 0.+1000.j])
>>> np.geomspace(-1+0j, 1+0j, num=5) # Circle
- array([-1.00000000+0.j , -0.70710678+0.70710678j,
- 0.00000000+1.j , 0.70710678+0.70710678j,
- 1.00000000+0.j ])
+ array([-1.00000000e+00+1.22464680e-16j, -7.07106781e-01+7.07106781e-01j,
+ 6.12323400e-17+1.00000000e+00j, 7.07106781e-01+7.07106781e-01j,
+ 1.00000000e+00+0.00000000e+00j])
Graphical illustration of ``endpoint`` parameter:
@@ -320,78 +371,143 @@ def geomspace(start, stop, num=50, endpoint=True, dtype=None):
>>> N = 10
>>> y = np.zeros(N)
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=True), y + 1, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
>>> plt.semilogx(np.geomspace(1, 1000, N, endpoint=False), y + 2, 'o')
+ [<matplotlib.lines.Line2D object at 0x...>]
>>> plt.axis([0.5, 2000, 0, 3])
+ [0.5, 2000, 0, 3]
>>> plt.grid(True, color='0.7', linestyle='-', which='both', axis='both')
>>> plt.show()
"""
- if start == 0 or stop == 0:
+ start = asanyarray(start)
+ stop = asanyarray(stop)
+ if _nx.any(start == 0) or _nx.any(stop == 0):
raise ValueError('Geometric sequence cannot include zero')
- dt = result_type(start, stop, float(num))
+ dt = result_type(start, stop, float(num), _nx.zeros((), dtype))
if dtype is None:
dtype = dt
else:
# complex to dtype('complex128'), for instance
dtype = _nx.dtype(dtype)
+ # Promote both arguments to the same dtype in case, for instance, one is
+ # complex and another is negative and log would produce NaN otherwise.
+ # Copy since we may change things in-place further down.
+ start = start.astype(dt, copy=True)
+ stop = stop.astype(dt, copy=True)
+
+ out_sign = _nx.ones(_nx.broadcast(start, stop).shape, dt)
# Avoid negligible real or imaginary parts in output by rotating to
# positive real, calculating, then undoing rotation
- out_sign = 1
- if start.real == stop.real == 0:
- start, stop = start.imag, stop.imag
- out_sign = 1j * out_sign
- if _nx.sign(start) == _nx.sign(stop) == -1:
- start, stop = -start, -stop
- out_sign = -out_sign
-
- # Promote both arguments to the same dtype in case, for instance, one is
- # complex and another is negative and log would produce NaN otherwise
- start = start + (stop - stop)
- stop = stop + (start - start)
- if _nx.issubdtype(dtype, _nx.complexfloating):
- start = start + 0j
- stop = stop + 0j
+ if _nx.issubdtype(dt, _nx.complexfloating):
+ all_imag = (start.real == 0.) & (stop.real == 0.)
+ if _nx.any(all_imag):
+ start[all_imag] = start[all_imag].imag
+ stop[all_imag] = stop[all_imag].imag
+ out_sign[all_imag] = 1j
+
+ both_negative = (_nx.sign(start) == -1) & (_nx.sign(stop) == -1)
+ if _nx.any(both_negative):
+ _nx.negative(start, out=start, where=both_negative)
+ _nx.negative(stop, out=stop, where=both_negative)
+ _nx.negative(out_sign, out=out_sign, where=both_negative)
log_start = _nx.log10(start)
log_stop = _nx.log10(stop)
result = out_sign * logspace(log_start, log_stop, num=num,
endpoint=endpoint, base=10.0, dtype=dtype)
+ if axis != 0:
+ result = _nx.moveaxis(result, 0, axis)
- return result.astype(dtype)
+ return result.astype(dtype, copy=False)
-#always succeed
-def add_newdoc(place, obj, doc):
+def _needs_add_docstring(obj):
"""
- Adds documentation to obj which is in module place.
+ Returns true if the only way to set the docstring of `obj` from python is
+ via add_docstring.
+
+ This function errs on the side of being overly conservative.
+ """
+ Py_TPFLAGS_HEAPTYPE = 1 << 9
+
+ if isinstance(obj, (types.FunctionType, types.MethodType, property)):
+ return False
+
+ if isinstance(obj, type) and obj.__flags__ & Py_TPFLAGS_HEAPTYPE:
+ return False
- If doc is a string add it to obj as a docstring
+ return True
- If doc is a tuple, then the first element is interpreted as
- an attribute of obj and the second as the docstring
- (method, docstring)
- If doc is a list, then each element of the list should be a
- sequence of length two --> [(method1, docstring1),
- (method2, docstring2), ...]
+def _add_docstring(obj, doc, warn_on_python):
+ if warn_on_python and not _needs_add_docstring(obj):
+ warnings.warn(
+ "add_newdoc was used on a pure-python object {}. "
+ "Prefer to attach it directly to the source."
+ .format(obj),
+ UserWarning,
+ stacklevel=3)
+ try:
+ add_docstring(obj, doc)
+ except Exception:
+ pass
+
+
+def add_newdoc(place, obj, doc, warn_on_python=True):
+ """
+ Add documentation to an existing object, typically one defined in C
- This routine never raises an error.
+ The purpose is to allow easier editing of the docstrings without requiring
+ a re-compile. This exists primarily for internal use within numpy itself.
+
+ Parameters
+ ----------
+ place : str
+ The absolute name of the module to import from
+ obj : str
+ The name of the object to add documentation to, typically a class or
+ function name
+ doc : {str, Tuple[str, str], List[Tuple[str, str]]}
+ If a string, the documentation to apply to `obj`
+
+ If a tuple, then the first element is interpreted as an attribute of
+ `obj` and the second as the docstring to apply - ``(method, docstring)``
+
+ If a list, then each element of the list should be a tuple of length
+ two - ``[(method1, docstring1), (method2, docstring2), ...]``
+ warn_on_python : bool
+ If True, the default, emit `UserWarning` if this is used to attach
+ documentation to a pure-python object.
+
+ Notes
+ -----
+ This routine never raises an error if the docstring can't be written, but
+ will raise an error if the object being documented does not exist.
This routine cannot modify read-only docstrings, as appear
in new-style classes or built-in functions. Because this
routine never raises an error the caller must check manually
that the docstrings were changed.
+
+ Since this function grabs the ``char *`` from a c-level str object and puts
+ it into the ``tp_doc`` slot of the type of `obj`, it violates a number of
+ C-API best-practices, by:
+
+ - modifying a `PyTypeObject` after calling `PyType_Ready`
+ - calling `Py_INCREF` on the str and losing the reference, so the str
+ will never be released
+
+ If possible it should be avoided.
"""
- try:
- new = getattr(__import__(place, globals(), {}, [obj]), obj)
- if isinstance(doc, str):
- add_docstring(new, doc.strip())
- elif isinstance(doc, tuple):
- add_docstring(getattr(new, doc[0]), doc[1].strip())
- elif isinstance(doc, list):
- for val in doc:
- add_docstring(getattr(new, val[0]), val[1].strip())
- except Exception:
- pass
+ new = getattr(__import__(place, globals(), {}, [obj]), obj)
+ if isinstance(doc, str):
+ _add_docstring(new, doc.strip(), warn_on_python)
+ elif isinstance(doc, tuple):
+ attr, docstring = doc
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
+ elif isinstance(doc, list):
+ for attr, docstring in doc:
+ _add_docstring(getattr(new, attr), docstring.strip(), warn_on_python)
diff --git a/numpy/core/getlimits.py b/numpy/core/getlimits.py
index 0e3c58793..31fa6b9bf 100644
--- a/numpy/core/getlimits.py
+++ b/numpy/core/getlimits.py
@@ -8,6 +8,7 @@ __all__ = ['finfo', 'iinfo']
import warnings
from .machar import MachAr
+from .overrides import set_module
from . import numeric
from . import numerictypes as ntypes
from .numeric import array, inf
@@ -30,6 +31,32 @@ def _fr1(a):
a.shape = ()
return a
+class MachArLike(object):
+ """ Object to simulate MachAr instance """
+
+ def __init__(self,
+ ftype,
+ **kwargs):
+ params = _MACHAR_PARAMS[ftype]
+ float_conv = lambda v: array([v], ftype)
+ float_to_float = lambda v : _fr1(float_conv(v))
+ float_to_str = lambda v: (params['fmt'] % array(_fr0(v)[0], ftype))
+
+ self.title = params['title']
+ # Parameter types same as for discovered MachAr object.
+ self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
+ self.epsneg = float_to_float(kwargs.pop('epsneg'))
+ self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
+ self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
+ self.ibeta = params['itype'](kwargs.pop('ibeta'))
+ self.__dict__.update(kwargs)
+ self.precision = int(-log10(self.eps))
+ self.resolution = float_to_float(float_conv(10) ** (-self.precision))
+ self._str_eps = float_to_str(self.eps)
+ self._str_epsneg = float_to_str(self.epsneg)
+ self._str_xmin = float_to_str(self.xmin)
+ self._str_xmax = float_to_str(self.xmax)
+ self._str_resolution = float_to_str(self.resolution)
_convert_to_float = {
ntypes.csingle: ntypes.single,
@@ -37,7 +64,6 @@ _convert_to_float = {
ntypes.clongfloat: ntypes.longfloat
}
-
# Parameters for creating MachAr / MachAr-like objects
_title_fmt = 'numpy {} precision floating point number'
_MACHAR_PARAMS = {
@@ -58,194 +84,156 @@ _MACHAR_PARAMS = {
fmt = '%12.5e',
title = _title_fmt.format('half'))}
-
-class MachArLike(object):
- """ Object to simulate MachAr instance """
-
- def __init__(self,
- ftype,
- **kwargs):
- params = _MACHAR_PARAMS[ftype]
- float_conv = lambda v: array([v], ftype)
- float_to_float = lambda v : _fr1(float_conv(v))
- self._float_to_str = lambda v: (params['fmt'] %
- array(_fr0(v)[0], ftype))
- self.title = params['title']
- # Parameter types same as for discovered MachAr object.
- self.epsilon = self.eps = float_to_float(kwargs.pop('eps'))
- self.epsneg = float_to_float(kwargs.pop('epsneg'))
- self.xmax = self.huge = float_to_float(kwargs.pop('huge'))
- self.xmin = self.tiny = float_to_float(kwargs.pop('tiny'))
- self.ibeta = params['itype'](kwargs.pop('ibeta'))
- self.__dict__.update(kwargs)
- self.precision = int(-log10(self.eps))
- self.resolution = float_to_float(float_conv(10) ** (-self.precision))
-
- # Properties below to delay need for float_to_str, and thus avoid circular
- # imports during early numpy module loading.
- # See: https://github.com/numpy/numpy/pull/8983#discussion_r115838683
-
- @property
- def _str_eps(self):
- return self._float_to_str(self.eps)
-
- @property
- def _str_epsneg(self):
- return self._float_to_str(self.epsneg)
-
- @property
- def _str_xmin(self):
- return self._float_to_str(self.xmin)
-
- @property
- def _str_xmax(self):
- return self._float_to_str(self.xmax)
-
- @property
- def _str_resolution(self):
- return self._float_to_str(self.resolution)
-
-
-# Known parameters for float16
-# See docstring of MachAr class for description of parameters.
-_f16 = ntypes.float16
-_float16_ma = MachArLike(_f16,
- machep=-10,
- negep=-11,
- minexp=-14,
- maxexp=16,
- it=10,
- iexp=5,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_f16(-10)),
- epsneg=exp2(_f16(-11)),
- huge=_f16(65504),
- tiny=_f16(2 ** -14))
-
-# Known parameters for float32
-_f32 = ntypes.float32
-_float32_ma = MachArLike(_f32,
- machep=-23,
- negep=-24,
- minexp=-126,
- maxexp=128,
- it=23,
- iexp=8,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_f32(-23)),
- epsneg=exp2(_f32(-24)),
- huge=_f32((1 - 2 ** -24) * 2**128),
- tiny=exp2(_f32(-126)))
-
-# Known parameters for float64
-_f64 = ntypes.float64
-_epsneg_f64 = 2.0 ** -53.0
-_tiny_f64 = 2.0 ** -1022.0
-_float64_ma = MachArLike(_f64,
- machep=-52,
- negep=-53,
- minexp=-1022,
- maxexp=1024,
- it=52,
- iexp=11,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=2.0 ** -52.0,
- epsneg=_epsneg_f64,
- huge=(1.0 - _epsneg_f64) / _tiny_f64 * _f64(4),
- tiny=_tiny_f64)
-
-# Known parameters for IEEE 754 128-bit binary float
-_ld = ntypes.longdouble
-_epsneg_f128 = exp2(_ld(-113))
-_tiny_f128 = exp2(_ld(-16382))
-# Ignore runtime error when this is not f128
-with numeric.errstate(all='ignore'):
- _huge_f128 = (_ld(1) - _epsneg_f128) / _tiny_f128 * _ld(4)
-_float128_ma = MachArLike(_ld,
- machep=-112,
- negep=-113,
- minexp=-16382,
- maxexp=16384,
- it=112,
- iexp=15,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_ld(-112)),
- epsneg=_epsneg_f128,
- huge=_huge_f128,
- tiny=_tiny_f128)
-
-# Known parameters for float80 (Intel 80-bit extended precision)
-_epsneg_f80 = exp2(_ld(-64))
-_tiny_f80 = exp2(_ld(-16382))
-# Ignore runtime error when this is not f80
-with numeric.errstate(all='ignore'):
- _huge_f80 = (_ld(1) - _epsneg_f80) / _tiny_f80 * _ld(4)
-_float80_ma = MachArLike(_ld,
- machep=-63,
- negep=-64,
- minexp=-16382,
- maxexp=16384,
- it=63,
- iexp=15,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_ld(-63)),
- epsneg=_epsneg_f80,
- huge=_huge_f80,
- tiny=_tiny_f80)
-
-# Guessed / known parameters for double double; see:
-# https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
-# These numbers have the same exponent range as float64, but extended number of
-# digits in the significand.
-_huge_dd = (umath.nextafter(_ld(inf), _ld(0))
- if hasattr(umath, 'nextafter') # Missing on some platforms?
- else _float64_ma.huge)
-_float_dd_ma = MachArLike(_ld,
- machep=-105,
- negep=-106,
- minexp=-1022,
- maxexp=1024,
- it=105,
- iexp=11,
- ibeta=2,
- irnd=5,
- ngrd=0,
- eps=exp2(_ld(-105)),
- epsneg= exp2(_ld(-106)),
- huge=_huge_dd,
- tiny=exp2(_ld(-1022)))
-
-
# Key to identify the floating point type. Key is result of
# ftype('-0.1').newbyteorder('<').tobytes()
# See:
# https://perl5.git.perl.org/perl.git/blob/3118d7d684b56cbeb702af874f4326683c45f045:/Configure
-_KNOWN_TYPES = {
- b'\x9a\x99\x99\x99\x99\x99\xb9\xbf' : _float64_ma,
- b'\xcd\xcc\xcc\xbd' : _float32_ma,
- b'f\xae' : _float16_ma,
+_KNOWN_TYPES = {}
+def _register_type(machar, bytepat):
+ _KNOWN_TYPES[bytepat] = machar
+_float_ma = {}
+
+def _register_known_types():
+ # Known parameters for float16
+ # See docstring of MachAr class for description of parameters.
+ f16 = ntypes.float16
+ float16_ma = MachArLike(f16,
+ machep=-10,
+ negep=-11,
+ minexp=-14,
+ maxexp=16,
+ it=10,
+ iexp=5,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f16(-10)),
+ epsneg=exp2(f16(-11)),
+ huge=f16(65504),
+ tiny=f16(2 ** -14))
+ _register_type(float16_ma, b'f\xae')
+ _float_ma[16] = float16_ma
+
+ # Known parameters for float32
+ f32 = ntypes.float32
+ float32_ma = MachArLike(f32,
+ machep=-23,
+ negep=-24,
+ minexp=-126,
+ maxexp=128,
+ it=23,
+ iexp=8,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(f32(-23)),
+ epsneg=exp2(f32(-24)),
+ huge=f32((1 - 2 ** -24) * 2**128),
+ tiny=exp2(f32(-126)))
+ _register_type(float32_ma, b'\xcd\xcc\xcc\xbd')
+ _float_ma[32] = float32_ma
+
+ # Known parameters for float64
+ f64 = ntypes.float64
+ epsneg_f64 = 2.0 ** -53.0
+ tiny_f64 = 2.0 ** -1022.0
+ float64_ma = MachArLike(f64,
+ machep=-52,
+ negep=-53,
+ minexp=-1022,
+ maxexp=1024,
+ it=52,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=2.0 ** -52.0,
+ epsneg=epsneg_f64,
+ huge=(1.0 - epsneg_f64) / tiny_f64 * f64(4),
+ tiny=tiny_f64)
+ _register_type(float64_ma, b'\x9a\x99\x99\x99\x99\x99\xb9\xbf')
+ _float_ma[64] = float64_ma
+
+ # Known parameters for IEEE 754 128-bit binary float
+ ld = ntypes.longdouble
+ epsneg_f128 = exp2(ld(-113))
+ tiny_f128 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f128
+ with numeric.errstate(all='ignore'):
+ huge_f128 = (ld(1) - epsneg_f128) / tiny_f128 * ld(4)
+ float128_ma = MachArLike(ld,
+ machep=-112,
+ negep=-113,
+ minexp=-16382,
+ maxexp=16384,
+ it=112,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-112)),
+ epsneg=epsneg_f128,
+ huge=huge_f128,
+ tiny=tiny_f128)
+ # IEEE 754 128-bit binary float
+ _register_type(float128_ma,
+ b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
+ _register_type(float128_ma,
+ b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf')
+ _float_ma[128] = float128_ma
+
+ # Known parameters for float80 (Intel 80-bit extended precision)
+ epsneg_f80 = exp2(ld(-64))
+ tiny_f80 = exp2(ld(-16382))
+ # Ignore runtime error when this is not f80
+ with numeric.errstate(all='ignore'):
+ huge_f80 = (ld(1) - epsneg_f80) / tiny_f80 * ld(4)
+ float80_ma = MachArLike(ld,
+ machep=-63,
+ negep=-64,
+ minexp=-16382,
+ maxexp=16384,
+ it=63,
+ iexp=15,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-63)),
+ epsneg=epsneg_f80,
+ huge=huge_f80,
+ tiny=tiny_f80)
# float80, first 10 bytes containing actual storage
- b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf' : _float80_ma,
+ _register_type(float80_ma, b'\xcd\xcc\xcc\xcc\xcc\xcc\xcc\xcc\xfb\xbf')
+ _float_ma[80] = float80_ma
+
+ # Guessed / known parameters for double double; see:
+ # https://en.wikipedia.org/wiki/Quadruple-precision_floating-point_format#Double-double_arithmetic
+ # These numbers have the same exponent range as float64, but extended number of
+ # digits in the significand.
+ huge_dd = (umath.nextafter(ld(inf), ld(0))
+ if hasattr(umath, 'nextafter') # Missing on some platforms?
+ else float64_ma.huge)
+ float_dd_ma = MachArLike(ld,
+ machep=-105,
+ negep=-106,
+ minexp=-1022,
+ maxexp=1024,
+ it=105,
+ iexp=11,
+ ibeta=2,
+ irnd=5,
+ ngrd=0,
+ eps=exp2(ld(-105)),
+ epsneg= exp2(ld(-106)),
+ huge=huge_dd,
+ tiny=exp2(ld(-1022)))
# double double; low, high order (e.g. PPC 64)
- b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf' :
- _float_dd_ma,
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99Y<\x9a\x99\x99\x99\x99\x99\xb9\xbf')
# double double; high, low order (e.g. PPC 64 le)
- b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<' :
- _float_dd_ma,
- # IEEE 754 128-bit binary float
- b'\x9a\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\x99\xfb\xbf' :
- _float128_ma,
-}
+ _register_type(float_dd_ma,
+ b'\x9a\x99\x99\x99\x99\x99\xb9\xbf\x9a\x99\x99\x99\x99\x99Y<')
+ _float_ma['dd'] = float_dd_ma
def _get_machar(ftype):
@@ -302,6 +290,7 @@ def _discovered_machar(ftype):
params['title'])
+@set_module('numpy')
class finfo(object):
"""
finfo(dtype)
@@ -452,6 +441,7 @@ class finfo(object):
" max=%(_str_max)s, dtype=%(dtype)s)") % d)
+@set_module('numpy')
class iinfo(object):
"""
iinfo(type)
@@ -515,6 +505,7 @@ class iinfo(object):
if self.kind not in 'iu':
raise ValueError("Invalid integer data type %r." % (self.kind,))
+ @property
def min(self):
"""Minimum value of given dtype."""
if self.kind == 'u':
@@ -527,8 +518,7 @@ class iinfo(object):
iinfo._min_vals[self.key] = val
return val
- min = property(min)
-
+ @property
def max(self):
"""Maximum value of given dtype."""
try:
@@ -541,8 +531,6 @@ class iinfo(object):
iinfo._max_vals[self.key] = val
return val
- max = property(max)
-
def __str__(self):
"""String representation."""
fmt = (
diff --git a/numpy/core/include/numpy/ndarrayobject.h b/numpy/core/include/numpy/ndarrayobject.h
index 12fc7098c..95e9cb060 100644
--- a/numpy/core/include/numpy/ndarrayobject.h
+++ b/numpy/core/include/numpy/ndarrayobject.h
@@ -5,13 +5,7 @@
#ifndef NPY_NDARRAYOBJECT_H
#define NPY_NDARRAYOBJECT_H
#ifdef __cplusplus
-#define CONFUSE_EMACS {
-#define CONFUSE_EMACS2 }
-extern "C" CONFUSE_EMACS
-#undef CONFUSE_EMACS
-#undef CONFUSE_EMACS2
-/* ... otherwise a semi-smart identer (like emacs) tries to indent
- everything when you're typing */
+extern "C" {
#endif
#include <Python.h>
@@ -29,7 +23,7 @@ extern "C" CONFUSE_EMACS
/* C-API that requires previous API to be defined */
-#define PyArray_DescrCheck(op) (((PyObject*)(op))->ob_type==&PyArrayDescr_Type)
+#define PyArray_DescrCheck(op) PyObject_TypeCheck(op, &PyArrayDescr_Type)
#define PyArray_Check(op) PyObject_TypeCheck(op, &PyArray_Type)
#define PyArray_CheckExact(op) (((PyObject*)(op))->ob_type == &PyArray_Type)
@@ -239,10 +233,10 @@ static NPY_INLINE int
NPY_TITLE_KEY_check(PyObject *key, PyObject *value)
{
PyObject *title;
- if (PyTuple_GET_SIZE(value) != 3) {
+ if (PyTuple_Size(value) != 3) {
return 0;
}
- title = PyTuple_GET_ITEM(value, 2);
+ title = PyTuple_GetItem(value, 2);
if (key == title) {
return 1;
}
diff --git a/numpy/core/include/numpy/ndarraytypes.h b/numpy/core/include/numpy/ndarraytypes.h
index ec2893b21..ad98d562b 100644
--- a/numpy/core/include/numpy/ndarraytypes.h
+++ b/numpy/core/include/numpy/ndarraytypes.h
@@ -156,12 +156,20 @@ enum NPY_TYPECHAR {
NPY_COMPLEXLTR = 'c'
};
+/*
+ * Changing this may break Numpy API compatibility
+ * due to changing offsets in PyArray_ArrFuncs, so be
+ * careful. Here we have reused the mergesort slot for
+ * any kind of stable sort, the actual implementation will
+ * depend on the data type.
+ */
typedef enum {
NPY_QUICKSORT=0,
NPY_HEAPSORT=1,
- NPY_MERGESORT=2
+ NPY_MERGESORT=2,
+ NPY_STABLESORT=2,
} NPY_SORTKIND;
-#define NPY_NSORTS (NPY_MERGESORT + 1)
+#define NPY_NSORTS (NPY_STABLESORT + 1)
typedef enum {
@@ -505,7 +513,8 @@ typedef struct {
PyArray_NonzeroFunc *nonzero;
/*
- * Used for arange.
+ * Used for arange. Should return 0 on success
+ * and -1 on failure.
* Can be NULL.
*/
PyArray_FillFunc *fill;
@@ -949,12 +958,12 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
*/
-#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)
-#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS(m, NPY_ARRAY_WRITEABLE)
-#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS(m, NPY_ARRAY_ALIGNED)
+#define PyArray_ISCONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_ISWRITEABLE(m) PyArray_CHKFLAGS((m), NPY_ARRAY_WRITEABLE)
+#define PyArray_ISALIGNED(m) PyArray_CHKFLAGS((m), NPY_ARRAY_ALIGNED)
-#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_C_CONTIGUOUS)
-#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS(m, NPY_ARRAY_F_CONTIGUOUS)
+#define PyArray_IS_C_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_C_CONTIGUOUS)
+#define PyArray_IS_F_CONTIGUOUS(m) PyArray_CHKFLAGS((m), NPY_ARRAY_F_CONTIGUOUS)
/* the variable is used in some places, so always define it */
#define NPY_BEGIN_THREADS_DEF PyThreadState *_save=NULL;
@@ -964,15 +973,15 @@ typedef int (PyArray_FinalizeFunc)(PyArrayObject *, PyObject *);
#define NPY_BEGIN_THREADS do {_save = PyEval_SaveThread();} while (0);
#define NPY_END_THREADS do { if (_save) \
{ PyEval_RestoreThread(_save); _save = NULL;} } while (0);
-#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if (loop_size > 500) \
+#define NPY_BEGIN_THREADS_THRESHOLDED(loop_size) do { if ((loop_size) > 500) \
{ _save = PyEval_SaveThread();} } while (0);
#define NPY_BEGIN_THREADS_DESCR(dtype) \
- do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \
+ do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
NPY_BEGIN_THREADS;} while (0);
#define NPY_END_THREADS_DESCR(dtype) \
- do {if (!(PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI))) \
+ do {if (!(PyDataType_FLAGCHK((dtype), NPY_NEEDS_PYAPI))) \
NPY_END_THREADS; } while (0);
#define NPY_ALLOW_C_API_DEF PyGILState_STATE __save__;
@@ -1086,7 +1095,8 @@ typedef struct PyArrayIterObject_tag PyArrayIterObject;
* type of the function which translates a set of coordinates to a
* pointer to the data
*/
-typedef char* (*npy_iter_get_dataptr_t)(PyArrayIterObject* iter, npy_intp*);
+typedef char* (*npy_iter_get_dataptr_t)(
+ PyArrayIterObject* iter, const npy_intp*);
struct PyArrayIterObject_tag {
PyObject_HEAD
@@ -1109,7 +1119,7 @@ struct PyArrayIterObject_tag {
/* Iterator API */
-#define PyArrayIter_Check(op) PyObject_TypeCheck(op, &PyArrayIter_Type)
+#define PyArrayIter_Check(op) PyObject_TypeCheck((op), &PyArrayIter_Type)
#define _PyAIT(it) ((PyArrayIterObject *)(it))
#define PyArray_ITER_RESET(it) do { \
@@ -1187,7 +1197,7 @@ struct PyArrayIterObject_tag {
#define PyArray_ITER_GOTO1D(it, ind) do { \
int __npy_i; \
- npy_intp __npy_ind = (npy_intp) (ind); \
+ npy_intp __npy_ind = (npy_intp)(ind); \
if (__npy_ind < 0) __npy_ind += _PyAIT(it)->size; \
_PyAIT(it)->index = __npy_ind; \
if (_PyAIT(it)->nd_m1 == 0) { \
@@ -1670,7 +1680,7 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
#define PyTypeNum_ISOBJECT(type) ((type) == NPY_OBJECT)
-#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(_PyADt(obj))
+#define PyDataType_ISBOOL(obj) PyTypeNum_ISBOOL(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_ISUNSIGNED(obj) PyTypeNum_ISUNSIGNED(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_ISSIGNED(obj) PyTypeNum_ISSIGNED(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_ISINTEGER(obj) PyTypeNum_ISINTEGER(((PyArray_Descr*)(obj))->type_num )
@@ -1686,7 +1696,8 @@ PyArray_CLEARFLAGS(PyArrayObject *arr, int flags)
#define PyDataType_ISOBJECT(obj) PyTypeNum_ISOBJECT(((PyArray_Descr*)(obj))->type_num)
#define PyDataType_HASFIELDS(obj) (((PyArray_Descr *)(obj))->names != NULL)
#define PyDataType_HASSUBARRAY(dtype) ((dtype)->subarray != NULL)
-#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0)
+#define PyDataType_ISUNSIZED(dtype) ((dtype)->elsize == 0 && \
+ !PyDataType_HASFIELDS(dtype))
#define PyDataType_MAKEUNSIZED(dtype) ((dtype)->elsize = 0)
#define PyArray_ISBOOL(obj) PyTypeNum_ISBOOL(PyArray_TYPE(obj))
diff --git a/numpy/core/include/numpy/npy_1_7_deprecated_api.h b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
index 76b57b748..a6ee21219 100644
--- a/numpy/core/include/numpy/npy_1_7_deprecated_api.h
+++ b/numpy/core/include/numpy/npy_1_7_deprecated_api.h
@@ -5,6 +5,8 @@
#error "Should never include npy_*_*_deprecated_api directly."
#endif
+/* Emit a warning if the user did not specifically request the old API */
+#ifndef NPY_NO_DEPRECATED_API
#if defined(_WIN32)
#define _WARN___STR2__(x) #x
#define _WARN___STR1__(x) _WARN___STR2__(x)
@@ -16,6 +18,7 @@
"#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION"
#endif
/* TODO: How to do this warning message for other compilers? */
+#endif
/*
* This header exists to collect all dangerous/deprecated NumPy API
diff --git a/numpy/core/include/numpy/npy_3kcompat.h b/numpy/core/include/numpy/npy_3kcompat.h
index 808518266..832bc0599 100644
--- a/numpy/core/include/numpy/npy_3kcompat.h
+++ b/numpy/core/include/numpy/npy_3kcompat.h
@@ -69,6 +69,16 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
#define Npy_EnterRecursiveCall(x) Py_EnterRecursiveCall(x)
#endif
+/* Py_SETREF was added in 3.5.2, and only if Py_LIMITED_API is absent */
+#if PY_VERSION_HEX < 0x03050200
+ #define Py_SETREF(op, op2) \
+ do { \
+ PyObject *_py_tmp = (PyObject *)(op); \
+ (op) = (op2); \
+ Py_DECREF(_py_tmp); \
+ } while (0)
+#endif
+
/*
* PyString -> PyBytes
*/
@@ -141,20 +151,14 @@ static NPY_INLINE int PyInt_Check(PyObject *op) {
static NPY_INLINE void
PyUnicode_ConcatAndDel(PyObject **left, PyObject *right)
{
- PyObject *newobj;
- newobj = PyUnicode_Concat(*left, right);
- Py_DECREF(*left);
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
Py_DECREF(right);
- *left = newobj;
}
static NPY_INLINE void
PyUnicode_Concat2(PyObject **left, PyObject *right)
{
- PyObject *newobj;
- newobj = PyUnicode_Concat(*left, right);
- Py_DECREF(*left);
- *left = newobj;
+ Py_SETREF(*left, PyUnicode_Concat(*left, right));
}
/*
@@ -215,6 +219,7 @@ npy_PyFile_Dup2(PyObject *file, char *mode, npy_off_t *orig_pos)
if (handle == NULL) {
PyErr_SetString(PyExc_IOError,
"Getting a FILE* from a Python file object failed");
+ return NULL;
}
/* Record the original raw file handle position */
@@ -379,6 +384,36 @@ npy_PyFile_CloseFile(PyObject *file)
}
+/* This is a copy of _PyErr_ChainExceptions
+ */
+static NPY_INLINE void
+npy_PyErr_ChainExceptions(PyObject *exc, PyObject *val, PyObject *tb)
+{
+ if (exc == NULL)
+ return;
+
+ if (PyErr_Occurred()) {
+ /* only py3 supports this anyway */
+ #ifdef NPY_PY3K
+ PyObject *exc2, *val2, *tb2;
+ PyErr_Fetch(&exc2, &val2, &tb2);
+ PyErr_NormalizeException(&exc, &val, &tb);
+ if (tb != NULL) {
+ PyException_SetTraceback(val, tb);
+ Py_DECREF(tb);
+ }
+ Py_DECREF(exc);
+ PyErr_NormalizeException(&exc2, &val2, &tb2);
+ PyException_SetContext(val2, val);
+ PyErr_Restore(exc2, val2, tb2);
+ #endif
+ }
+ else {
+ PyErr_Restore(exc, val, tb);
+ }
+}
+
+
/* This is a copy of _PyErr_ChainExceptions, with:
* - a minimal implementation for python 2
* - __cause__ used instead of __context__
diff --git a/numpy/core/include/numpy/npy_common.h b/numpy/core/include/numpy/npy_common.h
index 64aaaacff..27b83f7b5 100644
--- a/numpy/core/include/numpy/npy_common.h
+++ b/numpy/core/include/numpy/npy_common.h
@@ -44,12 +44,26 @@
#else
#define NPY_GCC_TARGET_AVX
#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
+#define HAVE_ATTRIBUTE_TARGET_FMA
+#define NPY_GCC_TARGET_FMA __attribute__((target("avx2,fma")))
+#endif
+
#if defined HAVE_ATTRIBUTE_TARGET_AVX2 && defined HAVE_LINK_AVX2
#define NPY_GCC_TARGET_AVX2 __attribute__((target("avx2")))
#else
#define NPY_GCC_TARGET_AVX2
#endif
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F && defined HAVE_LINK_AVX512F
+#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f")))
+#elif defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS
+#define NPY_GCC_TARGET_AVX512F __attribute__((target("avx512f")))
+#else
+#define NPY_GCC_TARGET_AVX512F
+#endif
+
/*
* mark an argument (starting from 1) that must not be NULL and is not checked
* DO NOT USE IF FUNCTION CHECKS FOR NULL!! the compiler will remove the check
@@ -68,6 +82,13 @@
#define NPY_HAVE_SSE2_INTRINSICS
#endif
+#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX2
+#define NPY_HAVE_AVX2_INTRINSICS
+#endif
+
+#if defined HAVE_IMMINTRIN_H && defined HAVE_LINK_AVX512F
+#define NPY_HAVE_AVX512F_INTRINSICS
+#endif
/*
* give a hint to the compiler which branch is more likely or unlikely
* to occur, e.g. rare error cases:
diff --git a/numpy/core/include/numpy/npy_math.h b/numpy/core/include/numpy/npy_math.h
index 582390cdc..69e690f28 100644
--- a/numpy/core/include/numpy/npy_math.h
+++ b/numpy/core/include/numpy/npy_math.h
@@ -114,6 +114,101 @@ NPY_INLINE static float __npy_nzerof(void)
#define NPY_SQRT1_2l 0.707106781186547524400844362104849039L /* 1/sqrt(2) */
/*
+ * Constants used in vector implementation of exp(x)
+ */
+#define NPY_RINT_CVT_MAGICf 0x1.800000p+23f
+#define NPY_CODY_WAITE_LOGE_2_HIGHf -6.93145752e-1f
+#define NPY_CODY_WAITE_LOGE_2_LOWf -1.42860677e-6f
+#define NPY_COEFF_P0_EXPf 9.999999999980870924916e-01f
+#define NPY_COEFF_P1_EXPf 7.257664613233124478488e-01f
+#define NPY_COEFF_P2_EXPf 2.473615434895520810817e-01f
+#define NPY_COEFF_P3_EXPf 5.114512081637298353406e-02f
+#define NPY_COEFF_P4_EXPf 6.757896990527504603057e-03f
+#define NPY_COEFF_P5_EXPf 5.082762527590693718096e-04f
+#define NPY_COEFF_Q0_EXPf 1.000000000000000000000e+00f
+#define NPY_COEFF_Q1_EXPf -2.742335390411667452936e-01f
+#define NPY_COEFF_Q2_EXPf 2.159509375685829852307e-02f
+
+/*
+ * Constants used in vector implementation of log(x)
+ */
+#define NPY_COEFF_P0_LOGf 0.000000000000000000000e+00f
+#define NPY_COEFF_P1_LOGf 9.999999999999998702752e-01f
+#define NPY_COEFF_P2_LOGf 2.112677543073053063722e+00f
+#define NPY_COEFF_P3_LOGf 1.480000633576506585156e+00f
+#define NPY_COEFF_P4_LOGf 3.808837741388407920751e-01f
+#define NPY_COEFF_P5_LOGf 2.589979117907922693523e-02f
+#define NPY_COEFF_Q0_LOGf 1.000000000000000000000e+00f
+#define NPY_COEFF_Q1_LOGf 2.612677543073109236779e+00f
+#define NPY_COEFF_Q2_LOGf 2.453006071784736363091e+00f
+#define NPY_COEFF_Q3_LOGf 9.864942958519418960339e-01f
+#define NPY_COEFF_Q4_LOGf 1.546476374983906719538e-01f
+#define NPY_COEFF_Q5_LOGf 5.875095403124574342950e-03f
+/*
+ * Constants used in vector implementation of sinf/cosf(x)
+ */
+#define NPY_TWO_O_PIf 0x1.45f306p-1f
+#define NPY_CODY_WAITE_PI_O_2_HIGHf -0x1.921fb0p+00f
+#define NPY_CODY_WAITE_PI_O_2_MEDf -0x1.5110b4p-22f
+#define NPY_CODY_WAITE_PI_O_2_LOWf -0x1.846988p-48f
+#define NPY_COEFF_INVF0_COSINEf 0x1.000000p+00f
+#define NPY_COEFF_INVF2_COSINEf -0x1.000000p-01f
+#define NPY_COEFF_INVF4_COSINEf 0x1.55553cp-05f
+#define NPY_COEFF_INVF6_COSINEf -0x1.6c06dcp-10f
+#define NPY_COEFF_INVF8_COSINEf 0x1.98e616p-16f
+#define NPY_COEFF_INVF3_SINEf -0x1.555556p-03f
+#define NPY_COEFF_INVF5_SINEf 0x1.11119ap-07f
+#define NPY_COEFF_INVF7_SINEf -0x1.a06bbap-13f
+#define NPY_COEFF_INVF9_SINEf 0x1.7d3bbcp-19f
+/*
+ * Integer functions.
+ */
+NPY_INPLACE npy_uint npy_gcdu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lcmu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_gcdul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lcmul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_gcdull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lcmull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_int npy_gcd(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lcm(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_gcdl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lcml(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_gcdll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lcmll(npy_longlong a, npy_longlong b);
+
+NPY_INPLACE npy_ubyte npy_rshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ubyte npy_lshiftuhh(npy_ubyte a, npy_ubyte b);
+NPY_INPLACE npy_ushort npy_rshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_ushort npy_lshiftuh(npy_ushort a, npy_ushort b);
+NPY_INPLACE npy_uint npy_rshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_uint npy_lshiftu(npy_uint a, npy_uint b);
+NPY_INPLACE npy_ulong npy_rshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulong npy_lshiftul(npy_ulong a, npy_ulong b);
+NPY_INPLACE npy_ulonglong npy_rshiftull(npy_ulonglong a, npy_ulonglong b);
+NPY_INPLACE npy_ulonglong npy_lshiftull(npy_ulonglong a, npy_ulonglong b);
+
+NPY_INPLACE npy_byte npy_rshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_byte npy_lshifthh(npy_byte a, npy_byte b);
+NPY_INPLACE npy_short npy_rshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_short npy_lshifth(npy_short a, npy_short b);
+NPY_INPLACE npy_int npy_rshift(npy_int a, npy_int b);
+NPY_INPLACE npy_int npy_lshift(npy_int a, npy_int b);
+NPY_INPLACE npy_long npy_rshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_long npy_lshiftl(npy_long a, npy_long b);
+NPY_INPLACE npy_longlong npy_rshiftll(npy_longlong a, npy_longlong b);
+NPY_INPLACE npy_longlong npy_lshiftll(npy_longlong a, npy_longlong b);
+
+/*
+ * avx function has a common API for both sin & cos. This enum is used to
+ * distinguish between the two
+ */
+typedef enum {
+ npy_compute_sin,
+ npy_compute_cos
+} NPY_TRIG_OP;
+
+/*
* C99 double math funcs
*/
NPY_INPLACE double npy_sin(double x);
diff --git a/numpy/core/include/numpy/ufuncobject.h b/numpy/core/include/numpy/ufuncobject.h
index 4b1b3d325..5ff4a0041 100644
--- a/numpy/core/include/numpy/ufuncobject.h
+++ b/numpy/core/include/numpy/ufuncobject.h
@@ -120,7 +120,11 @@ typedef struct _tagPyUFuncObject {
*/
int nin, nout, nargs;
- /* Identity for reduction, either PyUFunc_One or PyUFunc_Zero */
+ /*
+ * Identity for reduction, any of PyUFunc_One, PyUFunc_Zero
+ * PyUFunc_MinusOne, PyUFunc_None, PyUFunc_ReorderableNone,
+ * PyUFunc_IdentityValue.
+ */
int identity;
/* Array of one-dimensional core loops */
@@ -209,9 +213,33 @@ typedef struct _tagPyUFuncObject {
* set by nditer object.
*/
npy_uint32 iter_flags;
+
+ /* New in NPY_API_VERSION 0x0000000D and above */
+
+ /*
+ * for each core_num_dim_ix distinct dimension names,
+ * the possible "frozen" size (-1 if not frozen).
+ */
+ npy_intp *core_dim_sizes;
+
+ /*
+ * for each distinct core dimension, a set of UFUNC_CORE_DIM* flags
+ */
+ npy_uint32 *core_dim_flags;
+
+ /* Identity for reduction, when identity == PyUFunc_IdentityValue */
+ PyObject *identity_value;
+
} PyUFuncObject;
#include "arrayobject.h"
+/* Generalized ufunc; 0x0001 reserved for possible use as CORE_ENABLED */
+/* the core dimension's size will be determined by the operands. */
+#define UFUNC_CORE_DIM_SIZE_INFERRED 0x0002
+/* the core dimension may be absent */
+#define UFUNC_CORE_DIM_CAN_IGNORE 0x0004
+/* flags inferred during execution */
+#define UFUNC_CORE_DIM_MISSING 0x00040000
#define UFUNC_ERR_IGNORE 0
#define UFUNC_ERR_WARN 1
@@ -276,6 +304,12 @@ typedef struct _tagPyUFuncObject {
* This case allows reduction with multiple axes at once.
*/
#define PyUFunc_ReorderableNone -2
+/*
+ * UFunc unit is an identity_value, and the order of operations can be reordered
+ * This case allows reduction with multiple axes at once.
+ */
+#define PyUFunc_IdentityValue -3
+
#define UFUNC_REDUCE 0
#define UFUNC_ACCUMULATE 1
@@ -306,30 +340,6 @@ typedef struct _loop1d_info {
#define UFUNC_PYVALS_NAME "UFUNC_PYVALS"
-#define UFUNC_CHECK_ERROR(arg) \
- do {if ((((arg)->obj & UFUNC_OBJ_NEEDS_API) && PyErr_Occurred()) || \
- ((arg)->errormask && \
- PyUFunc_checkfperr((arg)->errormask, \
- (arg)->errobj, \
- &(arg)->first))) \
- goto fail;} while (0)
-
-
-/* keep in sync with ieee754.c.src */
-#if defined(sun) || defined(__BSD__) || defined(__OpenBSD__) || \
- (defined(__FreeBSD__) && (__FreeBSD_version < 502114)) || \
- defined(__NetBSD__) || \
- defined(__GLIBC__) || defined(__APPLE__) || \
- defined(__CYGWIN__) || defined(__MINGW32__) || \
- (defined(__FreeBSD__) && (__FreeBSD_version >= 502114)) || \
- defined(_AIX) || \
- defined(_MSC_VER) || \
- defined(__osf__) && defined(__alpha)
-#else
-#define NO_FLOATING_POINT_SUPPORT
-#endif
-
-
/*
* THESE MACROS ARE DEPRECATED.
* Use npy_set_floatstatus_* in the npymath library.
@@ -339,10 +349,6 @@ typedef struct _loop1d_info {
#define UFUNC_FPE_UNDERFLOW NPY_FPE_UNDERFLOW
#define UFUNC_FPE_INVALID NPY_FPE_INVALID
-#define UFUNC_CHECK_STATUS(ret) \
- { \
- ret = npy_clear_floatstatus(); \
- }
#define generate_divbyzero_error() npy_set_floatstatus_divbyzero()
#define generate_overflow_error() npy_set_floatstatus_overflow()
diff --git a/numpy/core/info.py b/numpy/core/info.py
deleted file mode 100644
index c6f7bbcf2..000000000
--- a/numpy/core/info.py
+++ /dev/null
@@ -1,87 +0,0 @@
-"""Defines a multi-dimensional array and useful procedures for Numerical computation.
-
-Functions
-
-- array - NumPy Array construction
-- zeros - Return an array of all zeros
-- empty - Return an uninitialized array
-- shape - Return shape of sequence or array
-- rank - Return number of dimensions
-- size - Return number of elements in entire array or a
- certain dimension
-- fromstring - Construct array from (byte) string
-- take - Select sub-arrays using sequence of indices
-- put - Set sub-arrays using sequence of 1-D indices
-- putmask - Set portion of arrays using a mask
-- reshape - Return array with new shape
-- repeat - Repeat elements of array
-- choose - Construct new array from indexed array tuple
-- correlate - Correlate two 1-d arrays
-- searchsorted - Search for element in 1-d array
-- sum - Total sum over a specified dimension
-- average - Average, possibly weighted, over axis or array.
-- cumsum - Cumulative sum over a specified dimension
-- product - Total product over a specified dimension
-- cumproduct - Cumulative product over a specified dimension
-- alltrue - Logical and over an entire axis
-- sometrue - Logical or over an entire axis
-- allclose - Tests if sequences are essentially equal
-
-More Functions:
-
-- arange - Return regularly spaced array
-- asarray - Guarantee NumPy array
-- convolve - Convolve two 1-d arrays
-- swapaxes - Exchange axes
-- concatenate - Join arrays together
-- transpose - Permute axes
-- sort - Sort elements of array
-- argsort - Indices of sorted array
-- argmax - Index of largest value
-- argmin - Index of smallest value
-- inner - Innerproduct of two arrays
-- dot - Dot product (matrix multiplication)
-- outer - Outerproduct of two arrays
-- resize - Return array with arbitrary new shape
-- indices - Tuple of indices
-- fromfunction - Construct array from universal function
-- diagonal - Return diagonal array
-- trace - Trace of array
-- dump - Dump array to file object (pickle)
-- dumps - Return pickled string representing data
-- load - Return array stored in file object
-- loads - Return array from pickled string
-- ravel - Return array as 1-D
-- nonzero - Indices of nonzero elements for 1-D array
-- shape - Shape of array
-- where - Construct array from binary result
-- compress - Elements of array where condition is true
-- clip - Clip array between two values
-- ones - Array of all ones
-- identity - 2-D identity array (matrix)
-
-(Universal) Math Functions
-
- add logical_or exp
- subtract logical_xor log
- multiply logical_not log10
- divide maximum sin
- divide_safe minimum sinh
- conjugate bitwise_and sqrt
- power bitwise_or tan
- absolute bitwise_xor tanh
- negative invert ceil
- greater left_shift fabs
- greater_equal right_shift floor
- less arccos arctan2
- less_equal arcsin fmod
- equal arctan hypot
- not_equal cos around
- logical_and cosh sign
- arccosh arcsinh arctanh
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['testing']
-global_symbols = ['*']
diff --git a/numpy/core/machar.py b/numpy/core/machar.py
index 7578544fe..202580bdb 100644
--- a/numpy/core/machar.py
+++ b/numpy/core/machar.py
@@ -10,10 +10,12 @@ from __future__ import division, absolute_import, print_function
__all__ = ['MachAr']
from numpy.core.fromnumeric import any
-from numpy.core.numeric import errstate
+from numpy.core._ufunc_config import errstate
+from numpy.core.overrides import set_module
# Need to speed this up...especially for longfloat
+@set_module('numpy')
class MachAr(object):
"""
Diagnosing machine parameters.
diff --git a/numpy/core/memmap.py b/numpy/core/memmap.py
index 8269f537f..062645551 100644
--- a/numpy/core/memmap.py
+++ b/numpy/core/memmap.py
@@ -3,8 +3,9 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from .numeric import uint8, ndarray, dtype
from numpy.compat import (
- long, basestring, is_pathlib_path, contextlib_nullcontext
+ long, basestring, os_fspath, contextlib_nullcontext, is_pathlib_path
)
+from numpy.core.overrides import set_module
__all__ = ['memmap']
@@ -19,6 +20,8 @@ mode_equivalents = {
"write":"w+"
}
+
+@set_module('numpy')
class memmap(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
@@ -132,9 +135,9 @@ class memmap(ndarray):
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
- memmap([[ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.],
- [ 0., 0., 0., 0.]], dtype=float32)
+ memmap([[0., 0., 0., 0.],
+ [0., 0., 0., 0.],
+ [0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
@@ -218,10 +221,8 @@ class memmap(ndarray):
if hasattr(filename, 'read'):
f_ctx = contextlib_nullcontext(filename)
- elif is_pathlib_path(filename):
- f_ctx = filename.open(('r' if mode == 'c' else mode)+'b')
else:
- f_ctx = open(filename, ('r' if mode == 'c' else mode)+'b')
+ f_ctx = open(os_fspath(filename), ('r' if mode == 'c' else mode)+'b')
with f_ctx as fid:
fid.seek(0, 2)
@@ -245,7 +246,7 @@ class memmap(ndarray):
bytes = long(offset + size*_dbytes)
- if mode == 'w+' or (mode == 'r+' and flen < bytes):
+ if mode in ('w+', 'r+') and flen < bytes:
fid.seek(bytes - 1, 0)
fid.write(b'\0')
fid.flush()
@@ -268,14 +269,13 @@ class memmap(ndarray):
self.offset = offset
self.mode = mode
- if isinstance(filename, basestring):
- self.filename = os.path.abspath(filename)
- elif is_pathlib_path(filename):
+ if is_pathlib_path(filename):
+ # special case - if we were constructed with a pathlib.path,
+ # then filename is a path object, not a string
self.filename = filename.resolve()
- # py3 returns int for TemporaryFile().name
- elif (hasattr(filename, "name") and
- isinstance(filename.name, basestring)):
- self.filename = os.path.abspath(filename.name)
+ elif hasattr(fid, "name") and isinstance(fid.name, basestring):
+ # py3 returns int for TemporaryFile().name
+ self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
diff --git a/numpy/core/multiarray.py b/numpy/core/multiarray.py
index 673328397..c0fcc10ff 100644
--- a/numpy/core/multiarray.py
+++ b/numpy/core/multiarray.py
@@ -6,11 +6,18 @@ by importing from the extension module.
"""
+import functools
+import sys
+import warnings
+import sys
+
+from . import overrides
from . import _multiarray_umath
+import numpy as np
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
_fastCopyAndTranspose, _flagdict, _insert, _reconstruct, _vec_string,
- _ARRAY_API, _monotonicity
+ _ARRAY_API, _monotonicity, _get_ndarray_c_version
)
__all__ = [
@@ -25,13 +32,1600 @@ __all__ = [
'count_nonzero', 'c_einsum', 'datetime_as_string', 'datetime_data',
'digitize', 'dot', 'dragon4_positional', 'dragon4_scientific', 'dtype',
'empty', 'empty_like', 'error', 'flagsobj', 'flatiter', 'format_longfloat',
- 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'getbuffer', 'inner',
+ 'frombuffer', 'fromfile', 'fromiter', 'fromstring', 'inner',
'int_asbuffer', 'interp', 'interp_complex', 'is_busday', 'lexsort',
'matmul', 'may_share_memory', 'min_scalar_type', 'ndarray', 'nditer',
- 'nested_iters', 'newbuffer', 'normalize_axis_index', 'packbits',
+ 'nested_iters', 'normalize_axis_index', 'packbits',
'promote_types', 'putmask', 'ravel_multi_index', 'result_type', 'scalar',
'set_datetimeparse_function', 'set_legacy_print_mode', 'set_numeric_ops',
'set_string_function', 'set_typeDict', 'shares_memory', 'test_interrupt',
'tracemalloc_domain', 'typeinfo', 'unpackbits', 'unravel_index', 'vdot',
'where', 'zeros']
+if sys.version_info.major < 3:
+ __all__ += ['newbuffer', 'getbuffer']
+
+# For backward compatibility, make sure pickle imports these functions from here
+_reconstruct.__module__ = 'numpy.core.multiarray'
+scalar.__module__ = 'numpy.core.multiarray'
+
+
+arange.__module__ = 'numpy'
+array.__module__ = 'numpy'
+datetime_data.__module__ = 'numpy'
+empty.__module__ = 'numpy'
+frombuffer.__module__ = 'numpy'
+fromfile.__module__ = 'numpy'
+fromiter.__module__ = 'numpy'
+frompyfunc.__module__ = 'numpy'
+fromstring.__module__ = 'numpy'
+geterrobj.__module__ = 'numpy'
+may_share_memory.__module__ = 'numpy'
+nested_iters.__module__ = 'numpy'
+promote_types.__module__ = 'numpy'
+set_numeric_ops.__module__ = 'numpy'
+seterrobj.__module__ = 'numpy'
+zeros.__module__ = 'numpy'
+
+
+# We can't verify dispatcher signatures because NumPy's C functions don't
+# support introspection.
+array_function_from_c_func_and_dispatcher = functools.partial(
+ overrides.array_function_from_dispatcher,
+ module='numpy', docs_from_dispatcher=True, verify=False)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.empty_like)
+def empty_like(prototype, dtype=None, order=None, subok=None, shape=None):
+ """
+ empty_like(prototype, dtype=None, order='K', subok=True, shape=None)
+
+ Return a new array with the same shape and type as a given array.
+
+ Parameters
+ ----------
+ prototype : array_like
+ The shape and data-type of `prototype` define these same attributes
+ of the returned array.
+ dtype : data-type, optional
+ Overrides the data type of the result.
+
+ .. versionadded:: 1.6.0
+ order : {'C', 'F', 'A', or 'K'}, optional
+ Overrides the memory layout of the result. 'C' means C-order,
+ 'F' means F-order, 'A' means 'F' if ``prototype`` is Fortran
+ contiguous, 'C' otherwise. 'K' means match the layout of ``prototype``
+ as closely as possible.
+
+ .. versionadded:: 1.6.0
+ subok : bool, optional.
+ If True, then the newly created array will use the sub-class
+ type of 'a', otherwise it will be a base-class array. Defaults
+ to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ out : ndarray
+ Array of uninitialized (arbitrary) data with the same
+ shape and type as `prototype`.
+
+ See Also
+ --------
+ ones_like : Return an array of ones with shape and type of input.
+ zeros_like : Return an array of zeros with shape and type of input.
+ full_like : Return a new array with shape of input filled with value.
+ empty : Return a new uninitialized array.
+
+ Notes
+ -----
+ This function does *not* initialize the returned array; to do that use
+ `zeros_like` or `ones_like` instead. It may be marginally faster than
+ the functions that do set the array values.
+
+ Examples
+ --------
+ >>> a = ([1,2,3], [4,5,6]) # a is array-like
+ >>> np.empty_like(a)
+ array([[-1073741821, -1073741821, 3], # uninitialized
+ [ 0, 0, -1073741821]])
+ >>> a = np.array([[1., 2., 3.],[4.,5.,6.]])
+ >>> np.empty_like(a)
+ array([[ -2.00000715e+000, 1.48219694e-323, -2.00000572e+000], # uninitialized
+ [ 4.38791518e-305, -2.00000715e+000, 4.17269252e-309]])
+
+ """
+ return (prototype,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.concatenate)
+def concatenate(arrays, axis=None, out=None):
+ """
+ concatenate((a1, a2, ...), axis=0, out=None)
+
+ Join a sequence of arrays along an existing axis.
+
+ Parameters
+ ----------
+ a1, a2, ... : sequence of array_like
+ The arrays must have the same shape, except in the dimension
+ corresponding to `axis` (the first, by default).
+ axis : int, optional
+ The axis along which the arrays will be joined. If axis is None,
+ arrays are flattened before use. Default is 0.
+ out : ndarray, optional
+ If provided, the destination to place the result. The shape must be
+ correct, matching that of what concatenate would have returned if no
+ out argument were specified.
+
+ Returns
+ -------
+ res : ndarray
+ The concatenated array.
+
+ See Also
+ --------
+ ma.concatenate : Concatenate function that preserves input masks.
+ array_split : Split an array into multiple sub-arrays of equal or
+ near-equal size.
+ split : Split array into a list of multiple sub-arrays of equal size.
+ hsplit : Split array into multiple sub-arrays horizontally (column wise)
+ vsplit : Split array into multiple sub-arrays vertically (row wise)
+ dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
+ stack : Stack a sequence of arrays along a new axis.
+ hstack : Stack arrays in sequence horizontally (column wise)
+ vstack : Stack arrays in sequence vertically (row wise)
+ dstack : Stack arrays in sequence depth wise (along third dimension)
+ block : Assemble arrays from blocks.
+
+ Notes
+ -----
+ When one or more of the arrays to be concatenated is a MaskedArray,
+ this function will return a MaskedArray object instead of an ndarray,
+ but the input masks are *not* preserved. In cases where a MaskedArray
+ is expected as input, use the ma.concatenate function from the masked
+ array module instead.
+
+ Examples
+ --------
+ >>> a = np.array([[1, 2], [3, 4]])
+ >>> b = np.array([[5, 6]])
+ >>> np.concatenate((a, b), axis=0)
+ array([[1, 2],
+ [3, 4],
+ [5, 6]])
+ >>> np.concatenate((a, b.T), axis=1)
+ array([[1, 2, 5],
+ [3, 4, 6]])
+ >>> np.concatenate((a, b), axis=None)
+ array([1, 2, 3, 4, 5, 6])
+
+ This function will not preserve masking of MaskedArray inputs.
+
+ >>> a = np.ma.arange(3)
+ >>> a[1] = np.ma.masked
+ >>> b = np.arange(2, 5)
+ >>> a
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
+ >>> b
+ array([2, 3, 4])
+ >>> np.concatenate([a, b])
+ masked_array(data=[0, 1, 2, 2, 3, 4],
+ mask=False,
+ fill_value=999999)
+ >>> np.ma.concatenate([a, b])
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
+
+ """
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.inner)
+def inner(a, b):
+ """
+ inner(a, b)
+
+ Inner product of two arrays.
+
+ Ordinary inner product of vectors for 1-D arrays (without complex
+ conjugation), in higher dimensions a sum product over the last axes.
+
+ Parameters
+ ----------
+ a, b : array_like
+ If `a` and `b` are nonscalar, their last dimensions must match.
+
+ Returns
+ -------
+ out : ndarray
+ `out.shape = a.shape[:-1] + b.shape[:-1]`
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` and `b` has different size.
+
+ See Also
+ --------
+ tensordot : Sum products over arbitrary axes.
+ dot : Generalised matrix product, using second last dimension of `b`.
+ einsum : Einstein summation convention.
+
+ Notes
+ -----
+ For vectors (1-D arrays) it computes the ordinary inner-product::
+
+ np.inner(a, b) = sum(a[:]*b[:])
+
+ More generally, if `ndim(a) = r > 0` and `ndim(b) = s > 0`::
+
+ np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1))
+
+ or explicitly::
+
+ np.inner(a, b)[i0,...,ir-1,j0,...,js-1]
+ = sum(a[i0,...,ir-1,:]*b[j0,...,js-1,:])
+
+ In addition `a` or `b` may be scalars, in which case::
+
+ np.inner(a,b) = a*b
+
+ Examples
+ --------
+ Ordinary inner product for vectors:
+
+ >>> a = np.array([1,2,3])
+ >>> b = np.array([0,1,0])
+ >>> np.inner(a, b)
+ 2
+
+ A multidimensional example:
+
+ >>> a = np.arange(24).reshape((2,3,4))
+ >>> b = np.arange(4)
+ >>> np.inner(a, b)
+ array([[ 14, 38, 62],
+ [ 86, 110, 134]])
+
+ An example where `b` is a scalar:
+
+ >>> np.inner(np.eye(2), 7)
+ array([[7., 0.],
+ [0., 7.]])
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
+def where(condition, x=None, y=None):
+ """
+ where(condition, [x, y])
+
+ Return elements chosen from `x` or `y` depending on `condition`.
+
+ .. note::
+ When only `condition` is provided, this function is a shorthand for
+ ``np.asarray(condition).nonzero()``. Using `nonzero` directly should be
+ preferred, as it behaves correctly for subclasses. The rest of this
+ documentation covers only the case where all three arguments are
+ provided.
+
+ Parameters
+ ----------
+ condition : array_like, bool
+ Where True, yield `x`, otherwise yield `y`.
+ x, y : array_like
+ Values from which to choose. `x`, `y` and `condition` need to be
+ broadcastable to some shape.
+
+ Returns
+ -------
+ out : ndarray
+ An array with elements from `x` where `condition` is True, and elements
+ from `y` elsewhere.
+
+ See Also
+ --------
+ choose
+ nonzero : The function that is called when x and y are omitted
+
+ Notes
+ -----
+ If all the arrays are 1-D, `where` is equivalent to::
+
+ [xv if c else yv
+ for c, xv, yv in zip(condition, x, y)]
+
+ Examples
+ --------
+ >>> a = np.arange(10)
+ >>> a
+ array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+ >>> np.where(a < 5, a, 10*a)
+ array([ 0, 1, 2, 3, 4, 50, 60, 70, 80, 90])
+
+ This can be used on multidimensional arrays too:
+
+ >>> np.where([[True, False], [True, True]],
+ ... [[1, 2], [3, 4]],
+ ... [[9, 8], [7, 6]])
+ array([[1, 8],
+ [3, 4]])
+
+ The shapes of x, y, and the condition are broadcast together:
+
+ >>> x, y = np.ogrid[:3, :4]
+ >>> np.where(x < y, x, 10 + y) # both x and 10+y are broadcast
+ array([[10, 0, 0, 0],
+ [10, 11, 1, 1],
+ [10, 11, 12, 2]])
+
+ >>> a = np.array([[0, 1, 2],
+ ... [0, 2, 4],
+ ... [0, 3, 6]])
+ >>> np.where(a < 4, a, -1) # -1 is broadcast
+ array([[ 0, 1, 2],
+ [ 0, 2, -1],
+ [ 0, 3, -1]])
+ """
+ return (condition, x, y)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.lexsort)
+def lexsort(keys, axis=None):
+ """
+ lexsort(keys, axis=-1)
+
+ Perform an indirect stable sort using a sequence of keys.
+
+ Given multiple sorting keys, which can be interpreted as columns in a
+ spreadsheet, lexsort returns an array of integer indices that describes
+ the sort order by multiple columns. The last key in the sequence is used
+ for the primary sort order, the second-to-last key for the secondary sort
+ order, and so on. The keys argument must be a sequence of objects that
+ can be converted to arrays of the same shape. If a 2D array is provided
+ for the keys argument, it's rows are interpreted as the sorting keys and
+ sorting is according to the last row, second last row etc.
+
+ Parameters
+ ----------
+ keys : (k, N) array or tuple containing k (N,)-shaped sequences
+ The `k` different "columns" to be sorted. The last column (or row if
+ `keys` is a 2D array) is the primary sort key.
+ axis : int, optional
+ Axis to be indirectly sorted. By default, sort over the last axis.
+
+ Returns
+ -------
+ indices : (N,) ndarray of ints
+ Array of indices that sort the keys along the specified axis.
+
+ See Also
+ --------
+ argsort : Indirect sort.
+ ndarray.sort : In-place sort.
+ sort : Return a sorted copy of an array.
+
+ Examples
+ --------
+ Sort names: first by surname, then by name.
+
+ >>> surnames = ('Hertz', 'Galilei', 'Hertz')
+ >>> first_names = ('Heinrich', 'Galileo', 'Gustav')
+ >>> ind = np.lexsort((first_names, surnames))
+ >>> ind
+ array([1, 2, 0])
+
+ >>> [surnames[i] + ", " + first_names[i] for i in ind]
+ ['Galilei, Galileo', 'Hertz, Gustav', 'Hertz, Heinrich']
+
+ Sort two columns of numbers:
+
+ >>> a = [1,5,1,4,3,4,4] # First column
+ >>> b = [9,4,0,4,0,2,1] # Second column
+ >>> ind = np.lexsort((b,a)) # Sort by a, then by b
+ >>> ind
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ >>> [(a[i],b[i]) for i in ind]
+ [(1, 0), (1, 9), (3, 0), (4, 1), (4, 2), (4, 4), (5, 4)]
+
+ Note that sorting is first according to the elements of ``a``.
+ Secondary sorting is according to the elements of ``b``.
+
+ A normal ``argsort`` would have yielded:
+
+ >>> [(a[i],b[i]) for i in np.argsort(a)]
+ [(1, 9), (1, 0), (3, 0), (4, 4), (4, 2), (4, 1), (5, 4)]
+
+ Structured arrays are sorted lexically by ``argsort``:
+
+ >>> x = np.array([(1,9), (5,4), (1,0), (4,4), (3,0), (4,2), (4,1)],
+ ... dtype=np.dtype([('x', int), ('y', int)]))
+
+ >>> np.argsort(x) # or np.argsort(x, order=('x', 'y'))
+ array([2, 0, 4, 6, 5, 3, 1])
+
+ """
+ if isinstance(keys, tuple):
+ return keys
+ else:
+ return (keys,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.can_cast)
+def can_cast(from_, to, casting=None):
+ """
+ can_cast(from_, to, casting='safe')
+
+ Returns True if cast between data types can occur according to the
+ casting rule. If from is a scalar or array scalar, also returns
+ True if the scalar value can be cast without overflow or truncation
+ to an integer.
+
+ Parameters
+ ----------
+ from_ : dtype, dtype specifier, scalar, or array
+ Data type, scalar, or array to cast from.
+ to : dtype or dtype specifier
+ Data type to cast to.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+
+ Returns
+ -------
+ out : bool
+ True if cast can occur according to the casting rule.
+
+ Notes
+ -----
+ .. versionchanged:: 1.17.0
+ Casting between a simple data type and a structured one is possible only
+ for "unsafe" casting. Casting to multiple fields is allowed, but
+ casting from multiple fields is not.
+
+ .. versionchanged:: 1.9.0
+ Casting from numeric to string types in 'safe' casting mode requires
+ that the string dtype length is long enough to store the maximum
+ integer/float value converted.
+
+ See also
+ --------
+ dtype, result_type
+
+ Examples
+ --------
+ Basic examples
+
+ >>> np.can_cast(np.int32, np.int64)
+ True
+ >>> np.can_cast(np.float64, complex)
+ True
+ >>> np.can_cast(complex, float)
+ False
+
+ >>> np.can_cast('i8', 'f8')
+ True
+ >>> np.can_cast('i8', 'f4')
+ False
+ >>> np.can_cast('i4', 'S4')
+ False
+
+ Casting scalars
+
+ >>> np.can_cast(100, 'i1')
+ True
+ >>> np.can_cast(150, 'i1')
+ False
+ >>> np.can_cast(150, 'u1')
+ True
+
+ >>> np.can_cast(3.5e100, np.float32)
+ False
+ >>> np.can_cast(1000.0, np.float32)
+ True
+
+ Array scalar checks the value, array does not
+
+ >>> np.can_cast(np.array(1000.0), np.float32)
+ True
+ >>> np.can_cast(np.array([1000.0]), np.float32)
+ False
+
+ Using the casting rules
+
+ >>> np.can_cast('i8', 'i8', 'no')
+ True
+ >>> np.can_cast('<i8', '>i8', 'no')
+ False
+
+ >>> np.can_cast('<i8', '>i8', 'equiv')
+ True
+ >>> np.can_cast('<i4', '>i8', 'equiv')
+ False
+
+ >>> np.can_cast('<i4', '>i8', 'safe')
+ True
+ >>> np.can_cast('<i8', '>i4', 'safe')
+ False
+
+ >>> np.can_cast('<i8', '>i4', 'same_kind')
+ True
+ >>> np.can_cast('<i8', '>u4', 'same_kind')
+ False
+
+ >>> np.can_cast('<i8', '>u4', 'unsafe')
+ True
+
+ """
+ return (from_,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.min_scalar_type)
+def min_scalar_type(a):
+ """
+ min_scalar_type(a)
+
+ For scalar ``a``, returns the data type with the smallest size
+ and smallest scalar kind which can hold its value. For non-scalar
+ array ``a``, returns the vector's dtype unmodified.
+
+ Floating point values are not demoted to integers,
+ and complex values are not demoted to floats.
+
+ Parameters
+ ----------
+ a : scalar or array_like
+ The value whose minimal data type is to be found.
+
+ Returns
+ -------
+ out : dtype
+ The minimal data type.
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ See Also
+ --------
+ result_type, promote_types, dtype, can_cast
+
+ Examples
+ --------
+ >>> np.min_scalar_type(10)
+ dtype('uint8')
+
+ >>> np.min_scalar_type(-260)
+ dtype('int16')
+
+ >>> np.min_scalar_type(3.1)
+ dtype('float16')
+
+ >>> np.min_scalar_type(1e50)
+ dtype('float64')
+
+ >>> np.min_scalar_type(np.arange(4,dtype='f8'))
+ dtype('float64')
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.result_type)
+def result_type(*arrays_and_dtypes):
+ """
+ result_type(*arrays_and_dtypes)
+
+ Returns the type that results from applying the NumPy
+ type promotion rules to the arguments.
+
+ Type promotion in NumPy works similarly to the rules in languages
+ like C++, with some slight differences. When both scalars and
+ arrays are used, the array's type takes precedence and the actual value
+ of the scalar is taken into account.
+
+ For example, calculating 3*a, where a is an array of 32-bit floats,
+ intuitively should result in a 32-bit float output. If the 3 is a
+ 32-bit integer, the NumPy rules indicate it can't convert losslessly
+ into a 32-bit float, so a 64-bit float should be the result type.
+ By examining the value of the constant, '3', we see that it fits in
+ an 8-bit integer, which can be cast losslessly into the 32-bit float.
+
+ Parameters
+ ----------
+ arrays_and_dtypes : list of arrays and dtypes
+ The operands of some operation whose result type is needed.
+
+ Returns
+ -------
+ out : dtype
+ The result type.
+
+ See also
+ --------
+ dtype, promote_types, min_scalar_type, can_cast
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ The specific algorithm used is as follows.
+
+ Categories are determined by first checking which of boolean,
+ integer (int/uint), or floating point (float/complex) the maximum
+ kind of all the arrays and the scalars are.
+
+ If there are only scalars or the maximum category of the scalars
+ is higher than the maximum category of the arrays,
+ the data types are combined with :func:`promote_types`
+ to produce the return value.
+
+ Otherwise, `min_scalar_type` is called on each array, and
+ the resulting data types are all combined with :func:`promote_types`
+ to produce the return value.
+
+ The set of int values is not a subset of the uint values for types
+ with the same number of bits, something not reflected in
+ :func:`min_scalar_type`, but handled as a special case in `result_type`.
+
+ Examples
+ --------
+ >>> np.result_type(3, np.arange(7, dtype='i1'))
+ dtype('int8')
+
+ >>> np.result_type('i4', 'c8')
+ dtype('complex128')
+
+ >>> np.result_type(3.0, -2)
+ dtype('float64')
+
+ """
+ return arrays_and_dtypes
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.dot)
+def dot(a, b, out=None):
+ """
+ dot(a, b, out=None)
+
+ Dot product of two arrays. Specifically,
+
+ - If both `a` and `b` are 1-D arrays, it is inner product of vectors
+ (without complex conjugation).
+
+ - If both `a` and `b` are 2-D arrays, it is matrix multiplication,
+ but using :func:`matmul` or ``a @ b`` is preferred.
+
+ - If either `a` or `b` is 0-D (scalar), it is equivalent to :func:`multiply`
+ and using ``numpy.multiply(a, b)`` or ``a * b`` is preferred.
+
+ - If `a` is an N-D array and `b` is a 1-D array, it is a sum product over
+ the last axis of `a` and `b`.
+
+ - If `a` is an N-D array and `b` is an M-D array (where ``M>=2``), it is a
+ sum product over the last axis of `a` and the second-to-last axis of `b`::
+
+ dot(a, b)[i,j,k,m] = sum(a[i,j,:] * b[k,:,m])
+
+ Parameters
+ ----------
+ a : array_like
+ First argument.
+ b : array_like
+ Second argument.
+ out : ndarray, optional
+ Output argument. This must have the exact kind that would be returned
+ if it was not used. In particular, it must have the right type, must be
+ C-contiguous, and its dtype must be the dtype that would be returned
+ for `dot(a,b)`. This is a performance feature. Therefore, if these
+ conditions are not met, an exception is raised, instead of attempting
+ to be flexible.
+
+ Returns
+ -------
+ output : ndarray
+ Returns the dot product of `a` and `b`. If `a` and `b` are both
+ scalars or both 1-D arrays then a scalar is returned; otherwise
+ an array is returned.
+ If `out` is given, then it is returned.
+
+ Raises
+ ------
+ ValueError
+ If the last dimension of `a` is not the same size as
+ the second-to-last dimension of `b`.
+
+ See Also
+ --------
+ vdot : Complex-conjugating dot product.
+ tensordot : Sum products over arbitrary axes.
+ einsum : Einstein summation convention.
+ matmul : '@' operator as method with out parameter.
+
+ Examples
+ --------
+ >>> np.dot(3, 4)
+ 12
+
+ Neither argument is complex-conjugated:
+
+ >>> np.dot([2j, 3j], [2j, 3j])
+ (-13+0j)
+
+ For 2-D arrays it is the matrix product:
+
+ >>> a = [[1, 0], [0, 1]]
+ >>> b = [[4, 1], [2, 2]]
+ >>> np.dot(a, b)
+ array([[4, 1],
+ [2, 2]])
+
+ >>> a = np.arange(3*4*5*6).reshape((3,4,5,6))
+ >>> b = np.arange(3*4*5*6)[::-1].reshape((5,4,6,3))
+ >>> np.dot(a, b)[2,3,2,1,2,2]
+ 499128
+ >>> sum(a[2,3,2,:] * b[1,2,:,2])
+ 499128
+
+ """
+ return (a, b, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.vdot)
+def vdot(a, b):
+ """
+ vdot(a, b)
+
+ Return the dot product of two vectors.
+
+ The vdot(`a`, `b`) function handles complex numbers differently than
+ dot(`a`, `b`). If the first argument is complex the complex conjugate
+ of the first argument is used for the calculation of the dot product.
+
+ Note that `vdot` handles multidimensional arrays differently than `dot`:
+ it does *not* perform a matrix product, but flattens input arguments
+ to 1-D vectors first. Consequently, it should only be used for vectors.
+
+ Parameters
+ ----------
+ a : array_like
+ If `a` is complex the complex conjugate is taken before calculation
+ of the dot product.
+ b : array_like
+ Second argument to the dot product.
+
+ Returns
+ -------
+ output : ndarray
+ Dot product of `a` and `b`. Can be an int, float, or
+ complex depending on the types of `a` and `b`.
+
+ See Also
+ --------
+ dot : Return the dot product without using the complex conjugate of the
+ first argument.
+
+ Examples
+ --------
+ >>> a = np.array([1+2j,3+4j])
+ >>> b = np.array([5+6j,7+8j])
+ >>> np.vdot(a, b)
+ (70-8j)
+ >>> np.vdot(b, a)
+ (70+8j)
+
+ Note that higher-dimensional arrays are flattened!
+
+ >>> a = np.array([[1, 4], [5, 6]])
+ >>> b = np.array([[4, 1], [2, 2]])
+ >>> np.vdot(a, b)
+ 30
+ >>> np.vdot(b, a)
+ 30
+ >>> 1*4 + 4*1 + 5*2 + 6*2
+ 30
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.bincount)
+def bincount(x, weights=None, minlength=None):
+ """
+ bincount(x, weights=None, minlength=0)
+
+ Count number of occurrences of each value in array of non-negative ints.
+
+ The number of bins (of size 1) is one larger than the largest value in
+ `x`. If `minlength` is specified, there will be at least this number
+ of bins in the output array (though it will be longer if necessary,
+ depending on the contents of `x`).
+ Each bin gives the number of occurrences of its index value in `x`.
+ If `weights` is specified the input array is weighted by it, i.e. if a
+ value ``n`` is found at position ``i``, ``out[n] += weight[i]`` instead
+ of ``out[n] += 1``.
+
+ Parameters
+ ----------
+ x : array_like, 1 dimension, nonnegative ints
+ Input array.
+ weights : array_like, optional
+ Weights, array of the same shape as `x`.
+ minlength : int, optional
+ A minimum number of bins for the output array.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ out : ndarray of ints
+ The result of binning the input array.
+ The length of `out` is equal to ``np.amax(x)+1``.
+
+ Raises
+ ------
+ ValueError
+ If the input is not 1-dimensional, or contains elements with negative
+ values, or if `minlength` is negative.
+ TypeError
+ If the type of the input is float or complex.
+
+ See Also
+ --------
+ histogram, digitize, unique
+
+ Examples
+ --------
+ >>> np.bincount(np.arange(5))
+ array([1, 1, 1, 1, 1])
+ >>> np.bincount(np.array([0, 1, 1, 3, 2, 1, 7]))
+ array([1, 3, 1, 1, 0, 0, 0, 1])
+
+ >>> x = np.array([0, 1, 1, 3, 2, 1, 7, 23])
+ >>> np.bincount(x).size == np.amax(x)+1
+ True
+
+ The input array needs to be of integer dtype, otherwise a
+ TypeError is raised:
+
+ >>> np.bincount(np.arange(5, dtype=float))
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ TypeError: array cannot be safely cast to required type
+
+ A possible use of ``bincount`` is to perform sums over
+ variable-size chunks of an array, using the ``weights`` keyword.
+
+ >>> w = np.array([0.3, 0.5, 0.2, 0.7, 1., -0.6]) # weights
+ >>> x = np.array([0, 1, 1, 2, 2, 2])
+ >>> np.bincount(x, weights=w)
+ array([ 0.3, 0.7, 1.1])
+
+ """
+ return (x, weights)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.ravel_multi_index)
+def ravel_multi_index(multi_index, dims, mode=None, order=None):
+ """
+ ravel_multi_index(multi_index, dims, mode='raise', order='C')
+
+ Converts a tuple of index arrays into an array of flat
+ indices, applying boundary modes to the multi-index.
+
+ Parameters
+ ----------
+ multi_index : tuple of array_like
+ A tuple of integer arrays, one array for each dimension.
+ dims : tuple of ints
+ The shape of array into which the indices from ``multi_index`` apply.
+ mode : {'raise', 'wrap', 'clip'}, optional
+ Specifies how out-of-bounds indices are handled. Can specify
+ either one mode or a tuple of modes, one mode per index.
+
+ * 'raise' -- raise an error (default)
+ * 'wrap' -- wrap around
+ * 'clip' -- clip to the range
+
+ In 'clip' mode, a negative index which would normally
+ wrap will clip to 0 instead.
+ order : {'C', 'F'}, optional
+ Determines whether the multi-index should be viewed as
+ indexing in row-major (C-style) or column-major
+ (Fortran-style) order.
+
+ Returns
+ -------
+ raveled_indices : ndarray
+ An array of indices into the flattened version of an array
+ of dimensions ``dims``.
+
+ See Also
+ --------
+ unravel_index
+
+ Notes
+ -----
+ .. versionadded:: 1.6.0
+
+ Examples
+ --------
+ >>> arr = np.array([[3,6,6],[4,5,1]])
+ >>> np.ravel_multi_index(arr, (7,6))
+ array([22, 41, 37])
+ >>> np.ravel_multi_index(arr, (7,6), order='F')
+ array([31, 41, 13])
+ >>> np.ravel_multi_index(arr, (4,6), mode='clip')
+ array([22, 23, 19])
+ >>> np.ravel_multi_index(arr, (4,4), mode=('clip','wrap'))
+ array([12, 13, 13])
+
+ >>> np.ravel_multi_index((3,1,4,1), (6,7,8,9))
+ 1621
+ """
+ return multi_index
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unravel_index)
+def unravel_index(indices, shape=None, order=None, dims=None):
+ """
+ unravel_index(indices, shape, order='C')
+
+ Converts a flat index or array of flat indices into a tuple
+ of coordinate arrays.
+
+ Parameters
+ ----------
+ indices : array_like
+ An integer array whose elements are indices into the flattened
+ version of an array of dimensions ``shape``. Before version 1.6.0,
+ this function accepted just one index value.
+ shape : tuple of ints
+ The shape of the array to use for unraveling ``indices``.
+
+ .. versionchanged:: 1.16.0
+ Renamed from ``dims`` to ``shape``.
+
+ order : {'C', 'F'}, optional
+ Determines whether the indices should be viewed as indexing in
+ row-major (C-style) or column-major (Fortran-style) order.
+
+ .. versionadded:: 1.6.0
+
+ Returns
+ -------
+ unraveled_coords : tuple of ndarray
+ Each array in the tuple has the same shape as the ``indices``
+ array.
+
+ See Also
+ --------
+ ravel_multi_index
+
+ Examples
+ --------
+ >>> np.unravel_index([22, 41, 37], (7,6))
+ (array([3, 6, 6]), array([4, 5, 1]))
+ >>> np.unravel_index([31, 41, 13], (7,6), order='F')
+ (array([3, 6, 6]), array([4, 5, 1]))
+
+ >>> np.unravel_index(1621, (6,7,8,9))
+ (3, 1, 4, 1)
+
+ """
+ if dims is not None:
+ warnings.warn("'shape' argument should be used instead of 'dims'",
+ DeprecationWarning, stacklevel=3)
+ return (indices,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.copyto)
+def copyto(dst, src, casting=None, where=None):
+ """
+ copyto(dst, src, casting='same_kind', where=True)
+
+ Copies values from one array to another, broadcasting as necessary.
+
+ Raises a TypeError if the `casting` rule is violated, and if
+ `where` is provided, it selects which elements to copy.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dst : ndarray
+ The array into which values are copied.
+ src : array_like
+ The array from which values are copied.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ Controls what kind of data casting may occur when copying.
+
+ * 'no' means the data types should not be cast at all.
+ * 'equiv' means only byte-order changes are allowed.
+ * 'safe' means only casts which can preserve values are allowed.
+ * 'same_kind' means only safe casts or casts within a kind,
+ like float64 to float32, are allowed.
+ * 'unsafe' means any data conversions may be done.
+ where : array_like of bool, optional
+ A boolean array which is broadcasted to match the dimensions
+ of `dst`, and selects elements to copy from `src` to `dst`
+ wherever it contains the value True.
+ """
+ return (dst, src, where)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.putmask)
+def putmask(a, mask, values):
+ """
+ putmask(a, mask, values)
+
+ Changes elements of an array based on conditional and input values.
+
+ Sets ``a.flat[n] = values[n]`` for each n where ``mask.flat[n]==True``.
+
+ If `values` is not the same size as `a` and `mask` then it will repeat.
+ This gives behavior different from ``a[mask] = values``.
+
+ Parameters
+ ----------
+ a : array_like
+ Target array.
+ mask : array_like
+ Boolean mask array. It has to be the same shape as `a`.
+ values : array_like
+ Values to put into `a` where `mask` is True. If `values` is smaller
+ than `a` it will be repeated.
+
+ See Also
+ --------
+ place, put, take, copyto
+
+ Examples
+ --------
+ >>> x = np.arange(6).reshape(2, 3)
+ >>> np.putmask(x, x>2, x**2)
+ >>> x
+ array([[ 0, 1, 2],
+ [ 9, 16, 25]])
+
+ If `values` is smaller than `a` it is repeated:
+
+ >>> x = np.arange(5)
+ >>> np.putmask(x, x>1, [-33, -44])
+ >>> x
+ array([ 0, 1, -33, -44, -33])
+
+ """
+ return (a, mask, values)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.packbits)
+def packbits(a, axis=None, bitorder='big'):
+ """
+ packbits(a, axis=None, bitorder='big')
+
+ Packs the elements of a binary-valued array into bits in a uint8 array.
+
+ The result is padded to full bytes by inserting zero bits at the end.
+
+ Parameters
+ ----------
+ a : array_like
+ An array of integers or booleans whose elements should be packed to
+ bits.
+ axis : int, optional
+ The dimension over which bit-packing is done.
+ ``None`` implies packing the flattened array.
+ bitorder : {'big', 'little'}, optional
+ The order of the input bits. 'big' will mimic bin(val),
+ ``[0, 0, 0, 0, 0, 0, 1, 1] => 3 = 0b00000011``, 'little' will
+ reverse the order so ``[1, 1, 0, 0, 0, 0, 0, 0] => 3``.
+ Defaults to 'big'.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ packed : ndarray
+ Array of type uint8 whose elements represent bits corresponding to the
+ logical (0 or nonzero) value of the input elements. The shape of
+ `packed` has the same number of dimensions as the input (unless `axis`
+ is None, in which case the output is 1-D).
+
+ See Also
+ --------
+ unpackbits: Unpacks elements of a uint8 array into a binary-valued output
+ array.
+
+ Examples
+ --------
+ >>> a = np.array([[[1,0,1],
+ ... [0,1,0]],
+ ... [[1,1,0],
+ ... [0,0,1]]])
+ >>> b = np.packbits(a, axis=-1)
+ >>> b
+ array([[[160],
+ [ 64]],
+ [[192],
+ [ 32]]], dtype=uint8)
+
+ Note that in binary 160 = 1010 0000, 64 = 0100 0000, 192 = 1100 0000,
+ and 32 = 0010 0000.
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.unpackbits)
+def unpackbits(a, axis=None, count=None, bitorder='big'):
+ """
+ unpackbits(a, axis=None, count=None, bitorder='big')
+
+ Unpacks elements of a uint8 array into a binary-valued output array.
+
+ Each element of `a` represents a bit-field that should be unpacked
+ into a binary-valued output array. The shape of the output array is
+ either 1-D (if `axis` is ``None``) or the same shape as the input
+ array with unpacking done along the axis specified.
+
+ Parameters
+ ----------
+ a : ndarray, uint8 type
+ Input array.
+ axis : int, optional
+ The dimension over which bit-unpacking is done.
+ ``None`` implies unpacking the flattened array.
+ count : int or None, optional
+ The number of elements to unpack along `axis`, provided as a way
+ of undoing the effect of packing a size that is not a multiple
+ of eight. A non-negative number means to only unpack `count`
+ bits. A negative number means to trim off that many bits from
+ the end. ``None`` means to unpack the entire array (the
+ default). Counts larger than the available number of bits will
+ add zero padding to the output. Negative counts must not
+ exceed the available number of bits.
+
+ .. versionadded:: 1.17.0
+
+ bitorder : {'big', 'little'}, optional
+ The order of the returned bits. 'big' will mimic bin(val),
+ ``3 = 0b00000011 => [0, 0, 0, 0, 0, 0, 1, 1]``, 'little' will reverse
+ the order to ``[1, 1, 0, 0, 0, 0, 0, 0]``.
+ Defaults to 'big'.
+
+ .. versionadded:: 1.17.0
+
+ Returns
+ -------
+ unpacked : ndarray, uint8 type
+ The elements are binary-valued (0 or 1).
+
+ See Also
+ --------
+ packbits : Packs the elements of a binary-valued array into bits in
+ a uint8 array.
+
+ Examples
+ --------
+ >>> a = np.array([[2], [7], [23]], dtype=np.uint8)
+ >>> a
+ array([[ 2],
+ [ 7],
+ [23]], dtype=uint8)
+ >>> b = np.unpackbits(a, axis=1)
+ >>> b
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1]], dtype=uint8)
+ >>> c = np.unpackbits(a, axis=1, count=-3)
+ >>> c
+ array([[0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0],
+ [0, 0, 0, 1, 0]], dtype=uint8)
+
+ >>> p = np.packbits(b, axis=0)
+ >>> np.unpackbits(p, axis=0)
+ array([[0, 0, 0, 0, 0, 0, 1, 0],
+ [0, 0, 0, 0, 0, 1, 1, 1],
+ [0, 0, 0, 1, 0, 1, 1, 1],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0],
+ [0, 0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
+ >>> np.array_equal(b, np.unpackbits(p, axis=0, count=b.shape[0]))
+ True
+
+ """
+ return (a,)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.shares_memory)
+def shares_memory(a, b, max_work=None):
+ """
+ shares_memory(a, b, max_work=None)
+
+ Determine if two arrays share memory
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem (maximum number
+ of candidate solutions to consider). The following special
+ values are recognized:
+
+ max_work=MAY_SHARE_EXACT (default)
+ The problem is solved exactly. In this case, the function returns
+ True only if there is an element shared between the arrays.
+ max_work=MAY_SHARE_BOUNDS
+ Only the memory bounds of a and b are checked.
+
+ Raises
+ ------
+ numpy.TooHardError
+ Exceeded max_work.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ may_share_memory
+
+ Examples
+ --------
+ >>> np.shares_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.may_share_memory)
+def may_share_memory(a, b, max_work=None):
+ """
+ may_share_memory(a, b, max_work=None)
+
+ Determine if two arrays might share memory
+
+ A return of True does not necessarily mean that the two arrays
+ share any element. It just means that they *might*.
+
+ Only the memory bounds of a and b are checked by default.
+
+ Parameters
+ ----------
+ a, b : ndarray
+ Input arrays
+ max_work : int, optional
+ Effort to spend on solving the overlap problem. See
+ `shares_memory` for details. Default for ``may_share_memory``
+ is to do a bounds check.
+
+ Returns
+ -------
+ out : bool
+
+ See Also
+ --------
+ shares_memory
+
+ Examples
+ --------
+ >>> np.may_share_memory(np.array([1,2]), np.array([5,8,9]))
+ False
+ >>> x = np.zeros([3, 4])
+ >>> np.may_share_memory(x[:,0], x[:,1])
+ True
+
+ """
+ return (a, b)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.is_busday)
+def is_busday(dates, weekmask=None, holidays=None, busdaycal=None, out=None):
+ """
+ is_busday(dates, weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ Calculates which of the given dates are valid days, and which are not.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of bool, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of bool
+ An array with the same shape as ``dates``, containing True for
+ each valid day, and False for each invalid day.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ busday_offset : Applies an offset counted in valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # The weekdays are Friday, Saturday, and Monday
+ ... np.is_busday(['2011-07-01', '2011-07-02', '2011-07-18'],
+ ... holidays=['2011-07-01', '2011-07-04', '2011-07-17'])
+ array([False, False, True])
+ """
+ return (dates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_offset)
+def busday_offset(dates, offsets, roll=None, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_offset(dates, offsets, roll='raise', weekmask='1111100', holidays=None, busdaycal=None, out=None)
+
+ First adjusts the date to fall on a valid day according to
+ the ``roll`` rule, then applies offsets to the given dates
+ counted in valid days.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ dates : array_like of datetime64[D]
+ The array of dates to process.
+ offsets : array_like of int
+ The array of offsets, which is broadcast with ``dates``.
+ roll : {'raise', 'nat', 'forward', 'following', 'backward', 'preceding', 'modifiedfollowing', 'modifiedpreceding'}, optional
+ How to treat dates that do not fall on a valid day. The default
+ is 'raise'.
+
+ * 'raise' means to raise an exception for an invalid day.
+ * 'nat' means to return a NaT (not-a-time) for an invalid day.
+ * 'forward' and 'following' mean to take the first valid day
+ later in time.
+ * 'backward' and 'preceding' mean to take the first valid day
+ earlier in time.
+ * 'modifiedfollowing' means to take the first valid day
+ later in time unless it is across a Month boundary, in which
+ case to take the first valid day earlier in time.
+ * 'modifiedpreceding' means to take the first valid day
+ earlier in time unless it is across a Month boundary, in which
+ case to take the first valid day later in time.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of datetime64[D], optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of datetime64[D]
+ An array with a shape from broadcasting ``dates`` and ``offsets``
+ together, containing the dates with offsets applied.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_count : Counts how many valid days are in a half-open date range.
+
+ Examples
+ --------
+ >>> # First business day in October 2011 (not accounting for holidays)
+ ... np.busday_offset('2011-10', 0, roll='forward')
+ numpy.datetime64('2011-10-03')
+ >>> # Last business day in February 2012 (not accounting for holidays)
+ ... np.busday_offset('2012-03', -1, roll='forward')
+ numpy.datetime64('2012-02-29')
+ >>> # Third Wednesday in January 2011
+ ... np.busday_offset('2011-01', 2, roll='forward', weekmask='Wed')
+ numpy.datetime64('2011-01-19')
+ >>> # 2012 Mother's Day in Canada and the U.S.
+ ... np.busday_offset('2012-05', 1, roll='forward', weekmask='Sun')
+ numpy.datetime64('2012-05-13')
+
+ >>> # First business day on or after a date
+ ... np.busday_offset('2011-03-20', 0, roll='forward')
+ numpy.datetime64('2011-03-21')
+ >>> np.busday_offset('2011-03-22', 0, roll='forward')
+ numpy.datetime64('2011-03-22')
+ >>> # First business day after a date
+ ... np.busday_offset('2011-03-20', 1, roll='backward')
+ numpy.datetime64('2011-03-21')
+ >>> np.busday_offset('2011-03-22', 1, roll='backward')
+ numpy.datetime64('2011-03-23')
+ """
+ return (dates, offsets, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(_multiarray_umath.busday_count)
+def busday_count(begindates, enddates, weekmask=None, holidays=None,
+ busdaycal=None, out=None):
+ """
+ busday_count(begindates, enddates, weekmask='1111100', holidays=[], busdaycal=None, out=None)
+
+ Counts the number of valid days between `begindates` and
+ `enddates`, not including the day of `enddates`.
+
+ If ``enddates`` specifies a date value that is earlier than the
+ corresponding ``begindates`` date value, the count will be negative.
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ begindates : array_like of datetime64[D]
+ The array of the first dates for counting.
+ enddates : array_like of datetime64[D]
+ The array of the end dates for counting, which are excluded
+ from the count themselves.
+ weekmask : str or array_like of bool, optional
+ A seven-element array indicating which of Monday through Sunday are
+ valid days. May be specified as a length-seven list or array, like
+ [1,1,1,1,1,0,0]; a length-seven string, like '1111100'; or a string
+ like "Mon Tue Wed Thu Fri", made up of 3-character abbreviations for
+ weekdays, optionally separated by white space. Valid abbreviations
+ are: Mon Tue Wed Thu Fri Sat Sun
+ holidays : array_like of datetime64[D], optional
+ An array of dates to consider as invalid dates. They may be
+ specified in any order, and NaT (not-a-time) dates are ignored.
+ This list is saved in a normalized form that is suited for
+ fast calculations of valid days.
+ busdaycal : busdaycalendar, optional
+ A `busdaycalendar` object which specifies the valid days. If this
+ parameter is provided, neither weekmask nor holidays may be
+ provided.
+ out : array of int, optional
+ If provided, this array is filled with the result.
+
+ Returns
+ -------
+ out : array of int
+ An array with a shape from broadcasting ``begindates`` and ``enddates``
+ together, containing the number of valid days between
+ the begin and end dates.
+
+ See Also
+ --------
+ busdaycalendar: An object that specifies a custom set of valid days.
+ is_busday : Returns a boolean array indicating valid days.
+ busday_offset : Applies an offset counted in valid days.
+
+ Examples
+ --------
+ >>> # Number of weekdays in January 2011
+ ... np.busday_count('2011-01', '2011-02')
+ 21
+ >>> # Number of weekdays in 2011
+ >>> np.busday_count('2011', '2012')
+ 260
+ >>> # Number of Saturdays in 2011
+ ... np.busday_count('2011', '2012', weekmask='Sat')
+ 53
+ """
+ return (begindates, enddates, weekmask, holidays, out)
+
+
+@array_function_from_c_func_and_dispatcher(
+ _multiarray_umath.datetime_as_string)
+def datetime_as_string(arr, unit=None, timezone=None, casting=None):
+ """
+ datetime_as_string(arr, unit=None, timezone='naive', casting='same_kind')
+
+ Convert an array of datetimes into an array of strings.
+
+ Parameters
+ ----------
+ arr : array_like of datetime64
+ The array of UTC timestamps to format.
+ unit : str or None
+ One of None, 'auto', or a :ref:`datetime unit <arrays.dtypes.dateunits>`.
+ timezone : {'naive', 'UTC', 'local'} or tzinfo
+ Timezone information to use when displaying the datetime. If 'UTC', end
+ with a Z to indicate UTC time. If 'local', convert to the local timezone
+ first, and suffix with a +-#### timezone offset. If a tzinfo object,
+ then do as with 'local', but use the specified timezone.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}
+ Casting to allow when changing between datetime units.
+
+ Returns
+ -------
+ str_arr : ndarray
+ An array of strings the same shape as `arr`.
+
+ Examples
+ --------
+ >>> import pytz
+ >>> d = np.arange('2002-10-27T04:30', 4*60, 60, dtype='M8[m]')
+ >>> d
+ array(['2002-10-27T04:30', '2002-10-27T05:30', '2002-10-27T06:30',
+ '2002-10-27T07:30'], dtype='datetime64[m]')
+
+ Setting the timezone to UTC shows the same information, but with a Z suffix
+
+ >>> np.datetime_as_string(d, timezone='UTC')
+ array(['2002-10-27T04:30Z', '2002-10-27T05:30Z', '2002-10-27T06:30Z',
+ '2002-10-27T07:30Z'], dtype='<U35')
+
+ Note that we picked datetimes that cross a DST boundary. Passing in a
+ ``pytz`` timezone object will print the appropriate offset
+
+ >>> np.datetime_as_string(d, timezone=pytz.timezone('US/Eastern'))
+ array(['2002-10-27T00:30-0400', '2002-10-27T01:30-0400',
+ '2002-10-27T01:30-0500', '2002-10-27T02:30-0500'], dtype='<U39')
+
+ Passing in a unit will change the precision
+
+ >>> np.datetime_as_string(d, unit='h')
+ array(['2002-10-27T04', '2002-10-27T05', '2002-10-27T06', '2002-10-27T07'],
+ dtype='<U32')
+ >>> np.datetime_as_string(d, unit='s')
+ array(['2002-10-27T04:30:00', '2002-10-27T05:30:00', '2002-10-27T06:30:00',
+ '2002-10-27T07:30:00'], dtype='<U38')
+
+ 'casting' can be used to specify whether precision can be changed
+ >>> np.datetime_as_string(d, unit='h', casting='safe')
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot create a datetime string as units 'h' from a NumPy
+ datetime with units 'm' according to the rule 'safe'
+ """
+ return (arr,)
diff --git a/numpy/core/numeric.py b/numpy/core/numeric.py
index 52c796ade..b5568fd86 100644
--- a/numpy/core/numeric.py
+++ b/numpy/core/numeric.py
@@ -1,18 +1,15 @@
from __future__ import division, absolute_import, print_function
-try:
- # Accessing collections abstract classes from collections
- # has been deprecated since Python 3.3
- import collections.abc as collections_abc
-except ImportError:
- import collections as collections_abc
+import functools
import itertools
import operator
import sys
import warnings
import numbers
+import contextlib
import numpy as np
+from numpy.compat import pickle, basestring
from . import multiarray
from .multiarray import (
_fastCopyAndTranspose as fastCopyAndTranspose, ALLOW_THREADS,
@@ -27,33 +24,29 @@ from .multiarray import (
if sys.version_info[0] < 3:
from .multiarray import newbuffer, getbuffer
+from . import overrides
from . import umath
-from .umath import (multiply, invert, sin, UFUNC_BUFSIZE_DEFAULT,
- ERR_IGNORE, ERR_WARN, ERR_RAISE, ERR_CALL, ERR_PRINT,
- ERR_LOG, ERR_DEFAULT, PINF, NAN)
+from . import shape_base
+from .overrides import set_module
+from .umath import (multiply, invert, sin, PINF, NAN)
from . import numerictypes
from .numerictypes import longlong, intc, int_, float_, complex_, bool_
-from ._internal import TooHardError, AxisError
+from ._exceptions import TooHardError, AxisError
+from ._asarray import asarray, asanyarray
+from ._ufunc_config import errstate
bitwise_not = invert
ufunc = type(sin)
newaxis = None
if sys.version_info[0] >= 3:
- import pickle
- basestring = str
import builtins
else:
- import cPickle as pickle
import __builtin__ as builtins
-def loads(*args, **kwargs):
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.core.numeric.loads is deprecated, use pickle.loads instead",
- DeprecationWarning, stacklevel=2)
- return pickle.loads(*args, **kwargs)
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
__all__ = [
@@ -62,15 +55,13 @@ __all__ = [
'fromstring', 'fromfile', 'frombuffer', 'int_asbuffer', 'where',
'argwhere', 'copyto', 'concatenate', 'fastCopyAndTranspose', 'lexsort',
'set_numeric_ops', 'can_cast', 'promote_types', 'min_scalar_type',
- 'result_type', 'asarray', 'asanyarray', 'ascontiguousarray',
- 'asfortranarray', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
+ 'result_type', 'isfortran', 'empty_like', 'zeros_like', 'ones_like',
'correlate', 'convolve', 'inner', 'dot', 'outer', 'vdot', 'roll',
- 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian', 'require',
+ 'rollaxis', 'moveaxis', 'cross', 'tensordot', 'little_endian',
'fromiter', 'array_equal', 'array_equiv', 'indices', 'fromfunction',
- 'isclose', 'load', 'loads', 'isscalar', 'binary_repr', 'base_repr', 'ones',
- 'identity', 'allclose', 'compare_chararrays', 'putmask', 'seterr',
- 'geterr', 'setbufsize', 'getbufsize', 'seterrcall', 'geterrcall',
- 'errstate', 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
+ 'isclose', 'isscalar', 'binary_repr', 'base_repr', 'ones',
+ 'identity', 'allclose', 'compare_chararrays', 'putmask',
+ 'flatnonzero', 'Inf', 'inf', 'infty', 'Infinity', 'nan', 'NaN',
'False_', 'True_', 'bitwise_not', 'CLIP', 'RAISE', 'WRAP', 'MAXDIMS',
'BUFSIZE', 'ALLOW_THREADS', 'ComplexWarning', 'full', 'full_like',
'matmul', 'shares_memory', 'may_share_memory', 'MAY_SHARE_BOUNDS',
@@ -80,6 +71,7 @@ if sys.version_info[0] < 3:
__all__.extend(['getbuffer', 'newbuffer'])
+@set_module('numpy')
class ComplexWarning(RuntimeWarning):
"""
The warning raised when casting a complex dtype to a real dtype.
@@ -91,7 +83,12 @@ class ComplexWarning(RuntimeWarning):
pass
-def zeros_like(a, dtype=None, order='K', subok=True):
+def _zeros_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
+ return (a,)
+
+
+@array_function_dispatch(_zeros_like_dispatcher)
+def zeros_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of zeros with the same shape and type as a given array.
@@ -115,6 +112,12 @@ def zeros_like(a, dtype=None, order='K', subok=True):
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -141,18 +144,19 @@ def zeros_like(a, dtype=None, order='K', subok=True):
>>> y = np.arange(3, dtype=float)
>>> y
- array([ 0., 1., 2.])
+ array([0., 1., 2.])
>>> np.zeros_like(y)
- array([ 0., 0., 0.])
+ array([0., 0., 0.])
"""
- res = empty_like(a, dtype=dtype, order=order, subok=subok)
+ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
# needed instead of a 0 to get same result as zeros for for string dtypes
z = zeros(1, dtype=res.dtype)
multiarray.copyto(res, z, casting='unsafe')
return res
+@set_module('numpy')
def ones(shape, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with ones.
@@ -185,19 +189,19 @@ def ones(shape, dtype=None, order='C'):
Examples
--------
>>> np.ones(5)
- array([ 1., 1., 1., 1., 1.])
+ array([1., 1., 1., 1., 1.])
>>> np.ones((5,), dtype=int)
array([1, 1, 1, 1, 1])
>>> np.ones((2, 1))
- array([[ 1.],
- [ 1.]])
+ array([[1.],
+ [1.]])
>>> s = (2,2)
>>> np.ones(s)
- array([[ 1., 1.],
- [ 1., 1.]])
+ array([[1., 1.],
+ [1., 1.]])
"""
a = empty(shape, dtype, order)
@@ -205,7 +209,12 @@ def ones(shape, dtype=None, order='C'):
return a
-def ones_like(a, dtype=None, order='K', subok=True):
+def _ones_like_dispatcher(a, dtype=None, order=None, subok=None, shape=None):
+ return (a,)
+
+
+@array_function_dispatch(_ones_like_dispatcher)
+def ones_like(a, dtype=None, order='K', subok=True, shape=None):
"""
Return an array of ones with the same shape and type as a given array.
@@ -229,6 +238,12 @@ def ones_like(a, dtype=None, order='K', subok=True):
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -255,16 +270,17 @@ def ones_like(a, dtype=None, order='K', subok=True):
>>> y = np.arange(3, dtype=float)
>>> y
- array([ 0., 1., 2.])
+ array([0., 1., 2.])
>>> np.ones_like(y)
- array([ 1., 1., 1.])
+ array([1., 1., 1.])
"""
- res = empty_like(a, dtype=dtype, order=order, subok=subok)
+ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, 1, casting='unsafe')
return res
+@set_module('numpy')
def full(shape, fill_value, dtype=None, order='C'):
"""
Return a new array of given shape and type, filled with `fill_value`.
@@ -297,8 +313,8 @@ def full(shape, fill_value, dtype=None, order='C'):
Examples
--------
>>> np.full((2, 2), np.inf)
- array([[ inf, inf],
- [ inf, inf]])
+ array([[inf, inf],
+ [inf, inf]])
>>> np.full((2, 2), 10)
array([[10, 10],
[10, 10]])
@@ -311,7 +327,12 @@ def full(shape, fill_value, dtype=None, order='C'):
return a
-def full_like(a, fill_value, dtype=None, order='K', subok=True):
+def _full_like_dispatcher(a, fill_value, dtype=None, order=None, subok=None, shape=None):
+ return (a,)
+
+
+@array_function_dispatch(_full_like_dispatcher)
+def full_like(a, fill_value, dtype=None, order='K', subok=True, shape=None):
"""
Return a full array with the same shape and type as a given array.
@@ -333,6 +354,12 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
If True, then the newly created array will use the sub-class
type of 'a', otherwise it will be a base-class array. Defaults
to True.
+ shape : int or sequence of ints, optional.
+ Overrides the shape of the result. If order='K' and the number of
+ dimensions is unchanged, will try to keep order, otherwise,
+ order='C' is implied.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -354,20 +381,25 @@ def full_like(a, fill_value, dtype=None, order='K', subok=True):
>>> np.full_like(x, 0.1)
array([0, 0, 0, 0, 0, 0])
>>> np.full_like(x, 0.1, dtype=np.double)
- array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+ array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
>>> np.full_like(x, np.nan, dtype=np.double)
- array([ nan, nan, nan, nan, nan, nan])
+ array([nan, nan, nan, nan, nan, nan])
>>> y = np.arange(6, dtype=np.double)
>>> np.full_like(y, 0.1)
- array([ 0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
+ array([0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
"""
- res = empty_like(a, dtype=dtype, order=order, subok=subok)
+ res = empty_like(a, dtype=dtype, order=order, subok=subok, shape=shape)
multiarray.copyto(res, fill_value, casting='unsafe')
return res
+def _count_nonzero_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_count_nonzero_dispatcher)
def count_nonzero(a, axis=None):
"""
Counts the number of non-zero values in the array ``a``.
@@ -430,304 +462,10 @@ def count_nonzero(a, axis=None):
return a_bool.sum(axis=axis, dtype=np.intp)
-def asarray(a, dtype=None, order=None):
- """Convert the input to an array.
-
- Parameters
- ----------
- a : array_like
- Input data, in any form that can be converted to an array. This
- includes lists, lists of tuples, tuples, tuples of tuples, tuples
- of lists and ndarrays.
- dtype : data-type, optional
- By default, the data-type is inferred from the input data.
- order : {'C', 'F'}, optional
- Whether to use row-major (C-style) or
- column-major (Fortran-style) memory representation.
- Defaults to 'C'.
-
- Returns
- -------
- out : ndarray
- Array interpretation of `a`. No copy is performed if the input
- is already an ndarray with matching dtype and order. If `a` is a
- subclass of ndarray, a base class ndarray is returned.
-
- See Also
- --------
- asanyarray : Similar function which passes through subclasses.
- ascontiguousarray : Convert input to a contiguous array.
- asfarray : Convert input to a floating point ndarray.
- asfortranarray : Convert input to an ndarray with column-major
- memory order.
- asarray_chkfinite : Similar function which checks input for NaNs and Infs.
- fromiter : Create an array from an iterator.
- fromfunction : Construct an array by executing a function on grid
- positions.
-
- Examples
- --------
- Convert a list into an array:
-
- >>> a = [1, 2]
- >>> np.asarray(a)
- array([1, 2])
-
- Existing arrays are not copied:
-
- >>> a = np.array([1, 2])
- >>> np.asarray(a) is a
- True
-
- If `dtype` is set, array is copied only if dtype does not match:
-
- >>> a = np.array([1, 2], dtype=np.float32)
- >>> np.asarray(a, dtype=np.float32) is a
- True
- >>> np.asarray(a, dtype=np.float64) is a
- False
-
- Contrary to `asanyarray`, ndarray subclasses are not passed through:
-
- >>> issubclass(np.recarray, np.ndarray)
- True
- >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
- >>> np.asarray(a) is a
- False
- >>> np.asanyarray(a) is a
- True
-
- """
- return array(a, dtype, copy=False, order=order)
-
-
-def asanyarray(a, dtype=None, order=None):
- """Convert the input to an ndarray, but pass ndarray subclasses through.
-
- Parameters
- ----------
- a : array_like
- Input data, in any form that can be converted to an array. This
- includes scalars, lists, lists of tuples, tuples, tuples of tuples,
- tuples of lists, and ndarrays.
- dtype : data-type, optional
- By default, the data-type is inferred from the input data.
- order : {'C', 'F'}, optional
- Whether to use row-major (C-style) or column-major
- (Fortran-style) memory representation. Defaults to 'C'.
-
- Returns
- -------
- out : ndarray or an ndarray subclass
- Array interpretation of `a`. If `a` is an ndarray or a subclass
- of ndarray, it is returned as-is and no copy is performed.
-
- See Also
- --------
- asarray : Similar function which always returns ndarrays.
- ascontiguousarray : Convert input to a contiguous array.
- asfarray : Convert input to a floating point ndarray.
- asfortranarray : Convert input to an ndarray with column-major
- memory order.
- asarray_chkfinite : Similar function which checks input for NaNs and
- Infs.
- fromiter : Create an array from an iterator.
- fromfunction : Construct an array by executing a function on grid
- positions.
-
- Examples
- --------
- Convert a list into an array:
-
- >>> a = [1, 2]
- >>> np.asanyarray(a)
- array([1, 2])
-
- Instances of `ndarray` subclasses are passed through as-is:
-
- >>> a = np.array([(1.0, 2), (3.0, 4)], dtype='f4,i4').view(np.recarray)
- >>> np.asanyarray(a) is a
- True
-
- """
- return array(a, dtype, copy=False, order=order, subok=True)
-
-
-def ascontiguousarray(a, dtype=None):
- """
- Return a contiguous array in memory (C order).
-
- Parameters
- ----------
- a : array_like
- Input array.
- dtype : str or dtype object, optional
- Data-type of returned array.
-
- Returns
- -------
- out : ndarray
- Contiguous array of same shape and content as `a`, with type `dtype`
- if specified.
-
- See Also
- --------
- asfortranarray : Convert input to an ndarray with column-major
- memory order.
- require : Return an ndarray that satisfies requirements.
- ndarray.flags : Information about the memory layout of the array.
-
- Examples
- --------
- >>> x = np.arange(6).reshape(2,3)
- >>> np.ascontiguousarray(x, dtype=np.float32)
- array([[ 0., 1., 2.],
- [ 3., 4., 5.]], dtype=float32)
- >>> x.flags['C_CONTIGUOUS']
- True
-
- """
- return array(a, dtype, copy=False, order='C', ndmin=1)
-
-
-def asfortranarray(a, dtype=None):
- """
- Return an array laid out in Fortran order in memory.
-
- Parameters
- ----------
- a : array_like
- Input array.
- dtype : str or dtype object, optional
- By default, the data-type is inferred from the input data.
-
- Returns
- -------
- out : ndarray
- The input `a` in Fortran, or column-major, order.
-
- See Also
- --------
- ascontiguousarray : Convert input to a contiguous (C order) array.
- asanyarray : Convert input to an ndarray with either row or
- column-major memory order.
- require : Return an ndarray that satisfies requirements.
- ndarray.flags : Information about the memory layout of the array.
-
- Examples
- --------
- >>> x = np.arange(6).reshape(2,3)
- >>> y = np.asfortranarray(x)
- >>> x.flags['F_CONTIGUOUS']
- False
- >>> y.flags['F_CONTIGUOUS']
- True
-
- """
- return array(a, dtype, copy=False, order='F', ndmin=1)
-
-
-def require(a, dtype=None, requirements=None):
- """
- Return an ndarray of the provided type that satisfies requirements.
-
- This function is useful to be sure that an array with the correct flags
- is returned for passing to compiled code (perhaps through ctypes).
-
- Parameters
- ----------
- a : array_like
- The object to be converted to a type-and-requirement-satisfying array.
- dtype : data-type
- The required data-type. If None preserve the current dtype. If your
- application requires the data to be in native byteorder, include
- a byteorder specification as a part of the dtype specification.
- requirements : str or list of str
- The requirements list can be any of the following
-
- * 'F_CONTIGUOUS' ('F') - ensure a Fortran-contiguous array
- * 'C_CONTIGUOUS' ('C') - ensure a C-contiguous array
- * 'ALIGNED' ('A') - ensure a data-type aligned array
- * 'WRITEABLE' ('W') - ensure a writable array
- * 'OWNDATA' ('O') - ensure an array that owns its own data
- * 'ENSUREARRAY', ('E') - ensure a base array, instead of a subclass
-
- See Also
- --------
- asarray : Convert input to an ndarray.
- asanyarray : Convert to an ndarray, but pass through ndarray subclasses.
- ascontiguousarray : Convert input to a contiguous array.
- asfortranarray : Convert input to an ndarray with column-major
- memory order.
- ndarray.flags : Information about the memory layout of the array.
-
- Notes
- -----
- The returned array will be guaranteed to have the listed requirements
- by making a copy if needed.
-
- Examples
- --------
- >>> x = np.arange(6).reshape(2,3)
- >>> x.flags
- C_CONTIGUOUS : True
- F_CONTIGUOUS : False
- OWNDATA : False
- WRITEABLE : True
- ALIGNED : True
- WRITEBACKIFCOPY : False
- UPDATEIFCOPY : False
-
- >>> y = np.require(x, dtype=np.float32, requirements=['A', 'O', 'W', 'F'])
- >>> y.flags
- C_CONTIGUOUS : False
- F_CONTIGUOUS : True
- OWNDATA : True
- WRITEABLE : True
- ALIGNED : True
- WRITEBACKIFCOPY : False
- UPDATEIFCOPY : False
-
- """
- possible_flags = {'C': 'C', 'C_CONTIGUOUS': 'C', 'CONTIGUOUS': 'C',
- 'F': 'F', 'F_CONTIGUOUS': 'F', 'FORTRAN': 'F',
- 'A': 'A', 'ALIGNED': 'A',
- 'W': 'W', 'WRITEABLE': 'W',
- 'O': 'O', 'OWNDATA': 'O',
- 'E': 'E', 'ENSUREARRAY': 'E'}
- if not requirements:
- return asanyarray(a, dtype=dtype)
- else:
- requirements = set(possible_flags[x.upper()] for x in requirements)
-
- if 'E' in requirements:
- requirements.remove('E')
- subok = False
- else:
- subok = True
-
- order = 'A'
- if requirements >= set(['C', 'F']):
- raise ValueError('Cannot specify both "C" and "F" order')
- elif 'F' in requirements:
- order = 'F'
- requirements.remove('F')
- elif 'C' in requirements:
- order = 'C'
- requirements.remove('C')
-
- arr = array(a, dtype=dtype, order=order, copy=False, subok=subok)
-
- for prop in requirements:
- if not arr.flags[prop]:
- arr = arr.copy(order)
- break
- return arr
-
-
+@set_module('numpy')
def isfortran(a):
"""
- Returns True if the array is Fortran contiguous but *not* C contiguous.
+ Check if the array is Fortran contiguous but *not* C contiguous.
This function is obsolete and, because of changes due to relaxed stride
checking, its return value for the same array may differ for versions
@@ -739,6 +477,11 @@ def isfortran(a):
a : ndarray
Input array.
+ Returns
+ -------
+ isfortran : bool
+ Returns True if the array is Fortran contiguous but *not* C contiguous.
+
Examples
--------
@@ -754,7 +497,7 @@ def isfortran(a):
>>> np.isfortran(a)
False
- >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='FORTRAN')
+ >>> b = np.array([[1, 2, 3], [4, 5, 6]], order='F')
>>> b
array([[1, 2, 3],
[4, 5, 6]])
@@ -780,13 +523,18 @@ def isfortran(a):
C-ordered arrays evaluate as False even if they are also FORTRAN-ordered.
- >>> np.isfortran(np.array([1, 2], order='FORTRAN'))
+ >>> np.isfortran(np.array([1, 2], order='F'))
False
"""
return a.flags.fnc
+def _argwhere_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_argwhere_dispatcher)
def argwhere(a):
"""
Find the indices of array elements that are non-zero, grouped by element.
@@ -798,8 +546,10 @@ def argwhere(a):
Returns
-------
- index_array : ndarray
+ index_array : (N, a.ndim) ndarray
Indices of elements that are non-zero. Indices are grouped by element.
+ This array will have shape ``(N, a.ndim)`` where ``N`` is the number of
+ non-zero items.
See Also
--------
@@ -807,7 +557,8 @@ def argwhere(a):
Notes
-----
- ``np.argwhere(a)`` is the same as ``np.transpose(np.nonzero(a))``.
+ ``np.argwhere(a)`` is almost the same as ``np.transpose(np.nonzero(a))``,
+ but produces a result of the correct shape for a 0D array.
The output of ``argwhere`` is not suitable for indexing arrays.
For this purpose use ``nonzero(a)`` instead.
@@ -825,9 +576,19 @@ def argwhere(a):
[1, 2]])
"""
+ # nonzero does not behave well on 0d, so promote to 1d
+ if np.ndim(a) == 0:
+ a = shape_base.atleast_1d(a)
+ # then remove the added dimension
+ return argwhere(a)[:,:0]
return transpose(nonzero(a))
+def _flatnonzero_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_flatnonzero_dispatcher)
def flatnonzero(a):
"""
Return indices that are non-zero in the flattened version of a.
@@ -879,6 +640,11 @@ def _mode_from_name(mode):
return mode
+def _correlate_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_correlate_dispatcher)
def correlate(a, v, mode='valid'):
"""
Cross-correlation of two 1-dimensional sequences.
@@ -924,11 +690,11 @@ def correlate(a, v, mode='valid'):
Examples
--------
>>> np.correlate([1, 2, 3], [0, 1, 0.5])
- array([ 3.5])
+ array([3.5])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "same")
- array([ 2. , 3.5, 3. ])
+ array([2. , 3.5, 3. ])
>>> np.correlate([1, 2, 3], [0, 1, 0.5], "full")
- array([ 0.5, 2. , 3.5, 3. , 0. ])
+ array([0.5, 2. , 3.5, 3. , 0. ])
Using complex sequences:
@@ -947,6 +713,11 @@ def correlate(a, v, mode='valid'):
return multiarray.correlate2(a, v, mode)
+def _convolve_dispatcher(a, v, mode=None):
+ return (a, v)
+
+
+@array_function_dispatch(_convolve_dispatcher)
def convolve(a, v, mode='full'):
"""
Returns the discrete, linear convolution of two one-dimensional sequences.
@@ -1019,20 +790,20 @@ def convolve(a, v, mode='full'):
before "sliding" the two across one another:
>>> np.convolve([1, 2, 3], [0, 1, 0.5])
- array([ 0. , 1. , 2.5, 4. , 1.5])
+ array([0. , 1. , 2.5, 4. , 1.5])
Only return the middle values of the convolution.
Contains boundary effects, where zeros are taken
into account:
>>> np.convolve([1,2,3],[0,1,0.5], 'same')
- array([ 1. , 2.5, 4. ])
+ array([1. , 2.5, 4. ])
The two arrays are of the same length, so there
is only one position where they completely overlap:
>>> np.convolve([1,2,3],[0,1,0.5], 'valid')
- array([ 2.5])
+ array([2.5])
"""
a, v = array(a, copy=False, ndmin=1), array(v, copy=False, ndmin=1)
@@ -1046,6 +817,11 @@ def convolve(a, v, mode='full'):
return multiarray.correlate(a, v[::-1], mode)
+def _outer_dispatcher(a, b, out=None):
+ return (a, b, out)
+
+
+@array_function_dispatch(_outer_dispatcher)
def outer(a, b, out=None):
"""
Compute the outer product of two vectors.
@@ -1103,11 +879,11 @@ def outer(a, b, out=None):
[-2., -1., 0., 1., 2.]])
>>> im = np.outer(1j*np.linspace(2, -2, 5), np.ones((5,)))
>>> im
- array([[ 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
- [ 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
- [ 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
- [ 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
+ array([[0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j, 0.+2.j],
+ [0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j, 0.+1.j],
+ [0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
+ [0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j, 0.-1.j],
+ [0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j, 0.-2.j]])
>>> grid = rl + im
>>> grid
array([[-2.+2.j, -1.+2.j, 0.+2.j, 1.+2.j, 2.+2.j],
@@ -1120,9 +896,9 @@ def outer(a, b, out=None):
>>> x = np.array(['a', 'b', 'c'], dtype=object)
>>> np.outer(x, [1, 2, 3])
- array([[a, aa, aaa],
- [b, bb, bbb],
- [c, cc, ccc]], dtype=object)
+ array([['a', 'aa', 'aaa'],
+ ['b', 'bb', 'bbb'],
+ ['c', 'cc', 'ccc']], dtype=object)
"""
a = asarray(a)
@@ -1130,21 +906,25 @@ def outer(a, b, out=None):
return multiply(a.ravel()[:, newaxis], b.ravel()[newaxis, :], out)
+def _tensordot_dispatcher(a, b, axes=None):
+ return (a, b)
+
+
+@array_function_dispatch(_tensordot_dispatcher)
def tensordot(a, b, axes=2):
- """Compute tensor dot product along specified axes for arrays >= 1-D.
+ """
+ Compute tensor dot product along specified axes.
- Given two tensors (arrays of dimension greater than or equal to one),
- `a` and `b`, and an array_like object containing two array_like
- objects, ``(a_axes, b_axes)``, sum the products of `a`'s and `b`'s
- elements (components) over the axes specified by ``a_axes`` and
- ``b_axes``. The third argument can be a single non-negative
- integer_like scalar, ``N``; if it is such, then the last ``N``
- dimensions of `a` and the first ``N`` dimensions of `b` are summed
- over.
+ Given two tensors, `a` and `b`, and an array_like object containing
+ two array_like objects, ``(a_axes, b_axes)``, sum the products of
+ `a`'s and `b`'s elements (components) over the axes specified by
+ ``a_axes`` and ``b_axes``. The third argument can be a single non-negative
+ integer_like scalar, ``N``; if it is such, then the last ``N`` dimensions
+ of `a` and the first ``N`` dimensions of `b` are summed over.
Parameters
----------
- a, b : array_like, len(shape) >= 1
+ a, b : array_like
Tensors to "dot".
axes : int or (2,) array_like
@@ -1155,6 +935,11 @@ def tensordot(a, b, axes=2):
Or, a list of axes to be summed over, first sequence applying to `a`,
second to `b`. Both elements array_like must be of the same length.
+ Returns
+ -------
+ output : ndarray
+ The tensor dot product of the input.
+
See Also
--------
dot, einsum
@@ -1188,11 +973,11 @@ def tensordot(a, b, axes=2):
>>> c.shape
(5, 2)
>>> c
- array([[ 4400., 4730.],
- [ 4532., 4874.],
- [ 4664., 5018.],
- [ 4796., 5162.],
- [ 4928., 5306.]])
+ array([[4400., 4730.],
+ [4532., 4874.],
+ [4664., 5018.],
+ [4796., 5162.],
+ [4928., 5306.]])
>>> # A slower but equivalent way of computing the same...
>>> d = np.zeros((5,2))
>>> for i in range(5):
@@ -1218,40 +1003,40 @@ def tensordot(a, b, axes=2):
[3, 4]],
[[5, 6],
[7, 8]]])
- array([[a, b],
- [c, d]], dtype=object)
+ array([['a', 'b'],
+ ['c', 'd']], dtype=object)
>>> np.tensordot(a, A) # third argument default is 2 for double-contraction
- array([abbcccdddd, aaaaabbbbbbcccccccdddddddd], dtype=object)
+ array(['abbcccdddd', 'aaaaabbbbbbcccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, 1)
- array([[[acc, bdd],
- [aaacccc, bbbdddd]],
- [[aaaaacccccc, bbbbbdddddd],
- [aaaaaaacccccccc, bbbbbbbdddddddd]]], dtype=object)
+ array([[['acc', 'bdd'],
+ ['aaacccc', 'bbbdddd']],
+ [['aaaaacccccc', 'bbbbbdddddd'],
+ ['aaaaaaacccccccc', 'bbbbbbbdddddddd']]], dtype=object)
>>> np.tensordot(a, A, 0) # tensor product (result too long to incl.)
- array([[[[[a, b],
- [c, d]],
+ array([[[[['a', 'b'],
+ ['c', 'd']],
...
>>> np.tensordot(a, A, (0, 1))
- array([[[abbbbb, cddddd],
- [aabbbbbb, ccdddddd]],
- [[aaabbbbbbb, cccddddddd],
- [aaaabbbbbbbb, ccccdddddddd]]], dtype=object)
+ array([[['abbbbb', 'cddddd'],
+ ['aabbbbbb', 'ccdddddd']],
+ [['aaabbbbbbb', 'cccddddddd'],
+ ['aaaabbbbbbbb', 'ccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, (2, 1))
- array([[[abb, cdd],
- [aaabbbb, cccdddd]],
- [[aaaaabbbbbb, cccccdddddd],
- [aaaaaaabbbbbbbb, cccccccdddddddd]]], dtype=object)
+ array([[['abb', 'cdd'],
+ ['aaabbbb', 'cccdddd']],
+ [['aaaaabbbbbb', 'cccccdddddd'],
+ ['aaaaaaabbbbbbbb', 'cccccccdddddddd']]], dtype=object)
>>> np.tensordot(a, A, ((0, 1), (0, 1)))
- array([abbbcccccddddddd, aabbbbccccccdddddddd], dtype=object)
+ array(['abbbcccccddddddd', 'aabbbbccccccdddddddd'], dtype=object)
>>> np.tensordot(a, A, ((2, 1), (1, 0)))
- array([acccbbdddd, aaaaacccccccbbbbbbdddddddd], dtype=object)
+ array(['acccbbdddd', 'aaaaacccccccbbbbbbdddddddd'], dtype=object)
"""
try:
@@ -1318,6 +1103,11 @@ def tensordot(a, b, axes=2):
return res.reshape(olda + oldb)
+def _roll_dispatcher(a, shift, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_roll_dispatcher)
def roll(a, shift, axis=None):
"""
Roll array elements along a given axis.
@@ -1361,6 +1151,8 @@ def roll(a, shift, axis=None):
>>> x = np.arange(10)
>>> np.roll(x, 2)
array([8, 9, 0, 1, 2, 3, 4, 5, 6, 7])
+ >>> np.roll(x, -2)
+ array([2, 3, 4, 5, 6, 7, 8, 9, 0, 1])
>>> x2 = np.reshape(x, (2,5))
>>> x2
@@ -1369,12 +1161,21 @@ def roll(a, shift, axis=None):
>>> np.roll(x2, 1)
array([[9, 0, 1, 2, 3],
[4, 5, 6, 7, 8]])
+ >>> np.roll(x2, -1)
+ array([[1, 2, 3, 4, 5],
+ [6, 7, 8, 9, 0]])
>>> np.roll(x2, 1, axis=0)
array([[5, 6, 7, 8, 9],
[0, 1, 2, 3, 4]])
+ >>> np.roll(x2, -1, axis=0)
+ array([[5, 6, 7, 8, 9],
+ [0, 1, 2, 3, 4]])
>>> np.roll(x2, 1, axis=1)
array([[4, 0, 1, 2, 3],
[9, 5, 6, 7, 8]])
+ >>> np.roll(x2, -1, axis=1)
+ array([[1, 2, 3, 4, 0],
+ [6, 7, 8, 9, 5]])
"""
a = asanyarray(a)
@@ -1407,6 +1208,11 @@ def roll(a, shift, axis=None):
return result
+def _rollaxis_dispatcher(a, axis, start=None):
+ return (a,)
+
+
+@array_function_dispatch(_rollaxis_dispatcher)
def rollaxis(a, axis, start=0):
"""
Roll the specified axis backwards, until it lies in a given position.
@@ -1527,6 +1333,11 @@ def normalize_axis_tuple(axis, ndim, argname=None, allow_duplicate=False):
return axis
+def _moveaxis_dispatcher(a, source, destination):
+ return (a,)
+
+
+@array_function_dispatch(_moveaxis_dispatcher)
def moveaxis(a, source, destination):
"""
Move axes of an array to new positions.
@@ -1603,6 +1414,11 @@ def _move_axis_to_0(a, axis):
return moveaxis(a, axis, 0)
+def _cross_dispatcher(a, b, axisa=None, axisb=None, axisc=None, axis=None):
+ return (a, b)
+
+
+@array_function_dispatch(_cross_dispatcher)
def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
"""
Return the cross product of two (arrays of) vectors.
@@ -1684,7 +1500,7 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
>>> x = [1,2]
>>> y = [4,5]
>>> np.cross(x, y)
- -3
+ array(-3)
Multiple vector cross-products. Note that the direction of the cross
product vector is defined by the `right-hand rule`.
@@ -1803,11 +1619,12 @@ def cross(a, b, axisa=-1, axisb=-1, axisc=-1, axis=None):
little_endian = (sys.byteorder == 'little')
-def indices(dimensions, dtype=int):
+@set_module('numpy')
+def indices(dimensions, dtype=int, sparse=False):
"""
Return an array representing the indices of a grid.
- Compute an array where the subarrays contain index values 0,1,...
+ Compute an array where the subarrays contain index values 0, 1, ...
varying only along the corresponding axis.
Parameters
@@ -1816,28 +1633,38 @@ def indices(dimensions, dtype=int):
The shape of the grid.
dtype : dtype, optional
Data type of the result.
+ sparse : boolean, optional
+ Return a sparse representation of the grid instead of a dense
+ representation. Default is False.
+
+ .. versionadded:: 1.17
Returns
-------
- grid : ndarray
- The array of grid indices,
- ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+ grid : one ndarray or tuple of ndarrays
+ If sparse is False:
+ Returns one array of grid indices,
+ ``grid.shape = (len(dimensions),) + tuple(dimensions)``.
+ If sparse is True:
+ Returns a tuple of arrays, with
+ ``grid[i].shape = (1, ..., 1, dimensions[i], 1, ..., 1)`` with
+ dimensions[i] in the ith place
See Also
--------
- mgrid, meshgrid
+ mgrid, ogrid, meshgrid
Notes
-----
- The output shape is obtained by prepending the number of dimensions
- in front of the tuple of dimensions, i.e. if `dimensions` is a tuple
- ``(r0, ..., rN-1)`` of length ``N``, the output shape is
- ``(N,r0,...,rN-1)``.
+ The output shape in the dense case is obtained by prepending the number
+ of dimensions in front of the tuple of dimensions, i.e. if `dimensions`
+ is a tuple ``(r0, ..., rN-1)`` of length ``N``, the output shape is
+ ``(N, r0, ..., rN-1)``.
The subarrays ``grid[k]`` contains the N-D array of indices along the
``k-th`` axis. Explicitly::
- grid[k,i0,i1,...,iN-1] = ik
+ grid[k, i0, i1, ..., iN-1] = ik
Examples
--------
@@ -1862,18 +1689,40 @@ def indices(dimensions, dtype=int):
Note that it would be more straightforward in the above example to
extract the required elements directly with ``x[:2, :3]``.
+ If sparse is set to true, the grid will be returned in a sparse
+ representation.
+
+ >>> i, j = np.indices((2, 3), sparse=True)
+ >>> i.shape
+ (2, 1)
+ >>> j.shape
+ (1, 3)
+ >>> i # row indices
+ array([[0],
+ [1]])
+ >>> j # column indices
+ array([[0, 1, 2]])
+
"""
dimensions = tuple(dimensions)
N = len(dimensions)
shape = (1,)*N
- res = empty((N,)+dimensions, dtype=dtype)
+ if sparse:
+ res = tuple()
+ else:
+ res = empty((N,)+dimensions, dtype=dtype)
for i, dim in enumerate(dimensions):
- res[i] = arange(dim, dtype=dtype).reshape(
+ idx = arange(dim, dtype=dtype).reshape(
shape[:i] + (dim,) + shape[i+1:]
)
+ if sparse:
+ res = res + (idx,)
+ else:
+ res[i] = idx
return res
+@set_module('numpy')
def fromfunction(function, shape, **kwargs):
"""
Construct an array by executing a function over each coordinate.
@@ -1930,6 +1779,11 @@ def fromfunction(function, shape, **kwargs):
return function(*args, **kwargs)
+def _frombuffer(buf, dtype, shape, order):
+ return frombuffer(buf, dtype=dtype).reshape(shape, order=order)
+
+
+@set_module('numpy')
def isscalar(num):
"""
Returns True if the type of `num` is a scalar type.
@@ -1994,10 +1848,10 @@ def isscalar(num):
NumPy supports PEP 3141 numbers:
>>> from fractions import Fraction
- >>> isscalar(Fraction(5, 17))
+ >>> np.isscalar(Fraction(5, 17))
True
>>> from numbers import Number
- >>> isscalar(Number())
+ >>> np.isscalar(Number())
True
"""
@@ -2006,6 +1860,7 @@ def isscalar(num):
or isinstance(num, numbers.Number))
+@set_module('numpy')
def binary_repr(num, width=None):
"""
Return the binary representation of the input number as a string.
@@ -2084,6 +1939,10 @@ def binary_repr(num, width=None):
"will raise an error in the future.", DeprecationWarning,
stacklevel=3)
+ # Ensure that num is a Python integer to avoid overflow or unwanted
+ # casts to floating point.
+ num = operator.index(num)
+
if num == 0:
return '0' * (width or 1)
@@ -2116,6 +1975,7 @@ def binary_repr(num, width=None):
return '1' * (outwidth - binwidth) + binary
+@set_module('numpy')
def base_repr(number, base=2, padding=0):
"""
Return a string representation of a number in the given base system.
@@ -2172,29 +2032,6 @@ def base_repr(number, base=2, padding=0):
return ''.join(reversed(res or '0'))
-def load(file):
- """
- Wrapper around cPickle.load which accepts either a file-like object or
- a filename.
-
- Note that the NumPy binary format is not based on pickle/cPickle anymore.
- For details on the preferred way of loading and saving files, see `load`
- and `save`.
-
- See Also
- --------
- load, save
-
- """
- # NumPy 1.15.0, 2017-12-10
- warnings.warn(
- "np.core.numeric.load is deprecated, use pickle.load instead",
- DeprecationWarning, stacklevel=2)
- if isinstance(file, type("")):
- file = open(file, "rb")
- return pickle.load(file)
-
-
# These are all essentially abbreviations
# These might wind up in a special abbreviations module
@@ -2210,6 +2047,7 @@ def _maketup(descr, val):
return tuple(res)
+@set_module('numpy')
def identity(n, dtype=None):
"""
Return the identity array.
@@ -2233,15 +2071,20 @@ def identity(n, dtype=None):
Examples
--------
>>> np.identity(3)
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
"""
from numpy import eye
return eye(n, dtype=dtype)
+def _allclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_allclose_dispatcher)
def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns True if two arrays are element-wise equal within a tolerance.
@@ -2265,7 +2108,7 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
The absolute tolerance parameter (see Notes).
equal_nan : bool
Whether to compare NaN's as equal. If True, NaN's in `a` will be
- considered equal to NaN's in `b` in the output array.
+ considered equal to NaN's in `b`.
.. versionadded:: 1.10.0
@@ -2313,6 +2156,11 @@ def allclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
return bool(res)
+def _isclose_dispatcher(a, b, rtol=None, atol=None, equal_nan=None):
+ return (a, b)
+
+
+@array_function_dispatch(_isclose_dispatcher)
def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
"""
Returns a boolean array where two arrays are element-wise equal within a
@@ -2371,23 +2219,23 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
Examples
--------
>>> np.isclose([1e10,1e-7], [1.00001e10,1e-8])
- array([True, False])
+ array([ True, False])
>>> np.isclose([1e10,1e-8], [1.00001e10,1e-9])
- array([True, True])
+ array([ True, True])
>>> np.isclose([1e10,1e-8], [1.0001e10,1e-9])
- array([False, True])
+ array([False, True])
>>> np.isclose([1.0, np.nan], [1.0, np.nan])
- array([True, False])
+ array([ True, False])
>>> np.isclose([1.0, np.nan], [1.0, np.nan], equal_nan=True)
- array([True, True])
+ array([ True, True])
>>> np.isclose([1e-8, 1e-7], [0.0, 0.0])
- array([ True, False], dtype=bool)
+ array([ True, False])
>>> np.isclose([1e-100, 1e-7], [0.0, 0.0], atol=0.0)
- array([False, False], dtype=bool)
+ array([False, False])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.0])
- array([ True, True], dtype=bool)
+ array([ True, True])
>>> np.isclose([1e-10, 1e-10], [1e-20, 0.999999e-10], atol=0.0)
- array([False, True], dtype=bool)
+ array([False, True])
"""
def within_tol(x, y, atol, rtol):
with errstate(invalid='ignore'):
@@ -2428,6 +2276,11 @@ def isclose(a, b, rtol=1.e-5, atol=1.e-8, equal_nan=False):
return cond[()] # Flatten 0d arrays to scalars
+def _array_equal_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equal_dispatcher)
def array_equal(a1, a2):
"""
True if two arrays have the same shape and elements, False otherwise.
@@ -2470,6 +2323,11 @@ def array_equal(a1, a2):
return bool(asarray(a1 == a2).all())
+def _array_equiv_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_array_equiv_dispatcher)
def array_equiv(a1, a2):
"""
Returns True if input arrays are shape consistent and all elements equal.
@@ -2517,433 +2375,6 @@ def array_equiv(a1, a2):
return bool(asarray(a1 == a2).all())
-_errdict = {"ignore": ERR_IGNORE,
- "warn": ERR_WARN,
- "raise": ERR_RAISE,
- "call": ERR_CALL,
- "print": ERR_PRINT,
- "log": ERR_LOG}
-
-_errdict_rev = {}
-for key in _errdict.keys():
- _errdict_rev[_errdict[key]] = key
-del key
-
-
-def seterr(all=None, divide=None, over=None, under=None, invalid=None):
- """
- Set how floating-point errors are handled.
-
- Note that operations on integer scalar types (such as `int16`) are
- handled like floating point, and are affected by these settings.
-
- Parameters
- ----------
- all : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
- Set treatment for all types of floating-point errors at once:
-
- - ignore: Take no action when the exception occurs.
- - warn: Print a `RuntimeWarning` (via the Python `warnings` module).
- - raise: Raise a `FloatingPointError`.
- - call: Call a function specified using the `seterrcall` function.
- - print: Print a warning directly to ``stdout``.
- - log: Record error in a Log object specified by `seterrcall`.
-
- The default is not to change the current behavior.
- divide : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
- Treatment for division by zero.
- over : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
- Treatment for floating-point overflow.
- under : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
- Treatment for floating-point underflow.
- invalid : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}, optional
- Treatment for invalid floating-point operation.
-
- Returns
- -------
- old_settings : dict
- Dictionary containing the old settings.
-
- See also
- --------
- seterrcall : Set a callback function for the 'call' mode.
- geterr, geterrcall, errstate
-
- Notes
- -----
- The floating-point exceptions are defined in the IEEE 754 standard [1]_:
-
- - Division by zero: infinite result obtained from finite numbers.
- - Overflow: result too large to be expressed.
- - Underflow: result so close to zero that some precision
- was lost.
- - Invalid operation: result is not an expressible number, typically
- indicates that a NaN was produced.
-
- .. [1] https://en.wikipedia.org/wiki/IEEE_754
-
- Examples
- --------
- >>> old_settings = np.seterr(all='ignore') #seterr to known value
- >>> np.seterr(over='raise')
- {'over': 'ignore', 'divide': 'ignore', 'invalid': 'ignore',
- 'under': 'ignore'}
- >>> np.seterr(**old_settings) # reset to default
- {'over': 'raise', 'divide': 'ignore', 'invalid': 'ignore',
- 'under': 'ignore'}
-
- >>> np.int16(32000) * np.int16(3)
- 30464
- >>> old_settings = np.seterr(all='warn', over='raise')
- >>> np.int16(32000) * np.int16(3)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- FloatingPointError: overflow encountered in short_scalars
-
- >>> old_settings = np.seterr(all='print')
- >>> np.geterr()
- {'over': 'print', 'divide': 'print', 'invalid': 'print', 'under': 'print'}
- >>> np.int16(32000) * np.int16(3)
- Warning: overflow encountered in short_scalars
- 30464
-
- """
-
- pyvals = umath.geterrobj()
- old = geterr()
-
- if divide is None:
- divide = all or old['divide']
- if over is None:
- over = all or old['over']
- if under is None:
- under = all or old['under']
- if invalid is None:
- invalid = all or old['invalid']
-
- maskvalue = ((_errdict[divide] << SHIFT_DIVIDEBYZERO) +
- (_errdict[over] << SHIFT_OVERFLOW) +
- (_errdict[under] << SHIFT_UNDERFLOW) +
- (_errdict[invalid] << SHIFT_INVALID))
-
- pyvals[1] = maskvalue
- umath.seterrobj(pyvals)
- return old
-
-
-def geterr():
- """
- Get the current way of handling floating-point errors.
-
- Returns
- -------
- res : dict
- A dictionary with keys "divide", "over", "under", and "invalid",
- whose values are from the strings "ignore", "print", "log", "warn",
- "raise", and "call". The keys represent possible floating-point
- exceptions, and the values define how these exceptions are handled.
-
- See Also
- --------
- geterrcall, seterr, seterrcall
-
- Notes
- -----
- For complete documentation of the types of floating-point exceptions and
- treatment options, see `seterr`.
-
- Examples
- --------
- >>> np.geterr()
- {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
- 'under': 'ignore'}
- >>> np.arange(3.) / np.arange(3.)
- array([ NaN, 1., 1.])
-
- >>> oldsettings = np.seterr(all='warn', over='raise')
- >>> np.geterr()
- {'over': 'raise', 'divide': 'warn', 'invalid': 'warn', 'under': 'warn'}
- >>> np.arange(3.) / np.arange(3.)
- __main__:1: RuntimeWarning: invalid value encountered in divide
- array([ NaN, 1., 1.])
-
- """
- maskvalue = umath.geterrobj()[1]
- mask = 7
- res = {}
- val = (maskvalue >> SHIFT_DIVIDEBYZERO) & mask
- res['divide'] = _errdict_rev[val]
- val = (maskvalue >> SHIFT_OVERFLOW) & mask
- res['over'] = _errdict_rev[val]
- val = (maskvalue >> SHIFT_UNDERFLOW) & mask
- res['under'] = _errdict_rev[val]
- val = (maskvalue >> SHIFT_INVALID) & mask
- res['invalid'] = _errdict_rev[val]
- return res
-
-
-def setbufsize(size):
- """
- Set the size of the buffer used in ufuncs.
-
- Parameters
- ----------
- size : int
- Size of buffer.
-
- """
- if size > 10e6:
- raise ValueError("Buffer size, %s, is too big." % size)
- if size < 5:
- raise ValueError("Buffer size, %s, is too small." % size)
- if size % 16 != 0:
- raise ValueError("Buffer size, %s, is not a multiple of 16." % size)
-
- pyvals = umath.geterrobj()
- old = getbufsize()
- pyvals[0] = size
- umath.seterrobj(pyvals)
- return old
-
-
-def getbufsize():
- """
- Return the size of the buffer used in ufuncs.
-
- Returns
- -------
- getbufsize : int
- Size of ufunc buffer in bytes.
-
- """
- return umath.geterrobj()[0]
-
-
-def seterrcall(func):
- """
- Set the floating-point error callback function or log object.
-
- There are two ways to capture floating-point error messages. The first
- is to set the error-handler to 'call', using `seterr`. Then, set
- the function to call using this function.
-
- The second is to set the error-handler to 'log', using `seterr`.
- Floating-point errors then trigger a call to the 'write' method of
- the provided object.
-
- Parameters
- ----------
- func : callable f(err, flag) or object with write method
- Function to call upon floating-point errors ('call'-mode) or
- object whose 'write' method is used to log such message ('log'-mode).
-
- The call function takes two arguments. The first is a string describing
- the type of error (such as "divide by zero", "overflow", "underflow",
- or "invalid value"), and the second is the status flag. The flag is a
- byte, whose four least-significant bits indicate the type of error, one
- of "divide", "over", "under", "invalid"::
-
- [0 0 0 0 divide over under invalid]
-
- In other words, ``flags = divide + 2*over + 4*under + 8*invalid``.
-
- If an object is provided, its write method should take one argument,
- a string.
-
- Returns
- -------
- h : callable, log instance or None
- The old error handler.
-
- See Also
- --------
- seterr, geterr, geterrcall
-
- Examples
- --------
- Callback upon error:
-
- >>> def err_handler(type, flag):
- ... print("Floating point error (%s), with flag %s" % (type, flag))
- ...
-
- >>> saved_handler = np.seterrcall(err_handler)
- >>> save_err = np.seterr(all='call')
-
- >>> np.array([1, 2, 3]) / 0.0
- Floating point error (divide by zero), with flag 1
- array([ Inf, Inf, Inf])
-
- >>> np.seterrcall(saved_handler)
- <function err_handler at 0x...>
- >>> np.seterr(**save_err)
- {'over': 'call', 'divide': 'call', 'invalid': 'call', 'under': 'call'}
-
- Log error message:
-
- >>> class Log(object):
- ... def write(self, msg):
- ... print("LOG: %s" % msg)
- ...
-
- >>> log = Log()
- >>> saved_handler = np.seterrcall(log)
- >>> save_err = np.seterr(all='log')
-
- >>> np.array([1, 2, 3]) / 0.0
- LOG: Warning: divide by zero encountered in divide
- <BLANKLINE>
- array([ Inf, Inf, Inf])
-
- >>> np.seterrcall(saved_handler)
- <__main__.Log object at 0x...>
- >>> np.seterr(**save_err)
- {'over': 'log', 'divide': 'log', 'invalid': 'log', 'under': 'log'}
-
- """
- if func is not None and not isinstance(func, collections_abc.Callable):
- if not hasattr(func, 'write') or not isinstance(func.write, collections_abc.Callable):
- raise ValueError("Only callable can be used as callback")
- pyvals = umath.geterrobj()
- old = geterrcall()
- pyvals[2] = func
- umath.seterrobj(pyvals)
- return old
-
-
-def geterrcall():
- """
- Return the current callback function used on floating-point errors.
-
- When the error handling for a floating-point error (one of "divide",
- "over", "under", or "invalid") is set to 'call' or 'log', the function
- that is called or the log instance that is written to is returned by
- `geterrcall`. This function or log instance has been set with
- `seterrcall`.
-
- Returns
- -------
- errobj : callable, log instance or None
- The current error handler. If no handler was set through `seterrcall`,
- ``None`` is returned.
-
- See Also
- --------
- seterrcall, seterr, geterr
-
- Notes
- -----
- For complete documentation of the types of floating-point exceptions and
- treatment options, see `seterr`.
-
- Examples
- --------
- >>> np.geterrcall() # we did not yet set a handler, returns None
-
- >>> oldsettings = np.seterr(all='call')
- >>> def err_handler(type, flag):
- ... print("Floating point error (%s), with flag %s" % (type, flag))
- >>> oldhandler = np.seterrcall(err_handler)
- >>> np.array([1, 2, 3]) / 0.0
- Floating point error (divide by zero), with flag 1
- array([ Inf, Inf, Inf])
-
- >>> cur_handler = np.geterrcall()
- >>> cur_handler is err_handler
- True
-
- """
- return umath.geterrobj()[2]
-
-
-class _unspecified(object):
- pass
-
-
-_Unspecified = _unspecified()
-
-
-class errstate(object):
- """
- errstate(**kwargs)
-
- Context manager for floating-point error handling.
-
- Using an instance of `errstate` as a context manager allows statements in
- that context to execute with a known error handling behavior. Upon entering
- the context the error handling is set with `seterr` and `seterrcall`, and
- upon exiting it is reset to what it was before.
-
- Parameters
- ----------
- kwargs : {divide, over, under, invalid}
- Keyword arguments. The valid keywords are the possible floating-point
- exceptions. Each keyword should have a string value that defines the
- treatment for the particular error. Possible values are
- {'ignore', 'warn', 'raise', 'call', 'print', 'log'}.
-
- See Also
- --------
- seterr, geterr, seterrcall, geterrcall
-
- Notes
- -----
- For complete documentation of the types of floating-point exceptions and
- treatment options, see `seterr`.
-
- Examples
- --------
- >>> olderr = np.seterr(all='ignore') # Set error handling to known state.
-
- >>> np.arange(3) / 0.
- array([ NaN, Inf, Inf])
- >>> with np.errstate(divide='warn'):
- ... np.arange(3) / 0.
- ...
- __main__:2: RuntimeWarning: divide by zero encountered in divide
- array([ NaN, Inf, Inf])
-
- >>> np.sqrt(-1)
- nan
- >>> with np.errstate(invalid='raise'):
- ... np.sqrt(-1)
- Traceback (most recent call last):
- File "<stdin>", line 2, in <module>
- FloatingPointError: invalid value encountered in sqrt
-
- Outside the context the error handling behavior has not changed:
-
- >>> np.geterr()
- {'over': 'warn', 'divide': 'warn', 'invalid': 'warn',
- 'under': 'ignore'}
-
- """
- # Note that we don't want to run the above doctests because they will fail
- # without a from __future__ import with_statement
-
- def __init__(self, **kwargs):
- self.call = kwargs.pop('call', _Unspecified)
- self.kwargs = kwargs
-
- def __enter__(self):
- self.oldstate = seterr(**self.kwargs)
- if self.call is not _Unspecified:
- self.oldcall = seterrcall(self.call)
-
- def __exit__(self, *exc_info):
- seterr(**self.oldstate)
- if self.call is not _Unspecified:
- seterrcall(self.oldcall)
-
-
-def _setdef():
- defval = [UFUNC_BUFSIZE_DEFAULT, ERR_DEFAULT, None]
- umath.seterrobj(defval)
-
-
-# set the default values
-_setdef()
-
Inf = inf = infty = Infinity = PINF
nan = NaN = NAN
False_ = bool_(False)
@@ -2964,7 +2395,13 @@ from . import fromnumeric
from .fromnumeric import *
from . import arrayprint
from .arrayprint import *
+from . import _asarray
+from ._asarray import *
+from . import _ufunc_config
+from ._ufunc_config import *
extend_all(fromnumeric)
extend_all(umath)
extend_all(numerictypes)
extend_all(arrayprint)
+extend_all(_asarray)
+extend_all(_ufunc_config)
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index 3ff9ceef0..ab1ff65a4 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -92,6 +92,7 @@ from numpy.core.multiarray import (
datetime_as_string, busday_offset, busday_count, is_busday,
busdaycalendar
)
+from numpy.core.overrides import set_module
# we add more at the bottom
__all__ = ['sctypeDict', 'sctypeNA', 'typeDict', 'typeNA', 'sctypes',
@@ -116,8 +117,8 @@ from ._type_aliases import (
_concrete_types,
_concrete_typeinfo,
_bits_of,
- _kind_to_stem,
)
+from ._dtype import _kind_name
# we don't export these for import *, but we do want them accessible
# as numerictypes.bool, etc.
@@ -139,6 +140,7 @@ genericTypeRank = ['bool', 'int8', 'uint8', 'int16', 'uint16',
'complex32', 'complex64', 'complex128', 'complex160',
'complex192', 'complex256', 'complex512', 'object']
+@set_module('numpy')
def maximum_sctype(t):
"""
Return the scalar type of highest precision of the same kind as the input.
@@ -162,32 +164,33 @@ def maximum_sctype(t):
Examples
--------
>>> np.maximum_sctype(int)
- <type 'numpy.int64'>
+ <class 'numpy.int64'>
>>> np.maximum_sctype(np.uint8)
- <type 'numpy.uint64'>
+ <class 'numpy.uint64'>
>>> np.maximum_sctype(complex)
- <type 'numpy.complex192'>
+ <class 'numpy.complex256'> # may vary
>>> np.maximum_sctype(str)
- <type 'numpy.string_'>
+ <class 'numpy.str_'>
>>> np.maximum_sctype('i2')
- <type 'numpy.int64'>
+ <class 'numpy.int64'>
>>> np.maximum_sctype('f4')
- <type 'numpy.float96'>
+ <class 'numpy.float128'> # may vary
"""
g = obj2sctype(t)
if g is None:
return t
t = g
- bits = _bits_of(t)
- base = _kind_to_stem[dtype(t).kind]
+ base = _kind_name(dtype(t))
if base in sctypes:
return sctypes[base][-1]
else:
return t
+
+@set_module('numpy')
def issctype(rep):
"""
Determines whether the given object represents a scalar data-type.
@@ -232,6 +235,8 @@ def issctype(rep):
except Exception:
return False
+
+@set_module('numpy')
def obj2sctype(rep, default=None):
"""
Return the scalar dtype or NumPy equivalent of Python type of an object.
@@ -256,22 +261,21 @@ def obj2sctype(rep, default=None):
Examples
--------
>>> np.obj2sctype(np.int32)
- <type 'numpy.int32'>
+ <class 'numpy.int32'>
>>> np.obj2sctype(np.array([1., 2.]))
- <type 'numpy.float64'>
+ <class 'numpy.float64'>
>>> np.obj2sctype(np.array([1.j]))
- <type 'numpy.complex128'>
+ <class 'numpy.complex128'>
>>> np.obj2sctype(dict)
- <type 'numpy.object_'>
+ <class 'numpy.object_'>
>>> np.obj2sctype('string')
- <type 'numpy.string_'>
>>> np.obj2sctype(1, default=list)
- <type 'list'>
+ <class 'list'>
"""
- # prevent abtract classes being upcast
+ # prevent abstract classes being upcast
if isinstance(rep, type) and issubclass(rep, generic):
return rep
# extract dtype from arrays
@@ -286,6 +290,7 @@ def obj2sctype(rep, default=None):
return res.type
+@set_module('numpy')
def issubclass_(arg1, arg2):
"""
Determine if a class is a subclass of a second class.
@@ -314,7 +319,7 @@ def issubclass_(arg1, arg2):
Examples
--------
>>> np.issubclass_(np.int32, int)
- True
+ False # True on Python 2.7
>>> np.issubclass_(np.int32, float)
False
@@ -324,6 +329,8 @@ def issubclass_(arg1, arg2):
except TypeError:
return False
+
+@set_module('numpy')
def issubsctype(arg1, arg2):
"""
Determine if the first argument is a subclass of the second argument.
@@ -340,12 +347,12 @@ def issubsctype(arg1, arg2):
See Also
--------
- issctype, issubdtype,obj2sctype
+ issctype, issubdtype, obj2sctype
Examples
--------
>>> np.issubsctype('S8', str)
- True
+ False
>>> np.issubsctype(np.array([1]), int)
True
>>> np.issubsctype(np.array([1]), float)
@@ -354,6 +361,8 @@ def issubsctype(arg1, arg2):
"""
return issubclass(obj2sctype(arg1), obj2sctype(arg2))
+
+@set_module('numpy')
def issubdtype(arg1, arg2):
"""
Returns True if first argument is a typecode lower/equal in type hierarchy.
@@ -447,6 +456,8 @@ def _construct_lookups():
_construct_lookups()
+
+@set_module('numpy')
def sctype2char(sctype):
"""
Return the string representation of a scalar dtype.
@@ -474,9 +485,9 @@ def sctype2char(sctype):
Examples
--------
- >>> for sctype in [np.int32, float, complex, np.string_, np.ndarray]:
+ >>> for sctype in [np.int32, np.double, np.complex, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
- l
+ l # may vary
d
D
S
@@ -587,6 +598,8 @@ def _register_types():
_register_types()
+
+@set_module('numpy')
def find_common_type(array_types, scalar_types):
"""
Determine common type following standard coercion rules.
diff --git a/numpy/core/overrides.py b/numpy/core/overrides.py
index 906292613..55c7bd1ea 100644
--- a/numpy/core/overrides.py
+++ b/numpy/core/overrides.py
@@ -1,69 +1,24 @@
-"""Preliminary implementation of NEP-18
-
-TODO: rewrite this in C for performance.
-"""
+"""Implementation of __array_function__ overrides from NEP-18."""
import collections
import functools
+import os
+import textwrap
-from numpy.core.multiarray import ndarray
+from numpy.core._multiarray_umath import (
+ add_docstring, implement_array_function, _get_implementing_args)
from numpy.compat._inspect import getargspec
-_NDARRAY_ARRAY_FUNCTION = ndarray.__array_function__
-
-
-def get_overloaded_types_and_args(relevant_args):
- """Returns a list of arguments on which to call __array_function__.
+ARRAY_FUNCTION_ENABLED = bool(
+ int(os.environ.get('NUMPY_EXPERIMENTAL_ARRAY_FUNCTION', 1)))
- Parameters
- ----------
- relevant_args : iterable of array-like
- Iterable of array-like arguments to check for __array_function__
- methods.
- Returns
- -------
- overloaded_types : collection of types
- Types of arguments from relevant_args with __array_function__ methods.
- overloaded_args : list
- Arguments from relevant_args on which to call __array_function__
- methods, in the order in which they should be called.
+add_docstring(
+ implement_array_function,
"""
- # Runtime is O(num_arguments * num_unique_types)
- overloaded_types = []
- overloaded_args = []
- for arg in relevant_args:
- arg_type = type(arg)
- # We only collect arguments if they have a unique type, which ensures
- # reasonable performance even with a long list of possibly overloaded
- # arguments.
- if (arg_type not in overloaded_types and
- hasattr(arg_type, '__array_function__')):
-
- overloaded_types.append(arg_type)
-
- # By default, insert this argument at the end, but if it is
- # subclass of another argument, insert it before that argument.
- # This ensures "subclasses before superclasses".
- index = len(overloaded_args)
- for i, old_arg in enumerate(overloaded_args):
- if issubclass(arg_type, type(old_arg)):
- index = i
- break
- overloaded_args.insert(index, arg)
-
- # Special handling for ndarray.__array_function__
- overloaded_args = [
- arg for arg in overloaded_args
- if type(arg).__array_function__ is not _NDARRAY_ARRAY_FUNCTION
- ]
-
- return overloaded_types, overloaded_args
-
-
-def array_function_implementation_or_override(
- implementation, public_api, relevant_args, args, kwargs):
- """Implement a function with checks for __array_function__ overrides.
+ Implement a function with checks for __array_function__ overrides.
+
+ All arguments are required, and can only be passed by position.
Arguments
---------
@@ -71,43 +26,44 @@ def array_function_implementation_or_override(
Function that implements the operation on NumPy array without
overrides when called like ``implementation(*args, **kwargs)``.
public_api : function
- Function exposed by NumPy's public API riginally called like
- ``public_api(*args, **kwargs`` on which arguments are now being
+ Function exposed by NumPy's public API originally called like
+ ``public_api(*args, **kwargs)`` on which arguments are now being
checked.
relevant_args : iterable
Iterable of arguments to check for __array_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
- kwargs : tuple
+ kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
- Result from calling `implementation()` or an `__array_function__`
+ Result from calling ``implementation()`` or an ``__array_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
- """
- # Check for __array_function__ methods.
- types, overloaded_args = get_overloaded_types_and_args(relevant_args)
- if not overloaded_args:
- return implementation(*args, **kwargs)
+ """)
+
- # Call overrides
- for overloaded_arg in overloaded_args:
- # Use `public_api` instead of `implemenation` so __array_function__
- # implementations can do equality/identity comparisons.
- result = overloaded_arg.__array_function__(
- public_api, types, args, kwargs)
+# exposed for testing purposes; used internally by implement_array_function
+add_docstring(
+ _get_implementing_args,
+ """
+ Collect arguments on which to call __array_function__.
- if result is not NotImplemented:
- return result
+ Parameters
+ ----------
+ relevant_args : iterable of array-like
+ Iterable of possibly array-like arguments to check for
+ __array_function__ methods.
- raise TypeError('no implementation found for {} on types that implement '
- '__array_function__: {}'
- .format(public_api, list(map(type, overloaded_args))))
+ Returns
+ -------
+ Sequence of arguments with __array_function__ methods, in the order in
+ which they should be called.
+ """)
ArgSpec = collections.namedtuple('ArgSpec', 'args varargs keywords defaults')
@@ -135,20 +91,120 @@ def verify_matching_signatures(implementation, dispatcher):
'default argument values')
-def array_function_dispatch(dispatcher, verify=True):
- """Decorator for adding dispatch with the __array_function__ protocol."""
+def set_module(module):
+ """Decorator for overriding __module__ on a function or class.
+
+ Example usage::
+
+ @set_module('numpy')
+ def example():
+ pass
+
+ assert example.__module__ == 'numpy'
+ """
+ def decorator(func):
+ if module is not None:
+ func.__module__ = module
+ return func
+ return decorator
+
+
+
+# Call textwrap.dedent here instead of in the function so as to avoid
+# calling dedent multiple times on the same text
+_wrapped_func_source = textwrap.dedent("""
+ @functools.wraps(implementation)
+ def {name}(*args, **kwargs):
+ relevant_args = dispatcher(*args, **kwargs)
+ return implement_array_function(
+ implementation, {name}, relevant_args, args, kwargs)
+ """)
+
+
+def array_function_dispatch(dispatcher, module=None, verify=True,
+ docs_from_dispatcher=False):
+ """Decorator for adding dispatch with the __array_function__ protocol.
+
+ See NEP-18 for example usage.
+
+ Parameters
+ ----------
+ dispatcher : callable
+ Function that when called like ``dispatcher(*args, **kwargs)`` with
+ arguments from the NumPy function call returns an iterable of
+ array-like arguments to check for ``__array_function__``.
+ module : str, optional
+ __module__ attribute to set on new function, e.g., ``module='numpy'``.
+ By default, module is copied from the decorated function.
+ verify : bool, optional
+ If True, verify the that the signature of the dispatcher and decorated
+ function signatures match exactly: all required and optional arguments
+ should appear in order with the same names, but the default values for
+ all optional arguments should be ``None``. Only disable verification
+ if the dispatcher's signature needs to deviate for some particular
+ reason, e.g., because the function has a signature like
+ ``func(*args, **kwargs)``.
+ docs_from_dispatcher : bool, optional
+ If True, copy docs from the dispatcher function onto the dispatched
+ function, rather than from the implementation. This is useful for
+ functions defined in C, which otherwise don't have docstrings.
+
+ Returns
+ -------
+ Function suitable for decorating the implementation of a NumPy function.
+ """
+
+ if not ARRAY_FUNCTION_ENABLED:
+ def decorator(implementation):
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+ if module is not None:
+ implementation.__module__ = module
+ return implementation
+ return decorator
+
def decorator(implementation):
- # TODO: only do this check when the appropriate flag is enabled or for
- # a dev install. We want this check for testing but don't want to
- # slow down all numpy imports.
if verify:
verify_matching_signatures(implementation, dispatcher)
- @functools.wraps(implementation)
- def public_api(*args, **kwargs):
- relevant_args = dispatcher(*args, **kwargs)
- return array_function_implementation_or_override(
- implementation, public_api, relevant_args, args, kwargs)
+ if docs_from_dispatcher:
+ add_docstring(implementation, dispatcher.__doc__)
+
+ # Equivalently, we could define this function directly instead of using
+ # exec. This version has the advantage of giving the helper function a
+ # more interpettable name. Otherwise, the original function does not
+ # show up at all in many cases, e.g., if it's written in C or if the
+ # dispatcher gets an invalid keyword argument.
+ source = _wrapped_func_source.format(name=implementation.__name__)
+
+ source_object = compile(
+ source, filename='<__array_function__ internals>', mode='exec')
+ scope = {
+ 'implementation': implementation,
+ 'dispatcher': dispatcher,
+ 'functools': functools,
+ 'implement_array_function': implement_array_function,
+ }
+ exec(source_object, scope)
+
+ public_api = scope[implementation.__name__]
+
+ if module is not None:
+ public_api.__module__ = module
+
+ public_api._implementation = implementation
+
return public_api
return decorator
+
+
+def array_function_from_dispatcher(
+ implementation, module=None, verify=True, docs_from_dispatcher=True):
+ """Like array_function_dispatcher, but with function arguments flipped."""
+
+ def decorator(dispatcher):
+ return array_function_dispatch(
+ dispatcher, module, verify=verify,
+ docs_from_dispatcher=docs_from_dispatcher)(implementation)
+ return decorator
diff --git a/numpy/core/records.py b/numpy/core/records.py
index a483871ba..a1439f9df 100644
--- a/numpy/core/records.py
+++ b/numpy/core/records.py
@@ -7,10 +7,9 @@ Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools etc. However, it is possible for elements to be combinations
of these using structured types, such as::
- >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
+ >>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', np.int64), ('y', np.float64)])
>>> a
- array([(1, 2.0), (1, 2.0)],
- dtype=[('x', '<i4'), ('y', '<f8')])
+ array([(1, 2.), (1, 2.)], dtype=[('x', '<i8'), ('y', '<f8')])
Here, each element consists of two fields: x (and int), and y (a float).
This is known as a structured array. The different fields are analogous
@@ -21,7 +20,7 @@ one would a dictionary::
array([1, 1])
>>> a['y']
- array([ 2., 2.])
+ array([2., 2.])
Record arrays allow us to access fields as properties::
@@ -31,7 +30,7 @@ Record arrays allow us to access fields as properties::
array([1, 1])
>>> ar.y
- array([ 2., 2.])
+ array([2., 2.])
"""
from __future__ import division, absolute_import, print_function
@@ -39,10 +38,14 @@ from __future__ import division, absolute_import, print_function
import sys
import os
import warnings
+from collections import Counter, OrderedDict
from . import numeric as sb
from . import numerictypes as nt
-from numpy.compat import isfileobj, bytes, long, unicode
+from numpy.compat import (
+ isfileobj, bytes, long, unicode, os_fspath, contextlib_nullcontext
+)
+from numpy.core.overrides import set_module
from .arrayprint import get_printoptions
# All of the functions allow formats to be a dtype
@@ -73,15 +76,28 @@ _byteorderconv = {'b':'>',
numfmt = nt.typeDict
+# taken from OrderedDict recipes in the Python documentation
+# https://docs.python.org/3.3/library/collections.html#ordereddict-examples-and-recipes
+class _OrderedCounter(Counter, OrderedDict):
+ """Counter that remembers the order elements are first encountered"""
+
+ def __repr__(self):
+ return '%s(%r)' % (self.__class__.__name__, OrderedDict(self))
+
+ def __reduce__(self):
+ return self.__class__, (OrderedDict(self),)
+
+
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
- dup = []
- for i in range(len(list)):
- if (list[i] in list[i + 1:]):
- if (list[i] not in dup):
- dup.append(list[i])
- return dup
+ return [
+ item
+ for item, counts in _OrderedCounter(list).items()
+ if counts > 1
+ ]
+
+@set_module('numpy')
class format_parser(object):
"""
Class to convert formats, names, titles description to a dtype.
@@ -125,10 +141,9 @@ class format_parser(object):
Examples
--------
- >>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
+ >>> np.format_parser(['<f8', '<i4', '<a5'], ['col1', 'col2', 'col3'],
... ['T1', 'T2', 'T3']).dtype
- dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),
- (('T3', 'col3'), '|S5')])
+ dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'), (('T3', 'col3'), 'S5')])
`names` and/or `titles` can be empty lists. If `titles` is an empty list,
titles will simply not appear. If `names` is empty, default field names
@@ -136,9 +151,9 @@ class format_parser(object):
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... []).dtype
- dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])
- >>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
- dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
+ dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '<S5')])
+ >>> np.format_parser(['<f8', '<i4', '<a5'], [], []).dtype
+ dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', 'S5')])
"""
@@ -148,16 +163,18 @@ class format_parser(object):
self._createdescr(byteorder)
self.dtype = self._descr
- def _parseFormats(self, formats, aligned=0):
+ def _parseFormats(self, formats, aligned=False):
""" Parse the field formats """
if formats is None:
raise ValueError("Need formats argument")
if isinstance(formats, list):
- if len(formats) < 2:
- formats.append('')
- formats = ','.join(formats)
- dtype = sb.dtype(formats, aligned)
+ dtype = sb.dtype(
+ [('f{}'.format(i), format_) for i, format_ in enumerate(formats)],
+ aligned,
+ )
+ else:
+ dtype = sb.dtype(formats, aligned)
fields = dtype.fields
if fields is None:
dtype = sb.dtype([('f1', dtype)], aligned)
@@ -251,8 +268,8 @@ class record(nt.void):
except AttributeError:
#happens if field is Object type
return obj
- if dt.fields:
- return obj.view((self.__class__, obj.dtype.fields))
+ if dt.names is not None:
+ return obj.view((self.__class__, obj.dtype))
return obj
else:
raise AttributeError("'record' object has no "
@@ -276,8 +293,8 @@ class record(nt.void):
obj = nt.void.__getitem__(self, indx)
# copy behavior of record.__getattribute__,
- if isinstance(obj, nt.void) and obj.dtype.fields:
- return obj.view((self.__class__, obj.dtype.fields))
+ if isinstance(obj, nt.void) and obj.dtype.names is not None:
+ return obj.view((self.__class__, obj.dtype))
else:
# return a single element
return obj
@@ -287,10 +304,8 @@ class record(nt.void):
# pretty-print all fields
names = self.dtype.names
maxlen = max(len(name) for name in names)
- rows = []
fmt = '%% %ds: %%s' % maxlen
- for name in names:
- rows.append(fmt % (name, getattr(self, name)))
+ rows = [fmt % (name, getattr(self, name)) for name in names]
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
@@ -379,20 +394,19 @@ class recarray(ndarray):
--------
Create an array with two fields, ``x`` and ``y``:
- >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
+ >>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', '<f8'), ('y', '<i8')])
>>> x
- array([(1.0, 2), (3.0, 4)],
- dtype=[('x', '<f8'), ('y', '<i4')])
+ array([(1., 2), (3., 4)], dtype=[('x', '<f8'), ('y', '<i8')])
>>> x['x']
- array([ 1., 3.])
+ array([1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
- array([ 1., 3.])
+ array([1., 3.])
>>> x.y
array([2, 4])
@@ -430,7 +444,7 @@ class recarray(ndarray):
return self
def __array_finalize__(self, obj):
- if self.dtype.type is not record and self.dtype.fields:
+ if self.dtype.type is not record and self.dtype.names is not None:
# if self.dtype is not np.record, invoke __setattr__ which will
# convert it to a record if it is a void dtype.
self.dtype = self.dtype
@@ -458,7 +472,7 @@ class recarray(ndarray):
# with void type convert it to the same dtype.type (eg to preserve
# numpy.record type if present), since nested structured fields do not
# inherit type. Don't do this for non-void structures though.
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
return obj
@@ -473,7 +487,7 @@ class recarray(ndarray):
# Automatically convert (void) structured types to records
# (but not non-void structures, subarrays, or non-structured voids)
- if attr == 'dtype' and issubclass(val.type, nt.void) and val.fields:
+ if attr == 'dtype' and issubclass(val.type, nt.void) and val.names is not None:
val = sb.dtype((record, val))
newattr = attr not in self.__dict__
@@ -507,7 +521,7 @@ class recarray(ndarray):
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
obj = obj.view(type(self))
if issubclass(obj.dtype.type, nt.void):
return obj.view(dtype=(self.dtype.type, obj.dtype))
@@ -563,7 +577,7 @@ class recarray(ndarray):
if val is None:
obj = self.getfield(*res)
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
return obj
return obj.view(ndarray)
else:
@@ -579,7 +593,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
>>> print(r[1])
- (2, 'dd', 2.0)
+ (2, 'dd', 2.0) # may vary
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
@@ -598,10 +612,7 @@ def fromarrays(arrayList, dtype=None, shape=None, formats=None,
# and determine the formats.
formats = []
for obj in arrayList:
- if not isinstance(obj, ndarray):
- raise ValueError("item in the array list must be an ndarray.")
- formats.append(obj.dtype.str)
- formats = ','.join(formats)
+ formats.append(obj.dtype)
if dtype is not None:
descr = sb.dtype(dtype)
@@ -658,11 +669,11 @@ def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
>>> r.col1
array([456, 2])
>>> r.col2
- array(['dbe', 'de'],
- dtype='|S3')
+ array(['dbe', 'de'], dtype='<U3')
>>> import pickle
- >>> print(pickle.loads(pickle.dumps(r)))
- [(456, 'dbe', 1.2) (2, 'de', 1.3)]
+ >>> pickle.loads(pickle.dumps(r))
+ rec.array([(456, 'dbe', 1.2), ( 2, 'de', 1.3)],
+ dtype=[('col1', '<i8'), ('col2', '<U3'), ('col3', '<f8')])
"""
if formats is None and dtype is None: # slower
@@ -710,7 +721,7 @@ def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
a string"""
if dtype is None and formats is None:
- raise ValueError("Must have dtype= or formats=")
+ raise TypeError("fromstring() needs a 'dtype' or 'formats' argument")
if dtype is not None:
descr = sb.dtype(dtype)
@@ -737,9 +748,9 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
- If file is a string then that file is opened, else it is assumed
- to be a file object. The file object must support random access
- (i.e. it must have tell and seek methods).
+ If file is a string or a path-like object then that file is opened,
+ else it is assumed to be a file object. The file object must
+ support random access (i.e. it must have tell and seek methods).
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
@@ -749,7 +760,7 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
>>> a = a.newbyteorder('<')
>>> a.tofile(fd)
>>>
- >>> fd.seek(0)
+ >>> _ = fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print(r[5])
@@ -758,47 +769,52 @@ def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
(10,)
"""
+ if dtype is None and formats is None:
+ raise TypeError("fromfile() needs a 'dtype' or 'formats' argument")
+
if (shape is None or shape == 0):
shape = (-1,)
elif isinstance(shape, (int, long)):
shape = (shape,)
- name = 0
- if isinstance(fd, str):
- name = 1
- fd = open(fd, 'rb')
- if (offset > 0):
- fd.seek(offset, 1)
- size = get_remaining_size(fd)
-
- if dtype is not None:
- descr = sb.dtype(dtype)
+ if isfileobj(fd):
+ # file already opened
+ ctx = contextlib_nullcontext(fd)
else:
- descr = format_parser(formats, names, titles, aligned, byteorder)._descr
+ # open file
+ ctx = open(os_fspath(fd), 'rb')
- itemsize = descr.itemsize
+ with ctx as fd:
+ if (offset > 0):
+ fd.seek(offset, 1)
+ size = get_remaining_size(fd)
- shapeprod = sb.array(shape).prod()
- shapesize = shapeprod * itemsize
- if shapesize < 0:
- shape = list(shape)
- shape[shape.index(-1)] = size / -shapesize
- shape = tuple(shape)
- shapeprod = sb.array(shape).prod()
+ if dtype is not None:
+ descr = sb.dtype(dtype)
+ else:
+ descr = format_parser(formats, names, titles, aligned, byteorder)._descr
- nbytes = shapeprod * itemsize
+ itemsize = descr.itemsize
- if nbytes > size:
- raise ValueError(
- "Not enough bytes left in file for specified shape and type")
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
+ shapesize = shapeprod * itemsize
+ if shapesize < 0:
+ shape = list(shape)
+ shape[shape.index(-1)] = size // -shapesize
+ shape = tuple(shape)
+ shapeprod = sb.array(shape).prod(dtype=nt.intp)
- # create the array
- _array = recarray(shape, descr)
- nbytesread = fd.readinto(_array.data)
- if nbytesread != nbytes:
- raise IOError("Didn't read as many bytes as expected")
- if name:
- fd.close()
+ nbytes = shapeprod * itemsize
+
+ if nbytes > size:
+ raise ValueError(
+ "Not enough bytes left in file for specified shape and type")
+
+ # create the array
+ _array = recarray(shape, descr)
+ nbytesread = fd.readinto(_array.data)
+ if nbytesread != nbytes:
+ raise IOError("Didn't read as many bytes as expected")
return _array
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index bea9ff392..5f2f4a7b2 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -6,7 +6,9 @@ import pickle
import copy
import warnings
import platform
+import textwrap
from os.path import join
+
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
@@ -171,6 +173,11 @@ def check_math_capabilities(config, moredefs, mathlibs):
if config.check_gcc_function_attribute(dec, fn):
moredefs.append((fname2def(fn), 1))
+ for dec, fn, code, header in OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS:
+ if config.check_gcc_function_attribute_with_intrinsics(dec, fn, code,
+ header):
+ moredefs.append((fname2def(fn), 1))
+
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_gcc_variable_attribute(fn):
m = fn.replace("(", "_").replace(")", "_")
@@ -379,8 +386,9 @@ def check_mathlib(config_cmd):
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
- if config.check_compiler_gcc4():
- return '__attribute__((visibility("hidden")))'
+ hide = '__attribute__((visibility("hidden")))'
+ if config.check_gcc_function_attribute(hide, 'hideme'):
+ return hide
else:
return ''
@@ -455,50 +463,53 @@ def configuration(parent_package='',top_path=None):
rep = check_long_double_representation(config_cmd)
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
+ if check_for_right_shift_internal_compiler_error(config_cmd):
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_LONGLONG_right_shift')
+ moredefs.append('NPY_DO_NOT_OPTIMIZE_ULONGLONG_right_shift')
+
# Py3K check
- if sys.version_info[0] == 3:
+ if sys.version_info[0] >= 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
- target_f = open(target, 'w')
- for d in moredefs:
- if isinstance(d, str):
- target_f.write('#define %s\n' % (d))
+ with open(target, 'w') as target_f:
+ for d in moredefs:
+ if isinstance(d, str):
+ target_f.write('#define %s\n' % (d))
+ else:
+ target_f.write('#define %s %s\n' % (d[0], d[1]))
+
+ # define inline to our keyword, or nothing
+ target_f.write('#ifndef __cplusplus\n')
+ if inline == 'inline':
+ target_f.write('/* #undef inline */\n')
else:
- target_f.write('#define %s %s\n' % (d[0], d[1]))
-
- # define inline to our keyword, or nothing
- target_f.write('#ifndef __cplusplus\n')
- if inline == 'inline':
- target_f.write('/* #undef inline */\n')
- else:
- target_f.write('#define inline %s\n' % inline)
- target_f.write('#endif\n')
-
- # add the guard to make sure config.h is never included directly,
- # but always through npy_config.h
- target_f.write("""
-#ifndef _NPY_NPY_CONFIG_H_
-#error config.h should never be included directly, include npy_config.h instead
-#endif
-""")
-
- target_f.close()
- print('File:', target)
- target_f = open(target)
- print(target_f.read())
- target_f.close()
- print('EOF')
+ target_f.write('#define inline %s\n' % inline)
+ target_f.write('#endif\n')
+
+ # add the guard to make sure config.h is never included directly,
+ # but always through npy_config.h
+ target_f.write(textwrap.dedent("""
+ #ifndef _NPY_NPY_CONFIG_H_
+ #error config.h should never be included directly, include npy_config.h instead
+ #endif
+ """))
+
+ log.info('File: %s' % target)
+ with open(target) as target_f:
+ log.info(target_f.read())
+ log.info('EOF')
else:
mathlibs = []
- target_f = open(target)
- for line in target_f:
- s = '#define MATHLIB'
- if line.startswith(s):
- value = line[len(s):].strip()
- if value:
- mathlibs.extend(value.split(','))
- target_f.close()
+ with open(target) as target_f:
+ for line in target_f:
+ s = '#define MATHLIB'
+ if line.startswith(s):
+ value = line[len(s):].strip()
+ if value:
+ mathlibs.extend(value.split(','))
# Ugly: this can be called within a library and not an extension,
# in which case there is no libraries attributes (and none is
@@ -561,27 +572,25 @@ def configuration(parent_package='',top_path=None):
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
- target_f = open(target, 'w')
- for d in moredefs:
- if isinstance(d, str):
- target_f.write('#define %s\n' % (d))
- else:
- target_f.write('#define %s %s\n' % (d[0], d[1]))
-
- # Define __STDC_FORMAT_MACROS
- target_f.write("""
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS 1
-#endif
-""")
- target_f.close()
+ with open(target, 'w') as target_f:
+ for d in moredefs:
+ if isinstance(d, str):
+ target_f.write('#define %s\n' % (d))
+ else:
+ target_f.write('#define %s %s\n' % (d[0], d[1]))
+
+ # Define __STDC_FORMAT_MACROS
+ target_f.write(textwrap.dedent("""
+ #ifndef __STDC_FORMAT_MACROS
+ #define __STDC_FORMAT_MACROS 1
+ #endif
+ """))
# Dump the numpyconfig.h header to stdout
- print('File: %s' % target)
- target_f = open(target)
- print(target_f.read())
- target_f.close()
- print('EOF')
+ log.info('File: %s' % target)
+ with open(target) as target_f:
+ log.info(target_f.read())
+ log.info('EOF')
config.add_data_files((header_dir, target))
return target
@@ -607,7 +616,7 @@ def configuration(parent_package='',top_path=None):
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
- config.add_data_files('include/numpy/*.h')
+ config.add_data_dir('include/numpy')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
@@ -630,23 +639,6 @@ def configuration(parent_package='',top_path=None):
]
#######################################################################
- # dummy module #
- #######################################################################
-
- # npymath needs the config.h and numpyconfig.h files to be generated, but
- # build_clib cannot handle generate_config_h and generate_numpyconfig_h
- # (don't ask). Because clib are generated before extensions, we have to
- # explicitly add an extension which has generate_config_h and
- # generate_numpyconfig_h as sources *before* adding npymath.
-
- config.add_extension('_dummy',
- sources=[join('src', 'dummymodule.c'),
- generate_config_h,
- generate_numpyconfig_h,
- generate_numpy_api]
- )
-
- #######################################################################
# npymath library #
#######################################################################
@@ -677,9 +669,11 @@ def configuration(parent_package='',top_path=None):
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')
]
-
+
# Must be true for CRT compilers but not MinGW/cygwin. See gh-9977.
- is_msvc = platform.system() == 'Windows'
+ # Intel and Clang also don't seem happy with /GL
+ is_msvc = (platform.platform().startswith('Windows') and
+ platform.python_compiler().startswith('MS'))
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib',
@@ -697,9 +691,12 @@ def configuration(parent_package='',top_path=None):
#######################################################################
# This library is created for the build but it is not installed
- npysort_sources = [join('src', 'npysort', 'quicksort.c.src'),
+ npysort_sources = [join('src', 'common', 'npy_sort.h.src'),
+ join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
+ join('src', 'npysort', 'timsort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
+ join('src', 'npysort', 'radixsort.c.src'),
join('src', 'common', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'common', 'npy_binsearch.h.src'),
@@ -730,13 +727,17 @@ def configuration(parent_package='',top_path=None):
join('src', 'common', 'cblasfuncs.h'),
join('src', 'common', 'lowlevel_strided_loops.h'),
join('src', 'common', 'mem_overlap.h'),
+ join('src', 'common', 'npy_cblas.h'),
join('src', 'common', 'npy_config.h'),
+ join('src', 'common', 'npy_ctypes.h'),
join('src', 'common', 'npy_extint128.h'),
+ join('src', 'common', 'npy_import.h'),
join('src', 'common', 'npy_longdouble.h'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'common', 'ucsnarrow.h'),
join('src', 'common', 'ufunc_override.h'),
join('src', 'common', 'umathmodule.h'),
+ join('src', 'common', 'numpyos.h'),
]
common_src = [
@@ -746,6 +747,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'common', 'templ_common.h.src'),
join('src', 'common', 'ucsnarrow.c'),
join('src', 'common', 'ufunc_override.c'),
+ join('src', 'common', 'numpyos.c'),
]
blas_info = get_info('blas_opt', 0)
@@ -768,6 +770,7 @@ def configuration(parent_package='',top_path=None):
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
+ join('src', 'multiarray', 'arrayfunction_override.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
@@ -785,7 +788,6 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'number.h'),
- join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
@@ -821,6 +823,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
+ join('src', 'multiarray', 'arrayfunction_override.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'compiled_base.c'),
@@ -851,7 +854,6 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
- join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
@@ -875,10 +877,9 @@ def configuration(parent_package='',top_path=None):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
- f = open(target, 'w')
- f.write(generate_umath.make_code(generate_umath.defdict,
- generate_umath.__file__))
- f.close()
+ with open(target, 'w') as f:
+ f.write(generate_umath.make_code(generate_umath.defdict,
+ generate_umath.__file__))
return []
umath_src = [
@@ -888,6 +889,10 @@ def configuration(parent_package='',top_path=None):
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
+ join('src', 'umath', 'matmul.h.src'),
+ join('src', 'umath', 'matmul.c.src'),
+ join('src', 'umath', 'clip.h.src'),
+ join('src', 'umath', 'clip.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'extobj.c'),
join('src', 'umath', 'cpuid.c'),
@@ -901,14 +906,15 @@ def configuration(parent_package='',top_path=None):
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('src', 'multiarray', 'common.h'),
+ join('src', 'multiarray', 'number.h'),
join('src', 'common', 'templ_common.h.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'override.h'),
join(codegen_dir, 'generate_ufunc_api.py'),
- ]
+ ]
config.add_extension('_multiarray_umath',
- sources=multiarray_src + umath_src +
+ sources=multiarray_src + umath_src +
npymath_sources + common_src +
[generate_config_h,
generate_numpyconfig_h,
@@ -918,7 +924,7 @@ def configuration(parent_package='',top_path=None):
generate_umath_c,
generate_ufunc_api,
],
- depends=deps + multiarray_deps + umath_deps +
+ depends=deps + multiarray_deps + umath_deps +
common_deps,
libraries=['npymath', 'npysort'],
extra_info=extra_info)
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index e637dbc20..84b78b585 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -5,6 +5,7 @@ import sys
import warnings
import copy
import binascii
+import textwrap
from numpy.distutils.misc_util import mingw32
@@ -14,7 +15,7 @@ from numpy.distutils.misc_util import mingw32
#-------------------
# How to change C_API_VERSION ?
# - increase C_API_VERSION value
-# - record the hash for the new C API with the script cversions.py
+# - record the hash for the new C API with the cversions.py script
# and add the hash to cversions.txt
# The hash values are used to remind developers when the C API number was not
# updated - generates a MismatchCAPIWarning warning which is turned into an
@@ -41,7 +42,8 @@ C_ABI_VERSION = 0x01000009
# 0x0000000b - 1.13.x
# 0x0000000c - 1.14.x
# 0x0000000c - 1.15.x
-C_API_VERSION = 0x0000000c
+# 0x0000000d - 1.16.x
+C_API_VERSION = 0x0000000d
class MismatchCAPIWarning(Warning):
pass
@@ -80,21 +82,20 @@ def get_api_versions(apiversion, codegen_dir):
return curapi_hash, apis_hash[apiversion]
def check_api_version(apiversion, codegen_dir):
- """Emits a MismacthCAPIWarning if the C API version needs updating."""
+ """Emits a MismatchCAPIWarning if the C API version needs updating."""
curapi_hash, api_hash = get_api_versions(apiversion, codegen_dir)
# If different hash, it means that the api .txt files in
# codegen_dir have been updated without the API version being
# updated. Any modification in those .txt files should be reflected
# in the api and eventually abi versions.
- # To compute the checksum of the current API, use
- # code_generators/cversions.py script
+ # To compute the checksum of the current API, use numpy/core/cversions.py
if not curapi_hash == api_hash:
msg = ("API mismatch detected, the C API version "
"numbers have to be updated. Current C api version is %d, "
- "with checksum %s, but recorded checksum for C API version %d in "
- "codegen_dir/cversions.txt is %s. If functions were added in the "
- "C API, you have to update C_API_VERSION in %s."
+ "with checksum %s, but recorded checksum for C API version %d "
+ "in core/codegen_dir/cversions.txt is %s. If functions were "
+ "added in the C API, you have to update C_API_VERSION in %s."
)
warnings.warn(msg % (apiversion, curapi_hash, apiversion, api_hash,
__file__),
@@ -117,6 +118,7 @@ OPTIONAL_HEADERS = [
# sse headers only enabled automatically on amd64/x32 builds
"xmmintrin.h", # SSE
"emmintrin.h", # SSE2
+ "immintrin.h", # AVX
"features.h", # for glibc version linux
"xlocale.h", # see GH#8367
"dlfcn.h", # dladdr
@@ -136,6 +138,8 @@ OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
# broken on OSX 10.11, make sure its not optimized away
("volatile int r = __builtin_cpu_supports", '"sse"',
"stdio.h", "__BUILTIN_CPU_SUPPORTS"),
+ ("volatile int r = __builtin_cpu_supports", '"avx512f"',
+ "stdio.h", "__BUILTIN_CPU_SUPPORTS_AVX512F"),
# MMX only needed for icc, but some clangs don't have it
("_m_from_int64", '0', "emmintrin.h"),
("_mm_load_ps", '(float*)0', "xmmintrin.h"), # SSE
@@ -148,6 +152,8 @@ OPTIONAL_INTRINSICS = [("__builtin_isnan", '5.'),
"stdio.h", "LINK_AVX"),
("__asm__ volatile", '"vpand %ymm1, %ymm2, %ymm3"',
"stdio.h", "LINK_AVX2"),
+ ("__asm__ volatile", '"vpaddd %zmm1, %zmm2, %zmm3"',
+ "stdio.h", "LINK_AVX512F"),
("__asm__ volatile", '"xgetbv"', "stdio.h", "XGETBV"),
]
@@ -164,6 +170,24 @@ OPTIONAL_FUNCTION_ATTRIBUTES = [('__attribute__((optimize("unroll-loops")))',
'attribute_target_avx'),
('__attribute__((target ("avx2")))',
'attribute_target_avx2'),
+ ('__attribute__((target ("avx512f")))',
+ 'attribute_target_avx512f'),
+ ]
+
+# function attributes with intrinsics
+# To ensure your compiler can compile avx intrinsics with just the attributes
+# gcc 4.8.4 support attributes but not with intrisics
+# tested via "#include<%s> int %s %s(void *){code; return 0;};" % (header, attribute, name, code)
+# function name will be converted to HAVE_<upper-case-name> preprocessor macro
+OPTIONAL_FUNCTION_ATTRIBUTES_WITH_INTRINSICS = [('__attribute__((target("avx2,fma")))',
+ 'attribute_target_avx2_with_intrinsics',
+ '__m256 temp = _mm256_set1_ps(1.0); temp = \
+ _mm256_fmadd_ps(temp, temp, temp)',
+ 'immintrin.h'),
+ ('__attribute__((target("avx512f")))',
+ 'attribute_target_avx512f_with_intrinsics',
+ '__m512 temp = _mm512_set1_ps(1.0)',
+ 'immintrin.h'),
]
# variable attributes tested via "int %s a" % attribute
@@ -291,30 +315,24 @@ def pyod(filename):
def _pyod2():
out = []
- fid = open(filename, 'rb')
- try:
+ with open(filename, 'rb') as fid:
yo = [int(oct(int(binascii.b2a_hex(o), 16))) for o in fid.read()]
- for i in range(0, len(yo), 16):
- line = ['%07d' % int(oct(i))]
- line.extend(['%03d' % c for c in yo[i:i+16]])
- out.append(" ".join(line))
- return out
- finally:
- fid.close()
+ for i in range(0, len(yo), 16):
+ line = ['%07d' % int(oct(i))]
+ line.extend(['%03d' % c for c in yo[i:i+16]])
+ out.append(" ".join(line))
+ return out
def _pyod3():
out = []
- fid = open(filename, 'rb')
- try:
+ with open(filename, 'rb') as fid:
yo2 = [oct(o)[2:] for o in fid.read()]
- for i in range(0, len(yo2), 16):
- line = ['%07d' % int(oct(i)[2:])]
- line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
- out.append(" ".join(line))
- return out
- finally:
- fid.close()
+ for i in range(0, len(yo2), 16):
+ line = ['%07d' % int(oct(i)[2:])]
+ line.extend(['%03d' % int(c) for c in yo2[i:i+16]])
+ out.append(" ".join(line))
+ return out
if sys.version_info[0] < 3:
return _pyod2()
@@ -398,3 +416,41 @@ def long_double_representation(lines):
else:
# We never detected the after_sequence
raise ValueError("Could not lock sequences (%s)" % saw)
+
+
+def check_for_right_shift_internal_compiler_error(cmd):
+ """
+ On our arm CI, this fails with an internal compilation error
+
+ The failure looks like the following, and can be reproduced on ARM64 GCC 5.4:
+
+ <source>: In function 'right_shift':
+ <source>:4:20: internal compiler error: in expand_shift_1, at expmed.c:2349
+ ip1[i] = ip1[i] >> in2;
+ ^
+ Please submit a full bug report,
+ with preprocessed source if appropriate.
+ See <http://gcc.gnu.org/bugs.html> for instructions.
+ Compiler returned: 1
+
+ This function returns True if this compiler bug is present, and we need to
+ turn off optimization for the function
+ """
+ cmd._check_compiler()
+ has_optimize = cmd.try_compile(textwrap.dedent("""\
+ __attribute__((optimize("O3"))) void right_shift() {}
+ """), None, None)
+ if not has_optimize:
+ return False
+
+ no_err = cmd.try_compile(textwrap.dedent("""\
+ typedef long the_type; /* fails also for unsigned and long long */
+ __attribute__((optimize("O3"))) void right_shift(the_type in2, the_type *ip1, int n) {
+ for (int i = 0; i < n; i++) {
+ if (in2 < (the_type)sizeof(the_type) * 8) {
+ ip1[i] = ip1[i] >> in2;
+ }
+ }
+ }
+ """), None, None)
+ return not no_err
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index feb1605bc..d7e769e62 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -3,11 +3,26 @@ from __future__ import division, absolute_import, print_function
__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
'stack', 'vstack']
+import functools
+import operator
+import warnings
from . import numeric as _nx
-from .numeric import array, asanyarray, newaxis
+from . import overrides
+from ._asarray import array, asanyarray
from .multiarray import normalize_axis_index
+from . import fromnumeric as _from_nx
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+def _atleast_1d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
"""
Convert inputs to arrays with at least one dimension.
@@ -33,13 +48,13 @@ def atleast_1d(*arys):
Examples
--------
>>> np.atleast_1d(1.0)
- array([ 1.])
+ array([1.])
>>> x = np.arange(9.0).reshape(3,3)
>>> np.atleast_1d(x)
- array([[ 0., 1., 2.],
- [ 3., 4., 5.],
- [ 6., 7., 8.]])
+ array([[0., 1., 2.],
+ [3., 4., 5.],
+ [6., 7., 8.]])
>>> np.atleast_1d(x) is x
True
@@ -60,6 +75,12 @@ def atleast_1d(*arys):
else:
return res
+
+def _atleast_2d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
"""
View inputs as arrays with at least two dimensions.
@@ -85,11 +106,11 @@ def atleast_2d(*arys):
Examples
--------
>>> np.atleast_2d(3.0)
- array([[ 3.]])
+ array([[3.]])
>>> x = np.arange(3.0)
>>> np.atleast_2d(x)
- array([[ 0., 1., 2.]])
+ array([[0., 1., 2.]])
>>> np.atleast_2d(x).base is x
True
@@ -103,7 +124,7 @@ def atleast_2d(*arys):
if ary.ndim == 0:
result = ary.reshape(1, 1)
elif ary.ndim == 1:
- result = ary[newaxis,:]
+ result = ary[_nx.newaxis, :]
else:
result = ary
res.append(result)
@@ -112,6 +133,12 @@ def atleast_2d(*arys):
else:
return res
+
+def _atleast_3d_dispatcher(*arys):
+ return arys
+
+
+@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
"""
View inputs as arrays with at least three dimensions.
@@ -139,7 +166,7 @@ def atleast_3d(*arys):
Examples
--------
>>> np.atleast_3d(3.0)
- array([[[ 3.]]])
+ array([[[3.]]])
>>> x = np.arange(3.0)
>>> np.atleast_3d(x).shape
@@ -152,7 +179,7 @@ def atleast_3d(*arys):
True
>>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
- ... print(arr, arr.shape)
+ ... print(arr, arr.shape) # doctest: +SKIP
...
[[[1]
[2]]] (1, 2, 1)
@@ -167,9 +194,9 @@ def atleast_3d(*arys):
if ary.ndim == 0:
result = ary.reshape(1, 1, 1)
elif ary.ndim == 1:
- result = ary[newaxis,:, newaxis]
+ result = ary[_nx.newaxis, :, _nx.newaxis]
elif ary.ndim == 2:
- result = ary[:,:, newaxis]
+ result = ary[:, :, _nx.newaxis]
else:
result = ary
res.append(result)
@@ -179,6 +206,22 @@ def atleast_3d(*arys):
return res
+def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
+ if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
+ warnings.warn('arrays to stack must be passed as a "sequence" type '
+ 'such as list or tuple. Support for non-sequence '
+ 'iterables such as generators is deprecated as of '
+ 'NumPy 1.16 and will raise an error in the future.',
+ FutureWarning, stacklevel=stacklevel)
+ return ()
+ return arrays
+
+
+def _vhstack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup):
"""
Stack arrays in sequence vertically (row wise).
@@ -231,8 +274,16 @@ def vstack(tup):
[4]])
"""
- return _nx.concatenate([atleast_2d(_m) for _m in tup], 0)
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+ arrs = atleast_2d(*tup)
+ if not isinstance(arrs, list):
+ arrs = [arrs]
+ return _nx.concatenate(arrs, 0)
+
+@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup):
"""
Stack arrays in sequence horizontally (column wise).
@@ -280,7 +331,13 @@ def hstack(tup):
[3, 4]])
"""
- arrs = [atleast_1d(_m) for _m in tup]
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+
+ arrs = atleast_1d(*tup)
+ if not isinstance(arrs, list):
+ arrs = [arrs]
# As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
if arrs and arrs[0].ndim == 1:
return _nx.concatenate(arrs, 0)
@@ -288,13 +345,23 @@ def hstack(tup):
return _nx.concatenate(arrs, 1)
+def _stack_dispatcher(arrays, axis=None, out=None):
+ arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
+ if out is not None:
+ # optimize for the typical case where only arrays is provided
+ arrays = list(arrays)
+ arrays.append(out)
+ return arrays
+
+
+@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None):
"""
Join a sequence of arrays along a new axis.
- The `axis` parameter specifies the index of the new axis in the dimensions
- of the result. For example, if ``axis=0`` it will be the first dimension
- and if ``axis=-1`` it will be the last dimension.
+ The ``axis`` parameter specifies the index of the new axis in the
+ dimensions of the result. For example, if ``axis=0`` it will be the first
+ dimension and if ``axis=-1`` it will be the last dimension.
.. versionadded:: 1.10.0
@@ -302,8 +369,10 @@ def stack(arrays, axis=0, out=None):
----------
arrays : sequence of array_like
Each array must have the same shape.
+
axis : int, optional
The axis in the result array along which the input arrays are stacked.
+
out : ndarray, optional
If provided, the destination to place the result. The shape must be
correct, matching that of what stack would have returned if no
@@ -344,11 +413,15 @@ def stack(arrays, axis=0, out=None):
[3, 4]])
"""
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(arrays, stacklevel=2)
+
arrays = [asanyarray(arr) for arr in arrays]
if not arrays:
raise ValueError('need at least one array to stack')
- shapes = set(arr.shape for arr in arrays)
+ shapes = {arr.shape for arr in arrays}
if len(shapes) != 1:
raise ValueError('all input arrays must have the same shape')
@@ -360,6 +433,14 @@ def stack(arrays, axis=0, out=None):
return _nx.concatenate(expanded_arrays, axis=axis, out=out)
+# Internal functions to eliminate the overhead of repeated dispatch in one of
+# the two possible paths inside np.block.
+# Use getattr to protect against __array_function__ being disabled.
+_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
+_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
+_concatenate = getattr(_from_nx.concatenate, '__wrapped__', _from_nx.concatenate)
+
+
def _block_format_index(index):
"""
Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
@@ -394,6 +475,10 @@ def _block_check_depths_match(arrays, parent_index=[]):
refer to it, and the last index along the empty axis will be `None`.
max_arr_ndim : int
The maximum of the ndims of the arrays nested in `arrays`.
+ final_size : int
+ The number of elements in the final array. This is used to motivate
+ the choice of algorithm, based on benchmarking wisdom.
+
"""
if type(arrays) is tuple:
# not strictly necessary, but saves us from:
@@ -412,8 +497,9 @@ def _block_check_depths_match(arrays, parent_index=[]):
idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
for i, arr in enumerate(arrays))
- first_index, max_arr_ndim = next(idxs_ndims)
- for index, ndim in idxs_ndims:
+ first_index, max_arr_ndim, final_size = next(idxs_ndims)
+ for index, ndim, size in idxs_ndims:
+ final_size += size
if ndim > max_arr_ndim:
max_arr_ndim = ndim
if len(index) != len(first_index):
@@ -428,13 +514,15 @@ def _block_check_depths_match(arrays, parent_index=[]):
# propagate our flag that indicates an empty list at the bottom
if index[-1] is None:
first_index = index
- return first_index, max_arr_ndim
+
+ return first_index, max_arr_ndim, final_size
elif type(arrays) is list and len(arrays) == 0:
# We've 'bottomed out' on an empty list
- return parent_index + [None], 0
+ return parent_index + [None], 0, 0
else:
# We've 'bottomed out' - arrays is either a scalar or an array
- return parent_index, _nx.ndim(arrays)
+ size = _size(arrays)
+ return parent_index, _ndim(arrays), size
def _atleast_nd(a, ndim):
@@ -443,9 +531,132 @@ def _atleast_nd(a, ndim):
return array(a, ndmin=ndim, copy=False, subok=True)
+def _accumulate(values):
+ # Helper function because Python 2.7 doesn't have
+ # itertools.accumulate
+ value = 0
+ accumulated = []
+ for v in values:
+ value += v
+ accumulated.append(value)
+ return accumulated
+
+
+def _concatenate_shapes(shapes, axis):
+ """Given array shapes, return the resulting shape and slices prefixes.
+
+ These help in nested concatenation.
+ Returns
+ -------
+ shape: tuple of int
+ This tuple satisfies:
+ ```
+ shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
+ shape == concatenate(arrs, axis).shape
+ ```
+
+ slice_prefixes: tuple of (slice(start, end), )
+ For a list of arrays being concatenated, this returns the slice
+ in the larger array at axis that needs to be sliced into.
+
+ For example, the following holds:
+ ```
+ ret = concatenate([a, b, c], axis)
+ _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)
+
+ ret[(slice(None),) * axis + sl_a] == a
+ ret[(slice(None),) * axis + sl_b] == b
+ ret[(slice(None),) * axis + sl_c] == c
+ ```
+
+ These are called slice prefixes since they are used in the recursive
+ blocking algorithm to compute the left-most slices during the
+ recursion. Therefore, they must be prepended to rest of the slice
+ that was computed deeper in the recursion.
+
+ These are returned as tuples to ensure that they can quickly be added
+ to an existing slice tuple without creating a new tuple every time.
+
+ """
+ # Cache a result that will be reused.
+ shape_at_axis = [shape[axis] for shape in shapes]
+
+ # Take a shape, any shape
+ first_shape = shapes[0]
+ first_shape_pre = first_shape[:axis]
+ first_shape_post = first_shape[axis+1:]
+
+ if any(shape[:axis] != first_shape_pre or
+ shape[axis+1:] != first_shape_post for shape in shapes):
+ raise ValueError(
+ 'Mismatched array shapes in block along axis {}.'.format(axis))
+
+ shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
+
+ offsets_at_axis = _accumulate(shape_at_axis)
+ slice_prefixes = [(slice(start, end),)
+ for start, end in zip([0] + offsets_at_axis,
+ offsets_at_axis)]
+ return shape, slice_prefixes
+
+
+def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
+ """
+ Returns the shape of the final array, along with a list
+ of slices and a list of arrays that can be used for assignment inside the
+ new array
+
+ Parameters
+ ----------
+ arrays : nested list of arrays
+ The arrays to check
+ max_depth : int
+ The depth of nested lists
+ result_ndim : int
+ The number of dimensions in the final array.
+
+ Returns
+ -------
+ shape : tuple of int
+ The shape that the final array will take on.
+ slices: list of tuple of slices
+ The slices into the full array required for assignment. These are
+ required to be prepended with ``(Ellipsis, )`` to obtain the correct
+ final index.
+ arrays: list of ndarray
+ The data to assign to each slice of the full array
+
+ """
+ if depth < max_depth:
+ shapes, slices, arrays = zip(
+ *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
+ for arr in arrays])
+
+ axis = result_ndim - max_depth + depth
+ shape, slice_prefixes = _concatenate_shapes(shapes, axis)
+
+ # Prepend the slice prefix and flatten the slices
+ slices = [slice_prefix + the_slice
+ for slice_prefix, inner_slices in zip(slice_prefixes, slices)
+ for the_slice in inner_slices]
+
+ # Flatten the array list
+ arrays = functools.reduce(operator.add, arrays)
+
+ return shape, slices, arrays
+ else:
+ # We've 'bottomed out' - arrays is either a scalar or an array
+ # type(arrays) is not list
+ # Return the slice and the array inside a list to be consistent with
+ # the recursive case.
+ arr = _atleast_nd(arrays, result_ndim)
+ return arr.shape, [()], [arr]
+
+
def _block(arrays, max_depth, result_ndim, depth=0):
"""
- Internal implementation of block. `arrays` is the argument passed to
+ Internal implementation of block based on repeated concatenation.
+ `arrays` is the argument passed to
block. `max_depth` is the depth of nested lists within `arrays` and
`result_ndim` is the greatest of the dimensions of the arrays in
`arrays` and the depth of the lists in `arrays` (see block docstring
@@ -454,13 +665,26 @@ def _block(arrays, max_depth, result_ndim, depth=0):
if depth < max_depth:
arrs = [_block(arr, max_depth, result_ndim, depth+1)
for arr in arrays]
- return _nx.concatenate(arrs, axis=-(max_depth-depth))
+ return _concatenate(arrs, axis=-(max_depth-depth))
else:
# We've 'bottomed out' - arrays is either a scalar or an array
# type(arrays) is not list
return _atleast_nd(arrays, result_ndim)
+def _block_dispatcher(arrays):
+ # Use type(...) is list to match the behavior of np.block(), which special
+ # cases list specifically rather than allowing for generic iterables or
+ # tuple. Also, we know that list.__array_function__ will never exist.
+ if type(arrays) is list:
+ for subarrays in arrays:
+ for subarray in _block_dispatcher(subarrays):
+ yield subarray
+ else:
+ yield arrays
+
+
+@array_function_dispatch(_block_dispatcher)
def block(arrays):
"""
Assemble an nd-array from nested lists of blocks.
@@ -555,11 +779,11 @@ def block(arrays):
... [A, np.zeros((2, 3))],
... [np.ones((3, 2)), B ]
... ])
- array([[ 2., 0., 0., 0., 0.],
- [ 0., 2., 0., 0., 0.],
- [ 1., 1., 3., 0., 0.],
- [ 1., 1., 0., 3., 0.],
- [ 1., 1., 0., 0., 3.]])
+ array([[2., 0., 0., 0., 0.],
+ [0., 2., 0., 0., 0.],
+ [1., 1., 3., 0., 0.],
+ [1., 1., 0., 3., 0.],
+ [1., 1., 0., 0., 3.]])
With a list of depth 1, `block` can be used as `hstack`
@@ -569,7 +793,7 @@ def block(arrays):
>>> a = np.array([1, 2, 3])
>>> b = np.array([2, 3, 4])
>>> np.block([a, b, 10]) # hstack([a, b, 10])
- array([1, 2, 3, 2, 3, 4, 10])
+ array([ 1, 2, 3, 2, 3, 4, 10])
>>> A = np.ones((2, 2), int)
>>> B = 2 * A
@@ -609,7 +833,38 @@ def block(arrays):
"""
- bottom_index, arr_ndim = _block_check_depths_match(arrays)
+ arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)
+
+ # It was found through benchmarking that making an array of final size
+ # around 256x256 was faster by straight concatenation on a
+ # i7-7700HQ processor and dual channel ram 2400MHz.
+ # It didn't seem to matter heavily on the dtype used.
+ #
+ # A 2D array using repeated concatenation requires 2 copies of the array.
+ #
+ # The fastest algorithm will depend on the ratio of CPU power to memory
+ # speed.
+ # One can monitor the results of the benchmark
+ # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
+ # to tune this parameter until a C version of the `_block_info_recursion`
+ # algorithm is implemented which would likely be faster than the python
+ # version.
+ if list_ndim * final_size > (2 * 512 * 512):
+ return _block_slicing(arrays, list_ndim, result_ndim)
+ else:
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+
+# These helper functions are mostly used for testing.
+# They allow us to write tests that directly call `_block_slicing`
+# or `_block_concatenate` without blocking large arrays to force the wisdom
+# to trigger the desired path.
+def _block_setup(arrays):
+ """
+ Returns
+ (`arrays`, list_ndim, result_ndim, final_size)
+ """
+ bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
list_ndim = len(bottom_index)
if bottom_index and bottom_index[-1] is None:
raise ValueError(
@@ -617,7 +872,31 @@ def block(arrays):
_block_format_index(bottom_index)
)
)
- result = _block(arrays, list_ndim, max(arr_ndim, list_ndim))
+ result_ndim = max(arr_ndim, list_ndim)
+ return arrays, list_ndim, result_ndim, final_size
+
+
+def _block_slicing(arrays, list_ndim, result_ndim):
+ shape, slices, arrays = _block_info_recursion(
+ arrays, list_ndim, result_ndim)
+ dtype = _nx.result_type(*[arr.dtype for arr in arrays])
+
+ # Test preferring F only in the case that all input arrays are F
+ F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
+ C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
+ order = 'F' if F_order and not C_order else 'C'
+ result = _nx.empty(shape=shape, dtype=dtype, order=order)
+ # Note: In a c implementation, the function
+ # PyArray_CreateMultiSortedStridePerm could be used for more advanced
+ # guessing of the desired order.
+
+ for the_slice, arr in zip(slices, arrays):
+ result[(Ellipsis,) + the_slice] = arr
+ return result
+
+
+def _block_concatenate(arrays, list_ndim, result_ndim):
+ result = _block(arrays, list_ndim, result_ndim)
if list_ndim == 0:
# Catch an edge case where _block returns a view because
# `arrays` is a single numpy array and not a list of numpy arrays.
diff --git a/numpy/core/src/common/array_assign.c b/numpy/core/src/common/array_assign.c
index ac3fdbef7..0ac1b01c6 100644
--- a/numpy/core/src/common/array_assign.c
+++ b/numpy/core/src/common/array_assign.c
@@ -79,7 +79,7 @@ broadcast_error: {
Py_DECREF(errmsg);
return -1;
- }
+ }
}
/* See array_assign.h for parameter documentation */
@@ -125,9 +125,13 @@ raw_array_is_aligned(int ndim, npy_intp *shape,
return npy_is_aligned((void *)align_check, alignment);
}
- else {
+ else if (alignment == 1) {
return 1;
}
+ else {
+ /* always return false for alignment == 0, which means cannot-be-aligned */
+ return 0;
+ }
}
NPY_NO_EXPORT int
diff --git a/numpy/core/src/common/array_assign.h b/numpy/core/src/common/array_assign.h
index 07438c5e8..69ef56bb4 100644
--- a/numpy/core/src/common/array_assign.h
+++ b/numpy/core/src/common/array_assign.h
@@ -87,8 +87,10 @@ broadcast_strides(int ndim, npy_intp *shape,
/*
* Checks whether a data pointer + set of strides refers to a raw
- * array whose elements are all aligned to a given alignment.
- * alignment should be a power of two.
+ * array whose elements are all aligned to a given alignment. Returns
+ * 1 if data is aligned to alignment or 0 if not.
+ * alignment should be a power of two, or may be the sentinel value 0 to mean
+ * cannot-be-aligned, in which case 0 (false) is always returned.
*/
NPY_NO_EXPORT int
raw_array_is_aligned(int ndim, npy_intp *shape,
diff --git a/numpy/core/src/common/cblasfuncs.c b/numpy/core/src/common/cblasfuncs.c
index 6460c5db1..39572fed4 100644
--- a/numpy/core/src/common/cblasfuncs.c
+++ b/numpy/core/src/common/cblasfuncs.c
@@ -182,12 +182,13 @@ _select_matrix_shape(PyArrayObject *array)
* This also makes sure that the data segment is aligned with
* an itemsize address as well by returning one if not true.
*/
-static int
+NPY_NO_EXPORT int
_bad_strides(PyArrayObject *ap)
{
int itemsize = PyArray_ITEMSIZE(ap);
int i, N=PyArray_NDIM(ap);
npy_intp *strides = PyArray_STRIDES(ap);
+ npy_intp *dims = PyArray_DIMS(ap);
if (((npy_intp)(PyArray_DATA(ap)) % itemsize) != 0) {
return 1;
@@ -196,12 +197,14 @@ _bad_strides(PyArrayObject *ap)
if ((strides[i] < 0) || (strides[i] % itemsize) != 0) {
return 1;
}
+ if ((strides[i] == 0 && dims[i] > 1)) {
+ return 1;
+ }
}
return 0;
}
-
/*
* dot(a,b)
* Returns the dot product of a and b for arrays of floating point types.
diff --git a/numpy/core/src/common/get_attr_string.h b/numpy/core/src/common/get_attr_string.h
index bec87c5ed..d458d9550 100644
--- a/numpy/core/src/common/get_attr_string.h
+++ b/numpy/core/src/common/get_attr_string.h
@@ -103,7 +103,6 @@ PyArray_LookupSpecial(PyObject *obj, char *name)
if (_is_basic_python_type(tp)) {
return NULL;
}
-
return maybe_get_attr((PyObject *)tp, name);
}
diff --git a/numpy/core/src/common/lowlevel_strided_loops.h b/numpy/core/src/common/lowlevel_strided_loops.h
index 5f139cffb..bacd27473 100644
--- a/numpy/core/src/common/lowlevel_strided_loops.h
+++ b/numpy/core/src/common/lowlevel_strided_loops.h
@@ -4,6 +4,9 @@
#include <npy_config.h>
#include "mem_overlap.h"
+/* For PyArray_ macros used below */
+#include "numpy/ndarrayobject.h"
+
/*
* NOTE: This API should remain private for the time being, to allow
* for further refinement. I think the 'aligned' mechanism
diff --git a/numpy/core/src/common/npy_ctypes.h b/numpy/core/src/common/npy_ctypes.h
new file mode 100644
index 000000000..c0cc4f1a1
--- /dev/null
+++ b/numpy/core/src/common/npy_ctypes.h
@@ -0,0 +1,50 @@
+#ifndef NPY_CTYPES_H
+#define NPY_CTYPES_H
+
+#include <Python.h>
+
+#include "npy_import.h"
+
+/*
+ * Check if a python type is a ctypes class.
+ *
+ * Works like the Py<type>_Check functions, returning true if the argument
+ * looks like a ctypes object.
+ *
+ * This entire function is just a wrapper around the Python function of the
+ * same name.
+ */
+NPY_INLINE static int
+npy_ctypes_check(PyTypeObject *obj)
+{
+ static PyObject *py_func = NULL;
+ PyObject *ret_obj;
+ int ret;
+
+ npy_cache_import("numpy.core._internal", "npy_ctypes_check", &py_func);
+ if (py_func == NULL) {
+ goto fail;
+ }
+
+ ret_obj = PyObject_CallFunctionObjArgs(py_func, (PyObject *)obj, NULL);
+ if (ret_obj == NULL) {
+ goto fail;
+ }
+
+ ret = PyObject_IsTrue(ret_obj);
+ Py_DECREF(ret_obj);
+ if (ret == -1) {
+ goto fail;
+ }
+
+ return ret;
+
+fail:
+ /* If the above fails, then we should just assume that the type is not from
+ * ctypes
+ */
+ PyErr_Clear();
+ return 0;
+}
+
+#endif
diff --git a/numpy/core/src/common/npy_longdouble.c b/numpy/core/src/common/npy_longdouble.c
index 508fbceac..c580e0cce 100644
--- a/numpy/core/src/common/npy_longdouble.c
+++ b/numpy/core/src/common/npy_longdouble.c
@@ -1,17 +1,12 @@
#include <Python.h>
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
#include "numpy/ndarraytypes.h"
#include "numpy/npy_math.h"
-
-/* This is a backport of Py_SETREF */
-#define NPY_SETREF(op, op2) \
- do { \
- PyObject *_py_tmp = (PyObject *)(op); \
- (op) = (op2); \
- Py_DECREF(_py_tmp); \
- } while (0)
-
+#include "npy_pycompat.h"
+#include "numpyos.h"
/*
* Heavily derived from PyLong_FromDouble
@@ -66,7 +61,7 @@ npy_longdouble_to_PyLong(npy_longdouble ldval)
npy_ulonglong chunk = (npy_ulonglong)frac;
PyObject *l_chunk;
/* v = v << chunk_size */
- NPY_SETREF(v, PyNumber_Lshift(v, l_chunk_size));
+ Py_SETREF(v, PyNumber_Lshift(v, l_chunk_size));
if (v == NULL) {
goto done;
}
@@ -77,7 +72,7 @@ npy_longdouble_to_PyLong(npy_longdouble ldval)
goto done;
}
/* v = v | chunk */
- NPY_SETREF(v, PyNumber_Or(v, l_chunk));
+ Py_SETREF(v, PyNumber_Or(v, l_chunk));
Py_DECREF(l_chunk);
if (v == NULL) {
goto done;
@@ -90,7 +85,7 @@ npy_longdouble_to_PyLong(npy_longdouble ldval)
/* v = -v */
if (neg) {
- NPY_SETREF(v, PyNumber_Negative(v));
+ Py_SETREF(v, PyNumber_Negative(v));
if (v == NULL) {
goto done;
}
@@ -100,3 +95,84 @@ done:
Py_DECREF(l_chunk_size);
return v;
}
+
+/* Helper function to get unicode(PyLong).encode('utf8') */
+static PyObject *
+_PyLong_Bytes(PyObject *long_obj) {
+ PyObject *bytes;
+#if defined(NPY_PY3K)
+ PyObject *unicode = PyObject_Str(long_obj);
+ if (unicode == NULL) {
+ return NULL;
+ }
+ bytes = PyUnicode_AsUTF8String(unicode);
+ Py_DECREF(unicode);
+#else
+ bytes = PyObject_Str(long_obj);
+#endif
+ return bytes;
+}
+
+
+/**
+ * TODO: currently a hack that converts the long through a string. This is
+ * correct, but slow.
+ *
+ * Another approach would be to do this numerically, in a similar way to
+ * PyLong_AsDouble.
+ * However, in order to respect rounding modes correctly, this needs to know
+ * the size of the mantissa, which is platform-dependent.
+ */
+NPY_VISIBILITY_HIDDEN npy_longdouble
+npy_longdouble_from_PyLong(PyObject *long_obj) {
+ npy_longdouble result = 1234;
+ char *end;
+ char *cstr;
+ PyObject *bytes;
+
+ /* convert the long to a string */
+ bytes = _PyLong_Bytes(long_obj);
+ if (bytes == NULL) {
+ return -1;
+ }
+
+ cstr = PyBytes_AsString(bytes);
+ if (cstr == NULL) {
+ goto fail;
+ }
+ end = NULL;
+
+ /* convert the string to a long double and capture errors */
+ errno = 0;
+ result = NumPyOS_ascii_strtold(cstr, &end);
+ if (errno == ERANGE) {
+ /* strtold returns INFINITY of the correct sign. */
+ if (PyErr_Warn(PyExc_RuntimeWarning,
+ "overflow encountered in conversion from python long") < 0) {
+ goto fail;
+ }
+ }
+ else if (errno) {
+ PyErr_Format(PyExc_RuntimeError,
+ "Could not parse python long as longdouble: %s (%s)",
+ cstr,
+ strerror(errno));
+ goto fail;
+ }
+
+ /* Extra characters at the end of the string, or nothing parsed */
+ if (end == cstr || *end != '\0') {
+ PyErr_Format(PyExc_RuntimeError,
+ "Could not parse long as longdouble: %s",
+ cstr);
+ goto fail;
+ }
+
+ /* finally safe to decref now that we're done with `end` */
+ Py_DECREF(bytes);
+ return result;
+
+fail:
+ Py_DECREF(bytes);
+ return -1;
+}
diff --git a/numpy/core/src/common/npy_longdouble.h b/numpy/core/src/common/npy_longdouble.h
index 036b53070..01db06de7 100644
--- a/numpy/core/src/common/npy_longdouble.h
+++ b/numpy/core/src/common/npy_longdouble.h
@@ -14,4 +14,14 @@
NPY_VISIBILITY_HIDDEN PyObject *
npy_longdouble_to_PyLong(npy_longdouble ldval);
+/* Convert a python `long` integer to a npy_longdouble
+ *
+ * This performs the same task as PyLong_AsDouble, but for long doubles
+ * which have a greater range.
+ *
+ * Returns -1 if an error occurs.
+ */
+NPY_VISIBILITY_HIDDEN npy_longdouble
+npy_longdouble_from_PyLong(PyObject *long_obj);
+
#endif
diff --git a/numpy/core/src/common/npy_partition.h.src b/numpy/core/src/common/npy_partition.h.src
index a22cf911c..97dc2536b 100644
--- a/numpy/core/src/common/npy_partition.h.src
+++ b/numpy/core/src/common/npy_partition.h.src
@@ -113,9 +113,6 @@ get_argpartition_func(int type, NPY_SELECTKIND which)
npy_intp i;
npy_intp ntypes = ARRAY_SIZE(_part_map);
- if (which >= NPY_NSELECTS) {
- return NULL;
- }
for (i = 0; i < ntypes; i++) {
if (type == _part_map[i].typenum) {
return _part_map[i].argpart[which];
diff --git a/numpy/core/src/common/npy_sort.h b/numpy/core/src/common/npy_sort.h
deleted file mode 100644
index 8c6f05623..000000000
--- a/numpy/core/src/common/npy_sort.h
+++ /dev/null
@@ -1,204 +0,0 @@
-#ifndef __NPY_SORT_H__
-#define __NPY_SORT_H__
-
-/* Python include is for future object sorts */
-#include <Python.h>
-#include <numpy/npy_common.h>
-#include <numpy/ndarraytypes.h>
-
-#define NPY_ENOMEM 1
-#define NPY_ECOMP 2
-
-static NPY_INLINE int npy_get_msb(npy_uintp unum)
-{
- int depth_limit = 0;
- while (unum >>= 1) {
- depth_limit++;
- }
- return depth_limit;
-}
-
-int quicksort_bool(void *vec, npy_intp cnt, void *null);
-int heapsort_bool(void *vec, npy_intp cnt, void *null);
-int mergesort_bool(void *vec, npy_intp cnt, void *null);
-int aquicksort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_bool(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_byte(void *vec, npy_intp cnt, void *null);
-int heapsort_byte(void *vec, npy_intp cnt, void *null);
-int mergesort_byte(void *vec, npy_intp cnt, void *null);
-int aquicksort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_byte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ubyte(void *vec, npy_intp cnt, void *null);
-int heapsort_ubyte(void *vec, npy_intp cnt, void *null);
-int mergesort_ubyte(void *vec, npy_intp cnt, void *null);
-int aquicksort_ubyte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ubyte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ubyte(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_short(void *vec, npy_intp cnt, void *null);
-int heapsort_short(void *vec, npy_intp cnt, void *null);
-int mergesort_short(void *vec, npy_intp cnt, void *null);
-int aquicksort_short(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_short(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_short(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ushort(void *vec, npy_intp cnt, void *null);
-int heapsort_ushort(void *vec, npy_intp cnt, void *null);
-int mergesort_ushort(void *vec, npy_intp cnt, void *null);
-int aquicksort_ushort(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ushort(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ushort(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_int(void *vec, npy_intp cnt, void *null);
-int heapsort_int(void *vec, npy_intp cnt, void *null);
-int mergesort_int(void *vec, npy_intp cnt, void *null);
-int aquicksort_int(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_int(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_int(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_uint(void *vec, npy_intp cnt, void *null);
-int heapsort_uint(void *vec, npy_intp cnt, void *null);
-int mergesort_uint(void *vec, npy_intp cnt, void *null);
-int aquicksort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_uint(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_long(void *vec, npy_intp cnt, void *null);
-int heapsort_long(void *vec, npy_intp cnt, void *null);
-int mergesort_long(void *vec, npy_intp cnt, void *null);
-int aquicksort_long(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_long(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_long(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ulong(void *vec, npy_intp cnt, void *null);
-int heapsort_ulong(void *vec, npy_intp cnt, void *null);
-int mergesort_ulong(void *vec, npy_intp cnt, void *null);
-int aquicksort_ulong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ulong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ulong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_longlong(void *vec, npy_intp cnt, void *null);
-int heapsort_longlong(void *vec, npy_intp cnt, void *null);
-int mergesort_longlong(void *vec, npy_intp cnt, void *null);
-int aquicksort_longlong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_longlong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_longlong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_ulonglong(void *vec, npy_intp cnt, void *null);
-int heapsort_ulonglong(void *vec, npy_intp cnt, void *null);
-int mergesort_ulonglong(void *vec, npy_intp cnt, void *null);
-int aquicksort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_ulonglong(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_half(void *vec, npy_intp cnt, void *null);
-int heapsort_half(void *vec, npy_intp cnt, void *null);
-int mergesort_half(void *vec, npy_intp cnt, void *null);
-int aquicksort_half(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_half(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_half(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_float(void *vec, npy_intp cnt, void *null);
-int heapsort_float(void *vec, npy_intp cnt, void *null);
-int mergesort_float(void *vec, npy_intp cnt, void *null);
-int aquicksort_float(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_float(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_float(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_double(void *vec, npy_intp cnt, void *null);
-int heapsort_double(void *vec, npy_intp cnt, void *null);
-int mergesort_double(void *vec, npy_intp cnt, void *null);
-int aquicksort_double(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_double(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_double(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_longdouble(void *vec, npy_intp cnt, void *null);
-int heapsort_longdouble(void *vec, npy_intp cnt, void *null);
-int mergesort_longdouble(void *vec, npy_intp cnt, void *null);
-int aquicksort_longdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_longdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_longdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_cfloat(void *vec, npy_intp cnt, void *null);
-int heapsort_cfloat(void *vec, npy_intp cnt, void *null);
-int mergesort_cfloat(void *vec, npy_intp cnt, void *null);
-int aquicksort_cfloat(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_cfloat(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_cfloat(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_cdouble(void *vec, npy_intp cnt, void *null);
-int heapsort_cdouble(void *vec, npy_intp cnt, void *null);
-int mergesort_cdouble(void *vec, npy_intp cnt, void *null);
-int aquicksort_cdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_cdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_cdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_clongdouble(void *vec, npy_intp cnt, void *null);
-int heapsort_clongdouble(void *vec, npy_intp cnt, void *null);
-int mergesort_clongdouble(void *vec, npy_intp cnt, void *null);
-int aquicksort_clongdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_clongdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_clongdouble(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_string(void *vec, npy_intp cnt, void *arr);
-int heapsort_string(void *vec, npy_intp cnt, void *arr);
-int mergesort_string(void *vec, npy_intp cnt, void *arr);
-int aquicksort_string(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int aheapsort_string(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int amergesort_string(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-
-
-int quicksort_unicode(void *vec, npy_intp cnt, void *arr);
-int heapsort_unicode(void *vec, npy_intp cnt, void *arr);
-int mergesort_unicode(void *vec, npy_intp cnt, void *arr);
-int aquicksort_unicode(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int aheapsort_unicode(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int amergesort_unicode(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-
-
-int quicksort_datetime(void *vec, npy_intp cnt, void *null);
-int heapsort_datetime(void *vec, npy_intp cnt, void *null);
-int mergesort_datetime(void *vec, npy_intp cnt, void *null);
-int aquicksort_datetime(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_datetime(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_datetime(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int quicksort_timedelta(void *vec, npy_intp cnt, void *null);
-int heapsort_timedelta(void *vec, npy_intp cnt, void *null);
-int mergesort_timedelta(void *vec, npy_intp cnt, void *null);
-int aquicksort_timedelta(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int aheapsort_timedelta(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-int amergesort_timedelta(void *vec, npy_intp *ind, npy_intp cnt, void *null);
-
-
-int npy_quicksort(void *vec, npy_intp cnt, void *arr);
-int npy_heapsort(void *vec, npy_intp cnt, void *arr);
-int npy_mergesort(void *vec, npy_intp cnt, void *arr);
-int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
-
-#endif
diff --git a/numpy/core/src/common/npy_sort.h.src b/numpy/core/src/common/npy_sort.h.src
new file mode 100644
index 000000000..16a105499
--- /dev/null
+++ b/numpy/core/src/common/npy_sort.h.src
@@ -0,0 +1,100 @@
+#ifndef __NPY_SORT_H__
+#define __NPY_SORT_H__
+
+/* Python include is for future object sorts */
+#include <Python.h>
+#include <numpy/npy_common.h>
+#include <numpy/ndarraytypes.h>
+
+#define NPY_ENOMEM 1
+#define NPY_ECOMP 2
+
+static NPY_INLINE int npy_get_msb(npy_uintp unum)
+{
+ int depth_limit = 0;
+ while (unum >>= 1) {
+ depth_limit++;
+ }
+ return depth_limit;
+}
+
+
+/*
+ *****************************************************************************
+ ** NUMERIC SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble, datetime, timedelta#
+ */
+
+int quicksort_@suff@(void *vec, npy_intp cnt, void *null);
+int heapsort_@suff@(void *vec, npy_intp cnt, void *null);
+int mergesort_@suff@(void *vec, npy_intp cnt, void *null);
+int timsort_@suff@(void *vec, npy_intp cnt, void *null);
+int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+
+/**end repeat**/
+
+/**begin repeat
+ *
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong#
+ */
+
+int radixsort_@suff@(void *vec, npy_intp cnt, void *null);
+int aradixsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *null);
+
+/**end repeat**/
+
+
+
+/*
+ *****************************************************************************
+ ** STRING SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #suff = string, unicode#
+ */
+
+int quicksort_@suff@(void *vec, npy_intp cnt, void *arr);
+int heapsort_@suff@(void *vec, npy_intp cnt, void *arr);
+int mergesort_@suff@(void *vec, npy_intp cnt, void *arr);
+int timsort_@suff@(void *vec, npy_intp cnt, void *arr);
+int aquicksort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int aheapsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int amergesort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int atimsort_@suff@(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+
+/**end repeat**/
+
+
+/*
+ *****************************************************************************
+ ** GENERIC SORT **
+ *****************************************************************************
+ */
+
+
+int npy_quicksort(void *vec, npy_intp cnt, void *arr);
+int npy_heapsort(void *vec, npy_intp cnt, void *arr);
+int npy_mergesort(void *vec, npy_intp cnt, void *arr);
+int npy_timsort(void *vec, npy_intp cnt, void *arr);
+int npy_aquicksort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int npy_aheapsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int npy_amergesort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+int npy_atimsort(void *vec, npy_intp *ind, npy_intp cnt, void *arr);
+
+#endif
diff --git a/numpy/core/src/multiarray/numpyos.c b/numpy/core/src/common/numpyos.c
index 52dcbf3c8..d60b1ca17 100644
--- a/numpy/core/src/multiarray/numpyos.c
+++ b/numpy/core/src/common/numpyos.c
@@ -769,3 +769,31 @@ NumPyOS_ascii_ftoLf(FILE *fp, long double *value)
}
return r;
}
+
+NPY_NO_EXPORT npy_longlong
+NumPyOS_strtoll(const char *str, char **endptr, int base)
+{
+#if defined HAVE_STRTOLL
+ return strtoll(str, endptr, base);
+#elif defined _MSC_VER
+ return _strtoi64(str, endptr, base);
+#else
+ /* ok on 64 bit posix */
+ return PyOS_strtol(str, endptr, base);
+#endif
+}
+
+NPY_NO_EXPORT npy_ulonglong
+NumPyOS_strtoull(const char *str, char **endptr, int base)
+{
+#if defined HAVE_STRTOULL
+ return strtoull(str, endptr, base);
+#elif defined _MSC_VER
+ return _strtoui64(str, endptr, base);
+#else
+ /* ok on 64 bit posix */
+ return PyOS_strtoul(str, endptr, base);
+#endif
+}
+
+
diff --git a/numpy/core/src/multiarray/numpyos.h b/numpy/core/src/common/numpyos.h
index 7ca795a6f..4deed8400 100644
--- a/numpy/core/src/multiarray/numpyos.h
+++ b/numpy/core/src/common/numpyos.h
@@ -31,4 +31,11 @@ NumPyOS_ascii_ftoLf(FILE *fp, long double *value);
NPY_NO_EXPORT int
NumPyOS_ascii_isspace(int c);
+/* Convert a string to an int in an arbitrary base */
+NPY_NO_EXPORT npy_longlong
+NumPyOS_strtoll(const char *str, char **endptr, int base);
+
+/* Convert a string to an int in an arbitrary base */
+NPY_NO_EXPORT npy_ulonglong
+NumPyOS_strtoull(const char *str, char **endptr, int base);
#endif
diff --git a/numpy/core/src/common/ufunc_override.c b/numpy/core/src/common/ufunc_override.c
index 33b54c665..89f08a9cb 100644
--- a/numpy/core/src/common/ufunc_override.c
+++ b/numpy/core/src/common/ufunc_override.c
@@ -1,10 +1,9 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-#define NO_IMPORT_ARRAY
+#define _MULTIARRAYMODULE
#include "npy_pycompat.h"
#include "get_attr_string.h"
#include "npy_import.h"
-
#include "ufunc_override.h"
/*
@@ -12,45 +11,39 @@
* is not the default, i.e., the object is not an ndarray, and its
* __array_ufunc__ is not the same as that of ndarray.
*
- * Returns a new reference, the value of type(obj).__array_ufunc__
- *
- * If the __array_ufunc__ matches that of ndarray, or does not exist, return
- * NULL.
- *
- * Note that since this module is used with both multiarray and umath, we do
- * not have access to PyArray_Type and therewith neither to PyArray_CheckExact
- * nor to the default __array_ufunc__ method, so instead we import locally.
- * TODO: Can this really not be done more smartly?
+ * Returns a new reference, the value of type(obj).__array_ufunc__ if it
+ * exists and is different from that of ndarray, and NULL otherwise.
*/
NPY_NO_EXPORT PyObject *
-get_non_default_array_ufunc(PyObject *obj)
+PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj)
{
- static PyObject *ndarray = NULL;
static PyObject *ndarray_array_ufunc = NULL;
PyObject *cls_array_ufunc;
- /* on first entry, import and cache ndarray and its __array_ufunc__ */
- if (ndarray == NULL) {
- npy_cache_import("numpy.core.multiarray", "ndarray", &ndarray);
- ndarray_array_ufunc = PyObject_GetAttrString(ndarray,
+ /* On first entry, cache ndarray's __array_ufunc__ */
+ if (ndarray_array_ufunc == NULL) {
+ ndarray_array_ufunc = PyObject_GetAttrString((PyObject *)&PyArray_Type,
"__array_ufunc__");
}
/* Fast return for ndarray */
- if ((PyObject *)Py_TYPE(obj) == ndarray) {
+ if (PyArray_CheckExact(obj)) {
return NULL;
}
- /* does the class define __array_ufunc__? */
+ /*
+ * Does the class define __array_ufunc__? (Note that LookupSpecial has fast
+ * return for basic python types, so no need to worry about those here)
+ */
cls_array_ufunc = PyArray_LookupSpecial(obj, "__array_ufunc__");
if (cls_array_ufunc == NULL) {
return NULL;
}
- /* is it different from ndarray.__array_ufunc__? */
- if (cls_array_ufunc != ndarray_array_ufunc) {
- return cls_array_ufunc;
+ /* Ignore if the same as ndarray.__array_ufunc__ */
+ if (cls_array_ufunc == ndarray_array_ufunc) {
+ Py_DECREF(cls_array_ufunc);
+ return NULL;
}
- Py_DECREF(cls_array_ufunc);
- return NULL;
+ return cls_array_ufunc;
}
/*
@@ -62,9 +55,9 @@ get_non_default_array_ufunc(PyObject *obj)
*/
NPY_NO_EXPORT int
-has_non_default_array_ufunc(PyObject * obj)
+PyUFunc_HasOverride(PyObject * obj)
{
- PyObject *method = get_non_default_array_ufunc(obj);
+ PyObject *method = PyUFuncOverride_GetNonDefaultArrayUfunc(obj);
if (method) {
Py_DECREF(method);
return 1;
@@ -78,164 +71,51 @@ has_non_default_array_ufunc(PyObject * obj)
* Get possible out argument from kwds, and returns the number of outputs
* contained within it: if a tuple, the number of elements in it, 1 otherwise.
* The out argument itself is returned in out_kwd_obj, and the outputs
- * in the out_obj array (all as borrowed references).
+ * in the out_obj array (as borrowed references).
*
- * Returns -1 if kwds is not a dict, 0 if no outputs found.
+ * Returns 0 if no outputs found, -1 if kwds is not a dict (with an error set).
*/
-static int
-get_out_objects(PyObject *kwds, PyObject **out_kwd_obj, PyObject ***out_objs)
+NPY_NO_EXPORT int
+PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject ***out_objs)
{
if (kwds == NULL) {
+ Py_INCREF(Py_None);
+ *out_kwd_obj = Py_None;
return 0;
}
if (!PyDict_CheckExact(kwds)) {
PyErr_SetString(PyExc_TypeError,
- "Internal Numpy error: call to PyUFunc_WithOverride "
+ "Internal Numpy error: call to PyUFuncOverride_GetOutObjects "
"with non-dict kwds");
+ *out_kwd_obj = NULL;
return -1;
}
/* borrowed reference */
*out_kwd_obj = PyDict_GetItemString(kwds, "out");
if (*out_kwd_obj == NULL) {
+ Py_INCREF(Py_None);
+ *out_kwd_obj = Py_None;
return 0;
}
if (PyTuple_CheckExact(*out_kwd_obj)) {
- *out_objs = PySequence_Fast_ITEMS(*out_kwd_obj);
- return PySequence_Fast_GET_SIZE(*out_kwd_obj);
- }
- else {
- *out_objs = out_kwd_obj;
- return 1;
- }
-}
-
-/*
- * For each positional argument and each argument in a possible "out"
- * keyword, look for overrides of the standard ufunc behaviour, i.e.,
- * non-default __array_ufunc__ methods.
- *
- * Returns the number of overrides, setting corresponding objects
- * in PyObject array ``with_override`` and the corresponding
- * __array_ufunc__ methods in ``methods`` (both using new references).
- *
- * Only the first override for a given class is returned.
- *
- * returns -1 on failure.
- */
-NPY_NO_EXPORT int
-PyUFunc_WithOverride(PyObject *args, PyObject *kwds,
- PyObject **with_override, PyObject **methods)
-{
- int i;
- int num_override_args = 0;
- int narg, nout = 0;
- PyObject *out_kwd_obj;
- PyObject **arg_objs, **out_objs;
-
- narg = PyTuple_Size(args);
- if (narg < 0) {
- return -1;
- }
- arg_objs = PySequence_Fast_ITEMS(args);
-
- nout = get_out_objects(kwds, &out_kwd_obj, &out_objs);
- if (nout < 0) {
- return -1;
- }
-
- for (i = 0; i < narg + nout; ++i) {
- PyObject *obj;
- int j;
- int new_class = 1;
-
- if (i < narg) {
- obj = arg_objs[i];
- }
- else {
- obj = out_objs[i - narg];
- }
/*
- * Have we seen this class before? If so, ignore.
+ * The C-API recommends calling PySequence_Fast before any of the other
+ * PySequence_Fast* functions. This is required for PyPy
*/
- for (j = 0; j < num_override_args; j++) {
- new_class = (Py_TYPE(obj) != Py_TYPE(with_override[j]));
- if (!new_class) {
- break;
- }
- }
- if (new_class) {
- /*
- * Now see if the object provides an __array_ufunc__. However, we should
- * ignore the base ndarray.__ufunc__, so we skip any ndarray as well as
- * any ndarray subclass instances that did not override __array_ufunc__.
- */
- PyObject *method = get_non_default_array_ufunc(obj);
- if (method == NULL) {
- continue;
- }
- if (method == Py_None) {
- PyErr_Format(PyExc_TypeError,
- "operand '%.200s' does not support ufuncs "
- "(__array_ufunc__=None)",
- obj->ob_type->tp_name);
- Py_DECREF(method);
- goto fail;
- }
- Py_INCREF(obj);
- with_override[num_override_args] = obj;
- methods[num_override_args] = method;
- ++num_override_args;
+ PyObject *seq;
+ seq = PySequence_Fast(*out_kwd_obj,
+ "Could not convert object to sequence");
+ if (seq == NULL) {
+ *out_kwd_obj = NULL;
+ return -1;
}
+ *out_objs = PySequence_Fast_ITEMS(seq);
+ *out_kwd_obj = seq;
+ return PySequence_Fast_GET_SIZE(seq);
}
- return num_override_args;
-
-fail:
- for (i = 0; i < num_override_args; i++) {
- Py_DECREF(with_override[i]);
- Py_DECREF(methods[i]);
- }
- return -1;
-}
-
-/*
- * Check whether any of a set of input and output args have a non-default
- * __array_ufunc__ method. Return 1 if so, 0 if not.
- *
- * This function primarily exists to help ndarray.__array_ufunc__ determine
- * whether it can support a ufunc (which is the case only if none of the
- * operands have an override). Thus, unlike in PyUFunc_CheckOverride, the
- * actual overrides are not needed and one can stop looking once one is found.
- *
- * TODO: move this function and has_non_default_array_ufunc closer to ndarray.
- */
-NPY_NO_EXPORT int
-PyUFunc_HasOverride(PyObject *args, PyObject *kwds)
-{
- int i;
- int nin, nout;
- PyObject *out_kwd_obj;
- PyObject **in_objs, **out_objs;
-
- /* check inputs */
- nin = PyTuple_Size(args);
- if (nin < 0) {
- return -1;
- }
- in_objs = PySequence_Fast_ITEMS(args);
- for (i = 0; i < nin; ++i) {
- if (has_non_default_array_ufunc(in_objs[i])) {
- return 1;
- }
- }
- /* check outputs, if any */
- nout = get_out_objects(kwds, &out_kwd_obj, &out_objs);
- if (nout < 0) {
- return -1;
- }
- for (i = 0; i < nout; i++) {
- if (has_non_default_array_ufunc(out_objs[i])) {
- return 1;
- }
+ else {
+ Py_INCREF(*out_kwd_obj);
+ *out_objs = out_kwd_obj;
+ return 1;
}
- return 0;
}
diff --git a/numpy/core/src/common/ufunc_override.h b/numpy/core/src/common/ufunc_override.h
index 5b269d270..bf86865c9 100644
--- a/numpy/core/src/common/ufunc_override.h
+++ b/numpy/core/src/common/ufunc_override.h
@@ -8,18 +8,11 @@
* is not the default, i.e., the object is not an ndarray, and its
* __array_ufunc__ is not the same as that of ndarray.
*
- * Returns a new reference, the value of type(obj).__array_ufunc__
- *
- * If the __array_ufunc__ matches that of ndarray, or does not exist, return
- * NULL.
- *
- * Note that since this module is used with both multiarray and umath, we do
- * not have access to PyArray_Type and therewith neither to PyArray_CheckExact
- * nor to the default __array_ufunc__ method, so instead we import locally.
- * TODO: Can this really not be done more smartly?
+ * Returns a new reference, the value of type(obj).__array_ufunc__ if it
+ * exists and is different from that of ndarray, and NULL otherwise.
*/
NPY_NO_EXPORT PyObject *
-get_non_default_array_ufunc(PyObject *obj);
+PyUFuncOverride_GetNonDefaultArrayUfunc(PyObject *obj);
/*
* Check whether an object has __array_ufunc__ defined on its class and it
@@ -29,18 +22,16 @@ get_non_default_array_ufunc(PyObject *obj);
* Returns 1 if this is the case, 0 if not.
*/
NPY_NO_EXPORT int
-has_non_default_array_ufunc(PyObject * obj);
+PyUFunc_HasOverride(PyObject *obj);
/*
- * Check whether a set of input and output args have a non-default
- * `__array_ufunc__` method. Returns the number of overrides, setting
- * corresponding objects in PyObject array with_override (if not NULL).
- * returns -1 on failure.
+ * Get possible out argument from kwds, and returns the number of outputs
+ * contained within it: if a tuple, the number of elements in it, 1 otherwise.
+ * The out argument itself is returned in out_kwd_obj, and the outputs
+ * in the out_obj array (as borrowed references).
+ *
+ * Returns 0 if no outputs found, -1 if kwds is not a dict (with an error set).
*/
NPY_NO_EXPORT int
-PyUFunc_WithOverride(PyObject *args, PyObject *kwds,
- PyObject **with_override, PyObject **methods);
-
-NPY_NO_EXPORT int
-PyUFunc_HasOverride(PyObject *args, PyObject *kwds);
+PyUFuncOverride_GetOutObjects(PyObject *kwds, PyObject **out_kwd_obj, PyObject ***out_objs);
#endif
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 6c4d49bd1..fa2efb428 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -11,6 +11,13 @@
#include "npy_extint128.h"
#include "common.h"
+
+#if defined(MS_WIN32) || defined(__CYGWIN__)
+#define EXPORT(x) __declspec(dllexport) x
+#else
+#define EXPORT(x) x
+#endif
+
#define ARRAY_SIZE(a) (sizeof(a)/sizeof(a[0]))
/* test PyArray_IsPythonScalar, before including private py3 compat header */
@@ -31,6 +38,12 @@ IsPythonScalar(PyObject * dummy, PyObject *args)
#include "npy_pycompat.h"
+/** Function to test calling via ctypes */
+EXPORT(void*) forward_pointer(void *x)
+{
+ return x;
+}
+
/*
* TODO:
* - Handle mode
@@ -580,6 +593,25 @@ fail:
return NULL;
}
+/*
+ * Helper to test fromstring of 0 terminated strings, as the C-API supports
+ * the -1 length identifier.
+ */
+static PyObject *
+fromstring_null_term_c_api(PyObject *dummy, PyObject *byte_obj)
+{
+ char *string;
+ PyArray_Descr *descr;
+
+ string = PyBytes_AsString(byte_obj);
+ if (string == NULL) {
+ return NULL;
+ }
+ descr = PyArray_DescrNewFromType(NPY_FLOAT64);
+ return PyArray_FromString(string, -1, descr, -1, " ");
+}
+
+
/* check no elison for avoided increfs */
static PyObject *
incref_elide(PyObject *dummy, PyObject *args)
@@ -643,6 +675,43 @@ npy_updateifcopy_deprecation(PyObject* NPY_UNUSED(self), PyObject* args)
Py_RETURN_NONE;
}
+/* used to test PyArray_As1D usage emits not implemented error */
+static PyObject*
+npy_pyarrayas1d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *op = Py_BuildValue("i", 42);
+ PyObject *result = op;
+ int dim = 4;
+ double arg[2] = {1, 2};
+ int temp = PyArray_As1D(&result, (char **)&arg, &dim, NPY_DOUBLE);
+ if (temp < 0) {
+ Py_DECREF(op);
+ return NULL;
+ }
+ /* op != result */
+ Py_DECREF(op);
+ return result;
+}
+
+/* used to test PyArray_As2D usage emits not implemented error */
+static PyObject*
+npy_pyarrayas2d_deprecation(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *op = Py_BuildValue("i", 42);
+ PyObject *result = op;
+ int dim1 = 4;
+ int dim2 = 6;
+ double arg[2][2] = {{1, 2}, {3, 4}};
+ int temp = PyArray_As2D(&result, (char ***)&arg, &dim1, &dim2, NPY_DOUBLE);
+ if (temp < 0) {
+ Py_DECREF(op);
+ return NULL;
+ }
+ /* op != result */
+ Py_DECREF(op);
+ return result;
+}
+
/* used to create array with WRITEBACKIFCOPY flag */
static PyObject*
npy_create_writebackifcopy(PyObject* NPY_UNUSED(self), PyObject* args)
@@ -834,6 +903,31 @@ get_buffer_info(PyObject *NPY_UNUSED(self), PyObject *args)
#undef GET_PYBUF_FLAG
+/*
+ * Return a new array object wrapping existing C-allocated (dummy) data.
+ * Such an array does not own its data (must not free it), but because it
+ * wraps C data, it also has no base object. Used to test arr.flags.writeable
+ * setting behaviour.
+ */
+static PyObject*
+get_c_wrapping_array(PyObject* NPY_UNUSED(self), PyObject* arg)
+{
+ int writeable, flags;
+ PyArray_Descr *descr;
+ npy_intp zero = 0;
+
+ writeable = PyObject_IsTrue(arg);
+ if (error_converting(writeable)) {
+ return NULL;
+ }
+
+ flags = writeable ? NPY_ARRAY_WRITEABLE : 0;
+ /* Create an empty array (which points to a random place) */
+ descr = PyArray_DescrNewFromType(NPY_INTP);
+ return PyArray_NewFromDescr(&PyArray_Type, descr,
+ 1, &zero, NULL, &zero, flags, NULL);
+}
+
/*
* Test C-api level item getting.
@@ -892,6 +986,7 @@ test_as_c_array(PyObject *NPY_UNUSED(self), PyObject *args)
num_dims = PyArray_NDIM(array_obj);
descr = PyArray_DESCR(array_obj);
+ Py_INCREF(descr); /* PyArray_AsCArray steals a reference to this */
switch (num_dims) {
case 1:
@@ -934,6 +1029,7 @@ test_as_c_array(PyObject *NPY_UNUSED(self), PyObject *args)
PyArray_Free((PyObject *) array_obj, (void *) array3);
break;
default:
+ Py_DECREF(descr);
PyErr_SetString(PyExc_ValueError, "array.ndim not in [1, 3]");
return NULL;
}
@@ -1227,7 +1323,9 @@ pylong_from_int128(npy_extint128_t value)
}
Py_DECREF(val);
+ Py_DECREF(val_64);
val = tmp;
+ val_64 = NULL;
tmp = PyLong_FromUnsignedLongLong(value.lo);
if (tmp == NULL) {
@@ -1855,6 +1953,19 @@ printf_float_g(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
return PrintFloat_Printf_g(obj, precision);
}
+static PyObject *
+getset_numericops(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *ret;
+ PyObject *ops = PyArray_GetNumericOps();
+ if (ops == NULL) {
+ return NULL;
+ }
+ ret = PyLong_FromLong(PyArray_SetNumericOps(ops));
+ Py_DECREF(ops);
+ return ret;
+}
+
static PyMethodDef Multiarray_TestsMethods[] = {
{"IsPythonScalar",
IsPythonScalar,
@@ -1874,6 +1985,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"test_inplace_increment",
inplace_increment,
METH_VARARGS, NULL},
+ {"fromstring_null_term_c_api",
+ fromstring_null_term_c_api,
+ METH_O, NULL},
{"incref_elide",
incref_elide,
METH_VARARGS, NULL},
@@ -1886,6 +2000,12 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"npy_updateifcopy_deprecation",
npy_updateifcopy_deprecation,
METH_O, NULL},
+ {"npy_pyarrayas1d_deprecation",
+ npy_pyarrayas1d_deprecation,
+ METH_NOARGS, NULL},
+ {"npy_pyarrayas2d_deprecation",
+ npy_pyarrayas2d_deprecation,
+ METH_NOARGS, NULL},
{"npy_create_writebackifcopy",
npy_create_writebackifcopy,
METH_O, NULL},
@@ -1906,6 +2026,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"get_buffer_info",
get_buffer_info,
METH_VARARGS, NULL},
+ {"get_c_wrapping_array",
+ get_c_wrapping_array,
+ METH_O, NULL},
{"array_indexing",
array_indexing,
METH_VARARGS, NULL},
@@ -1963,6 +2086,9 @@ static PyMethodDef Multiarray_TestsMethods[] = {
{"get_fpu_mode",
get_fpu_mode,
METH_VARARGS, get_fpu_mode_doc},
+ {"getset_numericops",
+ getset_numericops,
+ METH_NOARGS, NULL},
/**begin repeat
* #name = cabs, carg#
*/
@@ -2040,3 +2166,9 @@ init_multiarray_tests(void)
}
return RETVAL;
}
+
+NPY_NO_EXPORT int
+test_not_exported(void)
+{
+ return 1;
+}
diff --git a/numpy/core/src/multiarray/alloc.c b/numpy/core/src/multiarray/alloc.c
index 6755095d7..a7f34cbe5 100644
--- a/numpy/core/src/multiarray/alloc.c
+++ b/numpy/core/src/multiarray/alloc.c
@@ -25,10 +25,14 @@
#include <assert.h>
-#ifdef HAVE_SYS_MMAN_H
+#ifdef NPY_OS_LINUX
#include <sys/mman.h>
-#if defined MADV_HUGEPAGE && defined HAVE_MADVISE
-#define HAVE_MADV_HUGEPAGE
+#ifndef MADV_HUGEPAGE
+/*
+ * Use code 14 (MADV_HUGEPAGE) if it isn't defined. This gives a chance of
+ * enabling huge pages even if built with linux kernel < 2.6.38
+ */
+#define MADV_HUGEPAGE 14
#endif
#endif
@@ -74,11 +78,15 @@ _npy_alloc_cache(npy_uintp nelem, npy_uintp esz, npy_uint msz,
#ifdef _PyPyGC_AddMemoryPressure
_PyPyPyGC_AddMemoryPressure(nelem * esz);
#endif
-#ifdef HAVE_MADV_HUGEPAGE
+#ifdef NPY_OS_LINUX
/* allow kernel allocating huge pages for large arrays */
if (NPY_UNLIKELY(nelem * esz >= ((1u<<22u)))) {
npy_uintp offset = 4096u - (npy_uintp)p % (4096u);
npy_uintp length = nelem * esz - offset;
+ /**
+ * Intentionally not checking for errors that may be returned by
+ * older kernel versions; optimistically tries enabling huge pages.
+ */
madvise((void*)((npy_uintp)p + offset), length, MADV_HUGEPAGE);
}
#endif
@@ -218,6 +226,7 @@ PyDataMem_NEW(size_t size)
{
void *result;
+ assert(size != 0);
result = malloc(size);
if (_PyDataMem_eventhook != NULL) {
NPY_ALLOW_C_API_DEF
@@ -281,6 +290,7 @@ PyDataMem_RENEW(void *ptr, size_t size)
{
void *result;
+ assert(size != 0);
result = realloc(ptr, size);
if (result != ptr) {
PyTraceMalloc_Untrack(NPY_TRACE_DOMAIN, (npy_uintp)ptr);
diff --git a/numpy/core/src/multiarray/array_assign_array.c b/numpy/core/src/multiarray/array_assign_array.c
index f692e0307..7ff33ebd7 100644
--- a/numpy/core/src/multiarray/array_assign_array.c
+++ b/numpy/core/src/multiarray/array_assign_array.c
@@ -25,6 +25,47 @@
#include "array_assign.h"
/*
+ * Check that array data is both uint-aligned and true-aligned for all array
+ * elements, as required by the copy/casting code in lowlevel_strided_loops.c
+ */
+NPY_NO_EXPORT int
+copycast_isaligned(int ndim, npy_intp *shape,
+ PyArray_Descr *dtype, char *data, npy_intp *strides)
+{
+ int aligned;
+ int big_aln, small_aln;
+
+ int uint_aln = npy_uint_alignment(dtype->elsize);
+ int true_aln = dtype->alignment;
+
+ /* uint alignment can be 0, meaning not uint alignable */
+ if (uint_aln == 0) {
+ return 0;
+ }
+
+ /*
+ * As an optimization, it is unnecessary to check the alignment to the
+ * smaller of (uint_aln, true_aln) if the data is aligned to the bigger of
+ * the two and the big is a multiple of the small aln. We check the bigger
+ * one first and only check the smaller if necessary.
+ */
+ if (true_aln >= uint_aln) {
+ big_aln = true_aln;
+ small_aln = uint_aln;
+ }
+ else {
+ big_aln = uint_aln;
+ small_aln = true_aln;
+ }
+
+ aligned = raw_array_is_aligned(ndim, shape, data, strides, big_aln);
+ if (aligned && big_aln % small_aln != 0) {
+ aligned = raw_array_is_aligned(ndim, shape, data, strides, small_aln);
+ }
+ return aligned;
+}
+
+/*
* Assigns the array from 'src' to 'dst'. The strides must already have
* been broadcast.
*
@@ -48,11 +89,9 @@ raw_array_assign_array(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
- aligned = raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
- npy_uint_alignment(dst_dtype->elsize)) &&
- raw_array_is_aligned(ndim, shape, src_data, src_strides,
- npy_uint_alignment(src_dtype->elsize));
+ aligned =
+ copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) &&
+ copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides);
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareTwoRawArrayIter(
@@ -133,11 +172,9 @@ raw_array_wheremasked_assign_array(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
- aligned = raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
- npy_uint_alignment(dst_dtype->elsize)) &&
- raw_array_is_aligned(ndim, shape, src_data, src_strides,
- npy_uint_alignment(src_dtype->elsize));
+ aligned =
+ copycast_isaligned(ndim, shape, dst_dtype, dst_data, dst_strides) &&
+ copycast_isaligned(ndim, shape, src_dtype, src_data, src_strides);
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareThreeRawArrayIter(
@@ -383,14 +420,14 @@ PyArray_AssignArray(PyArrayObject *dst, PyArrayObject *src,
/* A straightforward where-masked assignment */
/* Do the masked assignment with raw array iteration */
- if (raw_array_wheremasked_assign_array(
- PyArray_NDIM(dst), PyArray_DIMS(dst),
- PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst),
- PyArray_DESCR(src), PyArray_DATA(src), src_strides,
- PyArray_DESCR(wheremask), PyArray_DATA(wheremask),
- wheremask_strides) < 0) {
- goto fail;
- }
+ if (raw_array_wheremasked_assign_array(
+ PyArray_NDIM(dst), PyArray_DIMS(dst),
+ PyArray_DESCR(dst), PyArray_DATA(dst), PyArray_STRIDES(dst),
+ PyArray_DESCR(src), PyArray_DATA(src), src_strides,
+ PyArray_DESCR(wheremask), PyArray_DATA(wheremask),
+ wheremask_strides) < 0) {
+ goto fail;
+ }
}
if (copied_src) {
diff --git a/numpy/core/src/multiarray/array_assign_scalar.c b/numpy/core/src/multiarray/array_assign_scalar.c
index 841a41850..ecb5be47b 100644
--- a/numpy/core/src/multiarray/array_assign_scalar.c
+++ b/numpy/core/src/multiarray/array_assign_scalar.c
@@ -45,10 +45,13 @@ raw_array_assign_scalar(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
+ /* Check both uint and true alignment */
aligned = raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
npy_uint_alignment(dst_dtype->elsize)) &&
- npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize));
+ raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
+ dst_dtype->alignment) &&
+ npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize) &&
+ npy_is_aligned(src_data, src_dtype->alignment));
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareOneRawArrayIter(
@@ -116,10 +119,13 @@ raw_array_wheremasked_assign_scalar(int ndim, npy_intp *shape,
NPY_BEGIN_THREADS_DEF;
- /* Check alignment */
+ /* Check both uint and true alignment */
aligned = raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
npy_uint_alignment(dst_dtype->elsize)) &&
- npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize));
+ raw_array_is_aligned(ndim, shape, dst_data, dst_strides,
+ dst_dtype->alignment) &&
+ npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize) &&
+ npy_is_aligned(src_data, src_dtype->alignment));
/* Use raw iteration with no heap allocation */
if (PyArray_PrepareTwoRawArrayIter(
@@ -220,7 +226,8 @@ PyArray_AssignRawScalar(PyArrayObject *dst,
* we also skip this if 'dst' has an object dtype.
*/
if ((!PyArray_EquivTypes(PyArray_DESCR(dst), src_dtype) ||
- !npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize))) &&
+ !(npy_is_aligned(src_data, npy_uint_alignment(src_dtype->elsize)) &&
+ npy_is_aligned(src_data, src_dtype->alignment))) &&
PyArray_SIZE(dst) > 1 &&
!PyDataType_REFCHK(PyArray_DESCR(dst))) {
char *tmp_src_data;
diff --git a/numpy/core/src/multiarray/arrayfunction_override.c b/numpy/core/src/multiarray/arrayfunction_override.c
new file mode 100644
index 000000000..62e597764
--- /dev/null
+++ b/numpy/core/src/multiarray/arrayfunction_override.c
@@ -0,0 +1,376 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#define _MULTIARRAYMODULE
+
+#include "npy_pycompat.h"
+#include "get_attr_string.h"
+#include "npy_import.h"
+#include "multiarraymodule.h"
+
+
+/* Return the ndarray.__array_function__ method. */
+static PyObject *
+get_ndarray_array_function(void)
+{
+ PyObject* method = PyObject_GetAttrString((PyObject *)&PyArray_Type,
+ "__array_function__");
+ assert(method != NULL);
+ return method;
+}
+
+
+/*
+ * Get an object's __array_function__ method in the fastest way possible.
+ * Never raises an exception. Returns NULL if the method doesn't exist.
+ */
+static PyObject *
+get_array_function(PyObject *obj)
+{
+ static PyObject *ndarray_array_function = NULL;
+
+ if (ndarray_array_function == NULL) {
+ ndarray_array_function = get_ndarray_array_function();
+ }
+
+ /* Fast return for ndarray */
+ if (PyArray_CheckExact(obj)) {
+ Py_INCREF(ndarray_array_function);
+ return ndarray_array_function;
+ }
+
+ return PyArray_LookupSpecial(obj, "__array_function__");
+}
+
+
+/*
+ * Like list.insert(), but for C arrays of PyObject*. Skips error checking.
+ */
+static void
+pyobject_array_insert(PyObject **array, int length, int index, PyObject *item)
+{
+ int j;
+
+ for (j = length; j > index; j--) {
+ array[j] = array[j - 1];
+ }
+ array[index] = item;
+}
+
+
+/*
+ * Collects arguments with __array_function__ and their corresponding methods
+ * in the order in which they should be tried (i.e., skipping redundant types).
+ * `relevant_args` is expected to have been produced by PySequence_Fast.
+ * Returns the number of arguments, or -1 on failure.
+ */
+static int
+get_implementing_args_and_methods(PyObject *relevant_args,
+ PyObject **implementing_args,
+ PyObject **methods)
+{
+ int num_implementing_args = 0;
+ Py_ssize_t i;
+ int j;
+
+ PyObject **items = PySequence_Fast_ITEMS(relevant_args);
+ Py_ssize_t length = PySequence_Fast_GET_SIZE(relevant_args);
+
+ for (i = 0; i < length; i++) {
+ int new_class = 1;
+ PyObject *argument = items[i];
+
+ /* Have we seen this type before? */
+ for (j = 0; j < num_implementing_args; j++) {
+ if (Py_TYPE(argument) == Py_TYPE(implementing_args[j])) {
+ new_class = 0;
+ break;
+ }
+ }
+ if (new_class) {
+ PyObject *method = get_array_function(argument);
+
+ if (method != NULL) {
+ int arg_index;
+
+ if (num_implementing_args >= NPY_MAXARGS) {
+ PyErr_Format(
+ PyExc_TypeError,
+ "maximum number (%d) of distinct argument types " \
+ "implementing __array_function__ exceeded",
+ NPY_MAXARGS);
+ Py_DECREF(method);
+ goto fail;
+ }
+
+ /* "subclasses before superclasses, otherwise left to right" */
+ arg_index = num_implementing_args;
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *other_type;
+ other_type = (PyObject *)Py_TYPE(implementing_args[j]);
+ if (PyObject_IsInstance(argument, other_type)) {
+ arg_index = j;
+ break;
+ }
+ }
+ Py_INCREF(argument);
+ pyobject_array_insert(implementing_args, num_implementing_args,
+ arg_index, argument);
+ pyobject_array_insert(methods, num_implementing_args,
+ arg_index, method);
+ ++num_implementing_args;
+ }
+ }
+ }
+ return num_implementing_args;
+
+fail:
+ for (j = 0; j < num_implementing_args; j++) {
+ Py_DECREF(implementing_args[j]);
+ Py_DECREF(methods[j]);
+ }
+ return -1;
+}
+
+
+/*
+ * Is this object ndarray.__array_function__?
+ */
+static int
+is_default_array_function(PyObject *obj)
+{
+ static PyObject *ndarray_array_function = NULL;
+
+ if (ndarray_array_function == NULL) {
+ ndarray_array_function = get_ndarray_array_function();
+ }
+ return obj == ndarray_array_function;
+}
+
+
+/*
+ * Core implementation of ndarray.__array_function__. This is exposed
+ * separately so we can avoid the overhead of a Python method call from
+ * within `implement_array_function`.
+ */
+NPY_NO_EXPORT PyObject *
+array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
+ PyObject *kwargs)
+{
+ Py_ssize_t j;
+ PyObject *implementation, *result;
+
+ PyObject **items = PySequence_Fast_ITEMS(types);
+ Py_ssize_t length = PySequence_Fast_GET_SIZE(types);
+
+ for (j = 0; j < length; j++) {
+ int is_subclass = PyObject_IsSubclass(
+ items[j], (PyObject *)&PyArray_Type);
+ if (is_subclass == -1) {
+ return NULL;
+ }
+ if (!is_subclass) {
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+ }
+ }
+
+ implementation = PyObject_GetAttr(func, npy_ma_str_implementation);
+ if (implementation == NULL) {
+ return NULL;
+ }
+ result = PyObject_Call(implementation, args, kwargs);
+ Py_DECREF(implementation);
+ return result;
+}
+
+
+/*
+ * Calls __array_function__ on the provided argument, with a fast-path for
+ * ndarray.
+ */
+static PyObject *
+call_array_function(PyObject* argument, PyObject* method,
+ PyObject* public_api, PyObject* types,
+ PyObject* args, PyObject* kwargs)
+{
+ if (is_default_array_function(method)) {
+ return array_function_method_impl(public_api, types, args, kwargs);
+ }
+ else {
+ return PyObject_CallFunctionObjArgs(
+ method, argument, public_api, types, args, kwargs, NULL);
+ }
+}
+
+
+/*
+ * Implements the __array_function__ protocol for a function, as described in
+ * in NEP-18. See numpy.core.overrides for a full docstring.
+ */
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+ PyObject *implementation, *public_api, *relevant_args, *args, *kwargs;
+
+ PyObject *types = NULL;
+ PyObject *implementing_args[NPY_MAXARGS];
+ PyObject *array_function_methods[NPY_MAXARGS];
+
+ int j, any_overrides;
+ int num_implementing_args = 0;
+ PyObject *result = NULL;
+
+ static PyObject *errmsg_formatter = NULL;
+
+ if (!PyArg_UnpackTuple(
+ positional_args, "implement_array_function", 5, 5,
+ &implementation, &public_api, &relevant_args, &args, &kwargs)) {
+ return NULL;
+ }
+
+ relevant_args = PySequence_Fast(
+ relevant_args,
+ "dispatcher for __array_function__ did not return an iterable");
+ if (relevant_args == NULL) {
+ return NULL;
+ }
+
+ /* Collect __array_function__ implementations */
+ num_implementing_args = get_implementing_args_and_methods(
+ relevant_args, implementing_args, array_function_methods);
+ if (num_implementing_args == -1) {
+ goto cleanup;
+ }
+
+ /*
+ * Handle the typical case of no overrides. This is merely an optimization
+ * if some arguments are ndarray objects, but is also necessary if no
+ * arguments implement __array_function__ at all (e.g., if they are all
+ * built-in types).
+ */
+ any_overrides = 0;
+ for (j = 0; j < num_implementing_args; j++) {
+ if (!is_default_array_function(array_function_methods[j])) {
+ any_overrides = 1;
+ break;
+ }
+ }
+ if (!any_overrides) {
+ result = PyObject_Call(implementation, args, kwargs);
+ goto cleanup;
+ }
+
+ /*
+ * Create a Python object for types.
+ * We use a tuple, because it's the fastest Python collection to create
+ * and has the bonus of being immutable.
+ */
+ types = PyTuple_New(num_implementing_args);
+ if (types == NULL) {
+ goto cleanup;
+ }
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *arg_type = (PyObject *)Py_TYPE(implementing_args[j]);
+ Py_INCREF(arg_type);
+ PyTuple_SET_ITEM(types, j, arg_type);
+ }
+
+ /* Call __array_function__ methods */
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *argument = implementing_args[j];
+ PyObject *method = array_function_methods[j];
+
+ /*
+ * We use `public_api` instead of `implementation` here so
+ * __array_function__ implementations can do equality/identity
+ * comparisons.
+ */
+ result = call_array_function(
+ argument, method, public_api, types, args, kwargs);
+
+ if (result == Py_NotImplemented) {
+ /* Try the next one */
+ Py_DECREF(result);
+ result = NULL;
+ }
+ else {
+ /* Either a good result, or an exception was raised. */
+ goto cleanup;
+ }
+ }
+
+ /* No acceptable override found, raise TypeError. */
+ npy_cache_import("numpy.core._internal",
+ "array_function_errmsg_formatter",
+ &errmsg_formatter);
+ if (errmsg_formatter != NULL) {
+ PyObject *errmsg = PyObject_CallFunctionObjArgs(
+ errmsg_formatter, public_api, types, NULL);
+ if (errmsg != NULL) {
+ PyErr_SetObject(PyExc_TypeError, errmsg);
+ Py_DECREF(errmsg);
+ }
+ }
+
+cleanup:
+ for (j = 0; j < num_implementing_args; j++) {
+ Py_DECREF(implementing_args[j]);
+ Py_DECREF(array_function_methods[j]);
+ }
+ Py_XDECREF(types);
+ Py_DECREF(relevant_args);
+ return result;
+}
+
+
+/*
+ * Python wrapper for get_implementing_args_and_methods, for testing purposes.
+ */
+NPY_NO_EXPORT PyObject *
+array__get_implementing_args(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args)
+{
+ PyObject *relevant_args;
+ int j;
+ int num_implementing_args = 0;
+ PyObject *implementing_args[NPY_MAXARGS];
+ PyObject *array_function_methods[NPY_MAXARGS];
+ PyObject *result = NULL;
+
+ if (!PyArg_ParseTuple(positional_args, "O:array__get_implementing_args",
+ &relevant_args)) {
+ return NULL;
+ }
+
+ relevant_args = PySequence_Fast(
+ relevant_args,
+ "dispatcher for __array_function__ did not return an iterable");
+ if (relevant_args == NULL) {
+ return NULL;
+ }
+
+ num_implementing_args = get_implementing_args_and_methods(
+ relevant_args, implementing_args, array_function_methods);
+ if (num_implementing_args == -1) {
+ goto cleanup;
+ }
+
+ /* create a Python object for implementing_args */
+ result = PyList_New(num_implementing_args);
+ if (result == NULL) {
+ goto cleanup;
+ }
+ for (j = 0; j < num_implementing_args; j++) {
+ PyObject *argument = implementing_args[j];
+ Py_INCREF(argument);
+ PyList_SET_ITEM(result, j, argument);
+ }
+
+cleanup:
+ for (j = 0; j < num_implementing_args; j++) {
+ Py_DECREF(implementing_args[j]);
+ Py_DECREF(array_function_methods[j]);
+ }
+ Py_DECREF(relevant_args);
+ return result;
+}
diff --git a/numpy/core/src/multiarray/arrayfunction_override.h b/numpy/core/src/multiarray/arrayfunction_override.h
new file mode 100644
index 000000000..0d224e2b6
--- /dev/null
+++ b/numpy/core/src/multiarray/arrayfunction_override.h
@@ -0,0 +1,16 @@
+#ifndef _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H
+#define _NPY_PRIVATE__ARRAYFUNCTION_OVERRIDE_H
+
+NPY_NO_EXPORT PyObject *
+array_implement_array_function(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
+
+NPY_NO_EXPORT PyObject *
+array__get_implementing_args(
+ PyObject *NPY_UNUSED(dummy), PyObject *positional_args);
+
+NPY_NO_EXPORT PyObject *
+array_function_method_impl(PyObject *func, PyObject *types, PyObject *args,
+ PyObject *kwargs);
+
+#endif
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 341682588..4e229e321 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -462,7 +462,7 @@ WARN_IN_DEALLOC(PyObject* warning, const char * msg) {
PyErr_WriteUnraisable(Py_None);
}
}
-};
+}
/* array object functions */
@@ -471,7 +471,7 @@ array_dealloc(PyArrayObject *self)
{
PyArrayObject_fields *fa = (PyArrayObject_fields *)self;
- _array_dealloc_buffer_info(self);
+ _dealloc_cached_buffer_info((PyObject*)self);
if (fa->weakreflist != NULL) {
PyObject_ClearWeakRefs((PyObject *)self);
@@ -483,10 +483,11 @@ array_dealloc(PyArrayObject *self)
char const * msg = "WRITEBACKIFCOPY detected in array_dealloc. "
" Required call to PyArray_ResolveWritebackIfCopy or "
"PyArray_DiscardWritebackIfCopy is missing.";
- Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
- *array_dealloc
- */
+ /*
+ * prevent reaching 0 twice and thus recursing into dealloc.
+ * Increasing sys.gettotalrefcount, but path should not be taken.
+ */
+ Py_INCREF(self);
WARN_IN_DEALLOC(PyExc_RuntimeWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
if (retval < 0)
@@ -500,10 +501,11 @@ array_dealloc(PyArrayObject *self)
char const * msg = "UPDATEIFCOPY detected in array_dealloc. "
" Required call to PyArray_ResolveWritebackIfCopy or "
"PyArray_DiscardWritebackIfCopy is missing";
- Py_INCREF(self); /* hold on to self in next call since if
- * refcount == 0 it will recurse back into
- *array_dealloc
- */
+ /*
+ * prevent reaching 0 twice and thus recursing into dealloc.
+ * Increasing sys.gettotalrefcount, but path should not be taken.
+ */
+ Py_INCREF(self);
/* 2017-Nov-10 1.14 */
WARN_IN_DEALLOC(PyExc_DeprecationWarning, msg);
retval = PyArray_ResolveWritebackIfCopy(self);
@@ -523,12 +525,7 @@ array_dealloc(PyArrayObject *self)
if ((fa->flags & NPY_ARRAY_OWNDATA) && fa->data) {
/* Free internal references if an Object array */
if (PyDataType_FLAGCHK(fa->descr, NPY_ITEM_REFCOUNT)) {
- Py_INCREF(self); /*hold on to self */
PyArray_XDECREF(self);
- /*
- * Don't need to DECREF -- because we are deleting
- * self already...
- */
}
npy_free_cache(fa->data, PyArray_NBYTES(self));
}
@@ -610,7 +607,7 @@ PyArray_DebugPrint(PyArrayObject *obj)
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_SetDatetimeParseFunction(PyObject *op)
+PyArray_SetDatetimeParseFunction(PyObject *NPY_UNUSED(op))
{
}
@@ -633,7 +630,7 @@ PyArray_CompareUCS4(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
/*NUMPY_API
*/
NPY_NO_EXPORT int
-PyArray_CompareString(char *s1, char *s2, size_t len)
+PyArray_CompareString(const char *s1, const char *s2, size_t len)
{
const unsigned char *c1 = (unsigned char *)s1;
const unsigned char *c2 = (unsigned char *)s2;
@@ -655,15 +652,11 @@ NPY_NO_EXPORT int
array_might_be_written(PyArrayObject *obj)
{
const char *msg =
- "Numpy has detected that you (may be) writing to an array returned\n"
- "by numpy.diagonal or by selecting multiple fields in a structured\n"
- "array. This code will likely break in a future numpy release --\n"
- "see numpy.diagonal or arrays.indexing reference docs for details.\n"
- "The quick fix is to make an explicit copy (e.g., do\n"
- "arr.diagonal().copy() or arr[['f0','f1']].copy()).";
+ "Numpy has detected that you (may be) writing to an array with\n"
+ "overlapping memory from np.broadcast_arrays. If this is intentional\n"
+ "set the WRITEABLE flag True or make a copy immediately before writing.";
if (PyArray_FLAGS(obj) & NPY_ARRAY_WARN_ON_WRITE) {
- /* 2012-07-17, 1.7 */
- if (DEPRECATE_FUTUREWARNING(msg) < 0) {
+ if (DEPRECATE(msg) < 0) {
return -1;
}
/* Only warn once per array */
@@ -1165,8 +1158,10 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op)
newdims.ptr = dimensions;
newdims.len = result_ndim+1;
- memcpy(dimensions, PyArray_DIMS((PyArrayObject *)temp),
- sizeof(npy_intp)*result_ndim);
+ if (result_ndim) {
+ memcpy(dimensions, PyArray_DIMS((PyArrayObject *)temp),
+ sizeof(npy_intp)*result_ndim);
+ }
dimensions[result_ndim] = -1;
temp2 = PyArray_Newshape((PyArrayObject *)temp,
&newdims, NPY_ANYORDER);
@@ -1205,15 +1200,28 @@ _void_compare(PyArrayObject *self, PyArrayObject *other, int cmp_op)
}
}
if (res == NULL && !PyErr_Occurred()) {
- PyErr_SetString(PyExc_ValueError, "No fields found.");
+ /* these dtypes had no fields. Use a MultiIter to broadcast them
+ * to an output array, and fill with True (for EQ)*/
+ PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *)
+ PyArray_MultiIterNew(2, self, other);
+ if (mit == NULL) {
+ return NULL;
+ }
+
+ res = PyArray_NewFromDescr(&PyArray_Type,
+ PyArray_DescrFromType(NPY_BOOL),
+ mit->nd, mit->dimensions,
+ NULL, NULL, 0, NULL);
+ Py_DECREF(mit);
+ if (res) {
+ PyArray_FILLWBYTE((PyArrayObject *)res,
+ cmp_op == Py_EQ ? 1 : 0);
+ }
}
return res;
}
else {
- /*
- * compare as a string. Assumes self and
- * other have same descr->type
- */
+ /* compare as a string. Assumes self and other have same descr->type */
return _strings_richcompare(self, other, cmp_op, 0);
}
}
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 0e69cfc07..5d9e990e8 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -2,7 +2,8 @@
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#include "structmember.h"
-
+#include <limits.h>
+#include <assert.h>
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
@@ -29,13 +30,13 @@
#include <emmintrin.h>
#endif
+#include "npy_longdouble.h"
#include "numpyos.h"
#include <string.h>
#include "cblasfuncs.h"
#include "npy_cblas.h"
-#include <limits.h>
-#include <assert.h>
+#include "buffer.h"
/* check for sequences, but ignore the types numpy considers scalars */
static NPY_INLINE npy_bool
@@ -150,32 +151,6 @@ MyPyLong_AsUnsigned@Type@ (PyObject *obj)
/**end repeat**/
-static npy_longlong
-npy_strtoll(const char *str, char **endptr, int base)
-{
-#if defined HAVE_STRTOLL
- return strtoll(str, endptr, base);
-#elif defined _MSC_VER
- return _strtoi64(str, endptr, base);
-#else
- /* ok on 64 bit posix */
- return PyOS_strtol(str, endptr, base);
-#endif
-}
-
-static npy_ulonglong
-npy_strtoull(const char *str, char **endptr, int base)
-{
-#if defined HAVE_STRTOULL
- return strtoull(str, endptr, base);
-#elif defined _MSC_VER
- return _strtoui64(str, endptr, base);
-#else
- /* ok on 64 bit posix */
- return PyOS_strtoul(str, endptr, base);
-#endif
-}
-
/*
*****************************************************************************
** GETITEM AND SETITEM **
@@ -246,9 +221,7 @@ static int
if (PySequence_NoString_Check(op)) {
PyErr_SetString(PyExc_ValueError,
"setting an array element with a sequence.");
- Py_DECREF(type);
- Py_XDECREF(value);
- Py_XDECREF(traceback);
+ npy_PyErr_ChainExceptionsCause(type, value, traceback);
}
else {
PyErr_Restore(type, value, traceback);
@@ -354,6 +327,17 @@ string_to_long_double(PyObject*op)
npy_longdouble temp;
PyObject* b;
+ /* Convert python long objects to a longdouble, without precision or range
+ * loss via a double.
+ */
+ if ((PyLong_Check(op) && !PyBool_Check(op))
+#if !defined(NPY_PY3K)
+ || (PyInt_Check(op) && !PyBool_Check(op))
+#endif
+ ) {
+ return npy_longdouble_from_PyLong(op);
+ }
+
if (PyUnicode_Check(op)) {
b = PyUnicode_AsUTF8String(op);
if (!b) {
@@ -947,6 +931,7 @@ VOID_setitem(PyObject *op, void *input, void *vap)
memset(ip + view.len, 0, itemsize - view.len);
}
PyBuffer_Release(&view);
+ _dealloc_cached_buffer_info(op);
}
#else
{
@@ -1796,8 +1781,8 @@ BOOL_scan(FILE *fp, npy_bool *ip, void *NPY_UNUSED(ignore),
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong,
* npy_datetime, npy_timedelta#
- * #func = (PyOS_strtol, PyOS_strtoul)*4, npy_strtoll, npy_strtoull,
- * npy_strtoll*2#
+ * #func = (PyOS_strtol, PyOS_strtoul)*4, NumPyOS_strtoll, NumPyOS_strtoull,
+ * NumPyOS_strtoll*2#
* #btype = (npy_long, npy_ulong)*4, npy_longlong, npy_ulonglong,
* npy_longlong*2#
*/
@@ -2230,15 +2215,19 @@ static void
VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
npy_intp n, int swap, PyArrayObject *arr)
{
+ PyArray_Descr *descr;
+
if (arr == NULL) {
return;
}
+
+ descr = PyArray_DESCR(arr);
+
if (PyArray_HASFIELDS(arr)) {
PyObject *key, *value;
- PyArray_Descr *descr;
+
Py_ssize_t pos = 0;
- descr = PyArray_DESCR(arr);
while (PyDict_Next(descr->fields, &pos, &key, &value)) {
npy_intp offset;
PyArray_Descr * new;
@@ -2261,14 +2250,28 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
- if (swap && PyArray_DESCR(arr)->subarray != NULL) {
- PyArray_Descr *descr, *new;
+ if (PyDataType_HASSUBARRAY(descr)) {
+ PyArray_Descr *new;
npy_intp num;
npy_intp i;
int subitemsize;
char *dstptr, *srcptr;
+ /*
+ * In certain cases subarray copy can be optimized. This is when
+ * swapping is unnecessary and the subarrays data type can certainly
+ * be simply copied (no object, fields, subarray, and not a user dtype).
+ */
+ npy_bool can_optimize_subarray = (!swap &&
+ !PyDataType_HASFIELDS(descr->subarray->base) &&
+ !PyDataType_HASSUBARRAY(descr->subarray->base) &&
+ !PyDataType_REFCHK(descr->subarray->base) &&
+ (descr->subarray->base->type_num < NPY_NTYPES));
+
+ if (can_optimize_subarray) {
+ _basic_copyn(dst, dstride, src, sstride, n, descr->elsize);
+ return;
+ }
- descr = PyArray_DESCR(arr);
new = descr->subarray->base;
/*
* TODO: temporarily modifying the array like this
@@ -2278,6 +2281,10 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
dstptr = dst;
srcptr = src;
subitemsize = new->elsize;
+ if (subitemsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
num = descr->elsize / subitemsize;
for (i = 0; i < n; i++) {
new->f->copyswapn(dstptr, subitemsize, srcptr,
@@ -2290,22 +2297,26 @@ VOID_copyswapn (char *dst, npy_intp dstride, char *src, npy_intp sstride,
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
- _basic_copyn(dst, dstride, src, sstride, n, PyArray_DESCR(arr)->elsize);
+ /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */
+ _basic_copyn(dst, dstride, src, sstride, n, descr->elsize);
return;
}
static void
VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
{
+ PyArray_Descr *descr;
+
if (arr == NULL) {
return;
}
+
+ descr = PyArray_DESCR(arr);
+
if (PyArray_HASFIELDS(arr)) {
PyObject *key, *value;
- PyArray_Descr *descr;
Py_ssize_t pos = 0;
- descr = PyArray_DESCR(arr);
while (PyDict_Next(descr->fields, &pos, &key, &value)) {
npy_intp offset;
PyArray_Descr * new;
@@ -2328,28 +2339,45 @@ VOID_copyswap (char *dst, char *src, int swap, PyArrayObject *arr)
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
- if (swap && PyArray_DESCR(arr)->subarray != NULL) {
- PyArray_Descr *descr, *new;
+ if (PyDataType_HASSUBARRAY(descr)) {
+ PyArray_Descr *new;
npy_intp num;
- int itemsize;
+ int subitemsize;
+ /*
+ * In certain cases subarray copy can be optimized. This is when
+ * swapping is unnecessary and the subarrays data type can certainly
+ * be simply copied (no object, fields, subarray, and not a user dtype).
+ */
+ npy_bool can_optimize_subarray = (!swap &&
+ !PyDataType_HASFIELDS(descr->subarray->base) &&
+ !PyDataType_HASSUBARRAY(descr->subarray->base) &&
+ !PyDataType_REFCHK(descr->subarray->base) &&
+ (descr->subarray->base->type_num < NPY_NTYPES));
+
+ if (can_optimize_subarray) {
+ _basic_copy(dst, src, descr->elsize);
+ return;
+ }
- descr = PyArray_DESCR(arr);
new = descr->subarray->base;
/*
* TODO: temporarily modifying the array like this
* is bad coding style, should be changed.
*/
((PyArrayObject_fields *)arr)->descr = new;
- itemsize = new->elsize;
- num = descr->elsize / itemsize;
- new->f->copyswapn(dst, itemsize, src,
- itemsize, num, swap, arr);
+ subitemsize = new->elsize;
+ if (subitemsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ num = descr->elsize / subitemsize;
+ new->f->copyswapn(dst, subitemsize, src,
+ subitemsize, num, swap, arr);
((PyArrayObject_fields *)arr)->descr = descr;
return;
}
-
- /* copy first if needed */
- _basic_copy(dst, src, PyArray_DESCR(arr)->elsize);
+ /* Must be a naive Void type (e.g. a "V8") so simple copy is sufficient. */
+ _basic_copy(dst, src, descr->elsize);
return;
}
@@ -3619,9 +3647,10 @@ OBJECT_dot(char *ip1, npy_intp is1, char *ip2, npy_intp is2, char *op, npy_intp
#define BOOL_fill NULL
/* this requires buffer to be filled with objects or NULL */
-static void
+static int
OBJECT_fill(PyObject **buffer, npy_intp length, void *NPY_UNUSED(ignored))
{
+ int retval = 0;
npy_intp i;
PyObject *start = buffer[0];
PyObject *delta = buffer[1];
@@ -3629,27 +3658,31 @@ OBJECT_fill(PyObject **buffer, npy_intp length, void *NPY_UNUSED(ignored))
delta = PyNumber_Subtract(delta, start);
if (!delta) {
- return;
+ return -1;
}
second = start = PyNumber_Add(start, delta);
if (!start) {
- goto finish;
+ goto error;
}
buffer += 2;
for (i = 2; i < length; i++, buffer++) {
start = PyNumber_Add(start, delta);
if (!start) {
- goto finish;
+ goto error;
}
Py_XDECREF(*buffer);
*buffer = start;
}
+ goto finish;
+
+error:
+ retval = -1;
finish:
Py_XDECREF(second);
Py_DECREF(delta);
- return;
+ return retval;
}
/**begin repeat
@@ -3663,7 +3696,7 @@ finish:
* npy_float, npy_double, npy_longdouble,
* npy_datetime, npy_timedelta#
*/
-static void
+static int
@NAME@_fill(@type@ *buffer, npy_intp length, void *NPY_UNUSED(ignored))
{
npy_intp i;
@@ -3674,10 +3707,11 @@ static void
for (i = 2; i < length; ++i) {
buffer[i] = start + i*delta;
}
+ return 0;
}
/**end repeat**/
-static void
+static int
HALF_fill(npy_half *buffer, npy_intp length, void *NPY_UNUSED(ignored))
{
npy_intp i;
@@ -3688,6 +3722,7 @@ HALF_fill(npy_half *buffer, npy_intp length, void *NPY_UNUSED(ignored))
for (i = 2; i < length; ++i) {
buffer[i] = npy_float_to_half(start + i*delta);
}
+ return 0;
}
/**begin repeat
@@ -3695,7 +3730,7 @@ HALF_fill(npy_half *buffer, npy_intp length, void *NPY_UNUSED(ignored))
* #NAME = CFLOAT, CDOUBLE, CLONGDOUBLE#
* #type = npy_cfloat, npy_cdouble, npy_clongdouble#
*/
-static void
+static int
@NAME@_fill(@type@ *buffer, npy_intp length, void *NPY_UNUSED(ignore))
{
npy_intp i;
@@ -3713,6 +3748,7 @@ static void
buffer->real = start.real + i*delta.real;
buffer->imag = start.imag + i*delta.imag;
}
+ return 0;
}
/**end repeat**/
@@ -3772,176 +3808,6 @@ static void
/*
*****************************************************************************
- ** FASTCLIP **
- *****************************************************************************
- */
-
-#define _LESS_THAN(a, b) ((a) < (b))
-#define _GREATER_THAN(a, b) ((a) > (b))
-
-/*
- * In fastclip, 'b' was already checked for NaN, so the half comparison
- * only needs to check 'a' for NaN.
- */
-
-#define _HALF_LESS_THAN(a, b) (!npy_half_isnan(a) && npy_half_lt_nonan(a, b))
-#define _HALF_GREATER_THAN(a, b) (!npy_half_isnan(a) && npy_half_lt_nonan(b, a))
-
-/**begin repeat
- *
- * #name = BOOL,
- * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
- * LONG, ULONG, LONGLONG, ULONGLONG,
- * HALF, FLOAT, DOUBLE, LONGDOUBLE,
- * DATETIME, TIMEDELTA#
- * #type = npy_bool,
- * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
- * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
- * npy_half, npy_float, npy_double, npy_longdouble,
- * npy_datetime, npy_timedelta#
- * #isfloat = 0*11, 1*4, 0*2#
- * #isnan = nop*11, npy_half_isnan, npy_isnan*3, nop*2#
- * #lt = _LESS_THAN*11, _HALF_LESS_THAN, _LESS_THAN*5#
- * #gt = _GREATER_THAN*11, _HALF_GREATER_THAN, _GREATER_THAN*5#
- */
-static void
-@name@_fastclip(@type@ *in, npy_intp ni, @type@ *min, @type@ *max, @type@ *out)
-{
- npy_intp i;
- @type@ max_val = 0, min_val = 0;
-
- if (max != NULL) {
- max_val = *max;
-#if @isfloat@
- /* NaNs result in no clipping, so optimize the case away */
- if (@isnan@(max_val)) {
- if (min == NULL) {
- memmove(out, in, ni * sizeof(@type@));
- return;
- }
- max = NULL;
- }
-#endif
- }
- if (min != NULL) {
- min_val = *min;
-#if @isfloat@
- if (@isnan@(min_val)) {
- if (max == NULL) {
- memmove(out, in, ni * sizeof(@type@));
- return;
- }
- min = NULL;
- }
-#endif
- }
- if (max == NULL) {
- for (i = 0; i < ni; i++) {
- if (@lt@(in[i], min_val)) {
- out[i] = min_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else if (min == NULL) {
- for (i = 0; i < ni; i++) {
- if (@gt@(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else {
- /*
- * Visual Studio 2015 loop vectorizer handles NaN in an unexpected
- * manner, see: https://github.com/numpy/numpy/issues/7601
- */
- #if (_MSC_VER == 1900)
- #pragma loop( no_vector )
- #endif
- for (i = 0; i < ni; i++) {
- if (@lt@(in[i], min_val)) {
- out[i] = min_val;
- }
- else if (@gt@(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
-}
-/**end repeat**/
-
-#undef _LESS_THAN
-#undef _GREATER_THAN
-#undef _HALF_LESS_THAN
-#undef _HALF_GREATER_THAN
-
-/**begin repeat
- *
- * #name = CFLOAT, CDOUBLE, CLONGDOUBLE#
- * #type = npy_cfloat, npy_cdouble, npy_clongdouble#
- */
-static void
-@name@_fastclip(@type@ *in, npy_intp ni, @type@ *min, @type@ *max, @type@ *out)
-{
- npy_intp i;
- @type@ max_val, min_val;
-
- if (max != NULL) {
- max_val = *max;
- }
- if (min != NULL) {
- min_val = *min;
- }
- if (max == NULL) {
- for (i = 0; i < ni; i++) {
- if (PyArray_CLT(in[i],min_val)) {
- out[i] = min_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else if (min == NULL) {
- for (i = 0; i < ni; i++) {
- if (PyArray_CGT(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
- else {
- for (i = 0; i < ni; i++) {
- if (PyArray_CLT(in[i], min_val)) {
- out[i] = min_val;
- }
- else if (PyArray_CGT(in[i], max_val)) {
- out[i] = max_val;
- }
- else {
- out[i] = in[i];
- }
- }
- }
-}
-
-/**end repeat**/
-
-#define OBJECT_fastclip NULL
-
-
-/*
- *****************************************************************************
** FASTPUTMASK **
*****************************************************************************
*/
@@ -4192,6 +4058,53 @@ small_correlate(const char * d_, npy_intp dstride,
}
/*
+*/
+
+/* A clone function for the datetime dtype c_metadata */
+static NpyAuxData *
+_datetime_dtype_metadata_clone(NpyAuxData *data)
+{
+ PyArray_DatetimeDTypeMetaData *newdata =
+ (PyArray_DatetimeDTypeMetaData *)PyArray_malloc(
+ sizeof(*newdata));
+ if (newdata == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ memcpy(newdata, data, sizeof(*newdata));
+
+ return (NpyAuxData *)newdata;
+}
+
+/*
+ * Allocate and initialize a PyArray_DatetimeDTypeMetaData object
+ */
+static NpyAuxData*
+_create_datetime_metadata(NPY_DATETIMEUNIT base, int num)
+{
+ PyArray_DatetimeDTypeMetaData *data;
+
+ /* Allocate memory for the metadata */
+ data = PyArray_malloc(sizeof(*data));
+ if (data == NULL) {
+ PyErr_NoMemory();
+ return NULL;
+ }
+
+ /* Initialize the base aux data */
+ memset(data, 0, sizeof(PyArray_DatetimeDTypeMetaData));
+ data->base.free = (NpyAuxData_FreeFunc *)PyArray_free;
+ data->base.clone = _datetime_dtype_metadata_clone;
+
+ data->meta.base = base;
+ data->meta.num = num;
+
+ return (NpyAuxData*)data;
+}
+
+
+/*
*****************************************************************************
** SETUP FUNCTION POINTERS **
*****************************************************************************
@@ -4247,12 +4160,12 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
{
quicksort_@suff@,
heapsort_@suff@,
- mergesort_@suff@
+ timsort_@suff@
},
{
aquicksort_@suff@,
aheapsort_@suff@,
- amergesort_@suff@
+ atimsort_@suff@
},
#else
{
@@ -4332,6 +4245,7 @@ static PyArray_Descr @from@_Descr = {
* npy_half, npy_float, npy_double, npy_longdouble,
* npy_cfloat, npy_cdouble, npy_clongdouble,
* PyObject *, npy_datetime, npy_timedelta#
+ * #rsort = 1*5, 0*16#
* #NAME = Bool,
* Byte, UByte, Short, UShort, Int, UInt,
* Long, ULong, LongLong, ULongLong,
@@ -4388,12 +4302,20 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
{
quicksort_@suff@,
heapsort_@suff@,
- mergesort_@suff@
+ #if @rsort@
+ radixsort_@suff@
+ #else
+ timsort_@suff@
+ #endif
},
{
aquicksort_@suff@,
aheapsort_@suff@,
- amergesort_@suff@
+ #if @rsort@
+ aradixsort_@suff@
+ #else
+ atimsort_@suff@
+ #endif
},
#else
{
@@ -4407,7 +4329,7 @@ static PyArray_ArrFuncs _Py@NAME@_ArrFuncs = {
(PyArray_ScalarKindFunc*)NULL,
NULL,
NULL,
- (PyArray_FastClipFunc*)@from@_fastclip,
+ (PyArray_FastClipFunc*)NULL,
(PyArray_FastPutmaskFunc*)@from@_fastputmask,
(PyArray_FastTakeFunc*)@from@_fasttake,
(PyArray_ArgFunc*)@from@_argmin
@@ -4490,7 +4412,17 @@ PyArray_DescrFromType(int type)
{
PyArray_Descr *ret = NULL;
- if (type < NPY_NTYPES) {
+ if (type < 0) {
+ /*
+ * It's not valid for type to be less than 0.
+ * If that happens, then no other branch of
+ * this if/else chain should be followed.
+ * This is effectively a no-op that ensures
+ * the default error is raised.
+ */
+ ret = NULL;
+ }
+ else if (type < NPY_NTYPES) {
ret = _builtin_descrs[type];
}
else if (type == NPY_NOTYPE) {
@@ -4547,66 +4479,6 @@ PyArray_DescrFromType(int type)
return ret;
}
-/* A clone function for the datetime dtype metadata */
-static NpyAuxData *
-datetime_dtype_metadata_clone(NpyAuxData *data)
-{
- PyArray_DatetimeDTypeMetaData *newdata =
- (PyArray_DatetimeDTypeMetaData *)PyArray_malloc(
- sizeof(PyArray_DatetimeDTypeMetaData));
- if (newdata == NULL) {
- return NULL;
- }
-
- memcpy(newdata, data, sizeof(PyArray_DatetimeDTypeMetaData));
-
- return (NpyAuxData *)newdata;
-}
-
-/*
- * Initializes the c_metadata field for the _builtin_descrs DATETIME
- * and TIMEDELTA.
- *
- * must not be static, gcc 4.1.2 on redhat 5 then miscompiles this function
- * see gh-5163
- *
- */
-NPY_NO_EXPORT int
-initialize_builtin_datetime_metadata(void)
-{
- PyArray_DatetimeDTypeMetaData *data1, *data2;
-
- /* Allocate memory for the metadata */
- data1 = PyArray_malloc(sizeof(PyArray_DatetimeDTypeMetaData));
- if (data1 == NULL) {
- return -1;
- }
- data2 = PyArray_malloc(sizeof(PyArray_DatetimeDTypeMetaData));
- if (data2 == NULL) {
- PyArray_free(data1);
- return -1;
- }
-
- /* Initialize the base aux data */
- memset(data1, 0, sizeof(PyArray_DatetimeDTypeMetaData));
- memset(data2, 0, sizeof(PyArray_DatetimeDTypeMetaData));
- data1->base.free = (NpyAuxData_FreeFunc *)PyArray_free;
- data2->base.free = (NpyAuxData_FreeFunc *)PyArray_free;
- data1->base.clone = datetime_dtype_metadata_clone;
- data2->base.clone = datetime_dtype_metadata_clone;
-
- /* Set to the default metadata */
- data1->meta.base = NPY_DATETIME_DEFAULTUNIT;
- data1->meta.num = 1;
- data2->meta.base = NPY_DATETIME_DEFAULTUNIT;
- data2->meta.num = 1;
-
- _builtin_descrs[NPY_DATETIME]->c_metadata = (NpyAuxData *)data1;
- _builtin_descrs[NPY_TIMEDELTA]->c_metadata = (NpyAuxData *)data2;
-
- return 0;
-}
-
/*
*****************************************************************************
** SETUP TYPE INFO **
@@ -4675,7 +4547,14 @@ set_typeinfo(PyObject *dict)
/**end repeat**/
- if (initialize_builtin_datetime_metadata() < 0) {
+ _builtin_descrs[NPY_DATETIME]->c_metadata = _create_datetime_metadata(
+ NPY_DATETIME_DEFAULTUNIT, 1);
+ if (_builtin_descrs[NPY_DATETIME]->c_metadata == NULL) {
+ return -1;
+ }
+ _builtin_descrs[NPY_TIMEDELTA]->c_metadata = _create_datetime_metadata(
+ NPY_DATETIME_DEFAULTUNIT, 1);
+ if (_builtin_descrs[NPY_TIMEDELTA]->c_metadata == NULL) {
return -1;
}
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index 9a2750aea..b729027ad 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -509,6 +509,10 @@ _buffer_info_new(PyObject *obj)
PyArray_Descr *descr = NULL;
int err = 0;
+ /*
+ * Note that the buffer info is cached as pyints making them appear like
+ * unreachable lost memory to valgrind.
+ */
info = malloc(sizeof(_buffer_info_t));
if (info == NULL) {
PyErr_NoMemory();
@@ -579,9 +583,11 @@ _buffer_info_new(PyObject *obj)
err = _buffer_format_string(descr, &fmt, obj, NULL, NULL);
Py_DECREF(descr);
if (err != 0) {
+ free(info->shape);
goto fail;
}
if (_append_char(&fmt, '\0') < 0) {
+ free(info->shape);
goto fail;
}
info->format = fmt.s;
@@ -765,17 +771,6 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
goto fail;
}
}
- /*
- * If a read-only buffer is requested on a read-write array, we return a
- * read-write buffer, which is dubious behavior. But that's why this call
- * is guarded by PyArray_ISWRITEABLE rather than (flags &
- * PyBUF_WRITEABLE).
- */
- if (PyArray_ISWRITEABLE(self)) {
- if (array_might_be_written(self) < 0) {
- goto fail;
- }
- }
if (view == NULL) {
PyErr_SetString(PyExc_ValueError, "NULL view in getbuffer");
@@ -791,7 +786,17 @@ array_getbuffer(PyObject *obj, Py_buffer *view, int flags)
view->buf = PyArray_DATA(self);
view->suboffsets = NULL;
view->itemsize = PyArray_ITEMSIZE(self);
- view->readonly = !PyArray_ISWRITEABLE(self);
+ /*
+ * If a read-only buffer is requested on a read-write array, we return a
+ * read-write buffer as per buffer protocol.
+ * We set a requested buffer to readonly also if the array will be readonly
+ * after a deprecation. This jumps the deprecation, but avoiding the
+ * warning is not convenient here. A warning is given if a writeable
+ * buffer is requested since `PyArray_FailUnlessWriteable` is called above
+ * (and clears the `NPY_ARRAY_WARN_ON_WRITE` flag).
+ */
+ view->readonly = (!PyArray_ISWRITEABLE(self) ||
+ PyArray_CHKFLAGS(self, NPY_ARRAY_WARN_ON_WRITE));
view->internal = NULL;
view->len = PyArray_NBYTES(self);
if ((flags & PyBUF_FORMAT) == PyBUF_FORMAT) {
@@ -922,7 +927,7 @@ fail:
*/
NPY_NO_EXPORT void
-_array_dealloc_buffer_info(PyArrayObject *self)
+_dealloc_cached_buffer_info(PyObject *self)
{
int reset_error_state = 0;
PyObject *ptype, *pvalue, *ptraceback;
@@ -936,7 +941,7 @@ _array_dealloc_buffer_info(PyArrayObject *self)
PyErr_Fetch(&ptype, &pvalue, &ptraceback);
}
- _buffer_clear_info((PyObject*)self);
+ _buffer_clear_info(self);
if (reset_error_state) {
PyErr_Restore(ptype, pvalue, ptraceback);
diff --git a/numpy/core/src/multiarray/buffer.h b/numpy/core/src/multiarray/buffer.h
index d5da8f440..fae413c85 100644
--- a/numpy/core/src/multiarray/buffer.h
+++ b/numpy/core/src/multiarray/buffer.h
@@ -4,7 +4,7 @@
extern NPY_NO_EXPORT PyBufferProcs array_as_buffer;
NPY_NO_EXPORT void
-_array_dealloc_buffer_info(PyArrayObject *self);
+_dealloc_cached_buffer_info(PyObject *self);
NPY_NO_EXPORT PyArray_Descr*
_descriptor_from_pep3118_format(char *s);
diff --git a/numpy/core/src/multiarray/calculation.c b/numpy/core/src/multiarray/calculation.c
index 90ee2c5b2..1d72a5227 100644
--- a/numpy/core/src/multiarray/calculation.c
+++ b/numpy/core/src/multiarray/calculation.c
@@ -918,6 +918,27 @@ PyArray_Clip(PyArrayObject *self, PyObject *min, PyObject *max, PyArrayObject *o
}
func = PyArray_DESCR(self)->f->fastclip;
+ if (func == NULL) {
+ if (min == NULL) {
+ return PyObject_CallFunctionObjArgs(n_ops.minimum, self, max, out, NULL);
+ }
+ else if (max == NULL) {
+ return PyObject_CallFunctionObjArgs(n_ops.maximum, self, min, out, NULL);
+ }
+ else {
+ return PyObject_CallFunctionObjArgs(n_ops.clip, self, min, max, out, NULL);
+ }
+ }
+
+ /* NumPy 1.17.0, 2019-02-24 */
+ if (DEPRECATE(
+ "->f->fastclip is deprecated. Use PyUFunc_RegisterLoopForDescr to "
+ "attach a custom loop to np.core.umath.clip, np.minimum, and "
+ "np.maximum") < 0) {
+ return NULL;
+ }
+ /* everything below can be removed once this deprecation completes */
+
if (func == NULL
|| (min != NULL && !PyArray_CheckAnyScalar(min))
|| (max != NULL && !PyArray_CheckAnyScalar(max))
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index 5b4611e8a..3270bc20d 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -147,7 +147,6 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (dtype == NULL) {
goto fail;
}
- Py_INCREF(dtype);
goto promote_types;
}
/* Check if it's a NumPy scalar */
@@ -164,7 +163,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
if (string_type == NPY_STRING) {
if ((temp = PyObject_Str(obj)) == NULL) {
- return -1;
+ goto fail;
}
#if defined(NPY_PY3K)
#if PY_VERSION_HEX >= 0x03030000
@@ -182,7 +181,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
#else
if ((temp = PyObject_Unicode(obj)) == NULL) {
#endif
- return -1;
+ goto fail;
}
itemsize = PyUnicode_GET_DATA_SIZE(temp);
#ifndef Py_UNICODE_WIDE
@@ -214,9 +213,13 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
int itemsize;
PyObject *temp;
+ /* dtype is not used in this (string discovery) branch */
+ Py_DECREF(dtype);
+ dtype = NULL;
+
if (string_type == NPY_STRING) {
if ((temp = PyObject_Str(obj)) == NULL) {
- return -1;
+ goto fail;
}
#if defined(NPY_PY3K)
#if PY_VERSION_HEX >= 0x03030000
@@ -234,7 +237,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
#else
if ((temp = PyObject_Unicode(obj)) == NULL) {
#endif
- return -1;
+ goto fail;
}
itemsize = PyUnicode_GET_DATA_SIZE(temp);
#ifndef Py_UNICODE_WIDE
@@ -312,6 +315,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
PyErr_Clear();
dtype = _descriptor_from_pep3118_format(buffer_view.format);
PyBuffer_Release(&buffer_view);
+ _dealloc_cached_buffer_info(obj);
if (dtype) {
goto promote_types;
}
@@ -323,6 +327,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
dtype = PyArray_DescrNewFromType(NPY_VOID);
dtype->elsize = buffer_view.itemsize;
PyBuffer_Release(&buffer_view);
+ _dealloc_cached_buffer_info(obj);
goto promote_types;
}
else {
@@ -341,7 +346,7 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
typestr = PyDict_GetItemString(ip, "typestr");
#if defined(NPY_PY3K)
/* Allow unicode type strings */
- if (PyUnicode_Check(typestr)) {
+ if (typestr && PyUnicode_Check(typestr)) {
tmp = PyUnicode_AsASCIIString(typestr);
typestr = tmp;
}
@@ -438,12 +443,18 @@ PyArray_DTypeFromObjectHelper(PyObject *obj, int maxdims,
return 0;
}
- /* Recursive case, first check the sequence contains only one type */
+ /*
+ * The C-API recommends calling PySequence_Fast before any of the other
+ * PySequence_Fast* functions. This is required for PyPy
+ */
seq = PySequence_Fast(obj, "Could not convert object to sequence");
if (seq == NULL) {
goto fail;
}
+
+ /* Recursive case, first check the sequence contains only one type */
size = PySequence_Fast_GET_SIZE(seq);
+ /* objects is borrowed, do not release seq */
objects = PySequence_Fast_ITEMS(seq);
common_type = size > 0 ? Py_TYPE(objects[0]) : NULL;
for (i = 1; i < size; ++i) {
@@ -503,7 +514,7 @@ promote_types:
PyArray_Descr *res_dtype = PyArray_PromoteTypes(dtype, *out_dtype);
Py_DECREF(dtype);
if (res_dtype == NULL) {
- return -1;
+ goto fail;
}
if (!string_type &&
res_dtype->type_num == NPY_UNICODE &&
@@ -590,7 +601,7 @@ _zerofill(PyArrayObject *ret)
NPY_NO_EXPORT npy_bool
_IsWriteable(PyArrayObject *ap)
{
- PyObject *base=PyArray_BASE(ap);
+ PyObject *base = PyArray_BASE(ap);
#if defined(NPY_PY3K)
Py_buffer view;
#else
@@ -598,44 +609,62 @@ _IsWriteable(PyArrayObject *ap)
Py_ssize_t n;
#endif
- /* If we own our own data, then no-problem */
- if ((base == NULL) || (PyArray_FLAGS(ap) & NPY_ARRAY_OWNDATA)) {
+ /*
+ * C-data wrapping arrays may not own their data while not having a base;
+ * WRITEBACKIFCOPY arrays have a base, but do own their data.
+ */
+ if (base == NULL || PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA)) {
+ /*
+ * This is somewhat unsafe for directly wrapped non-writable C-arrays,
+ * which do not know whether the memory area is writable or not and
+ * do not own their data (but have no base).
+ * It would be better if this returned PyArray_ISWRITEABLE(ap).
+ * Since it is hard to deprecate, this is deprecated only on the Python
+ * side, but not on in PyArray_UpdateFlags.
+ */
return NPY_TRUE;
}
+
/*
- * Get to the final base object
- * If it is a writeable array, then return TRUE
- * If we can find an array object
- * or a writeable buffer object as the final base object
- * or a string object (for pickling support memory savings).
- * - this last could be removed if a proper pickleable
- * buffer was added to Python.
- *
- * MW: I think it would better to disallow switching from READONLY
- * to WRITEABLE like this...
+ * Get to the final base object.
+ * If it is a writeable array, then return True if we can
+ * find an array object or a writeable buffer object as
+ * the final base object.
*/
+ while (PyArray_Check(base)) {
+ ap = (PyArrayObject *)base;
+ base = PyArray_BASE(ap);
- while(PyArray_Check(base)) {
- if (PyArray_CHKFLAGS((PyArrayObject *)base, NPY_ARRAY_OWNDATA)) {
- return (npy_bool) (PyArray_ISWRITEABLE((PyArrayObject *)base));
+ if (PyArray_ISWRITEABLE(ap)) {
+ /*
+ * If any base is writeable, it must be OK to switch, note that
+ * bases are typically collapsed to always point to the most
+ * general one.
+ */
+ return NPY_TRUE;
}
- base = PyArray_BASE((PyArrayObject *)base);
- }
- /*
- * here so pickle support works seamlessly
- * and unpickled array can be set and reset writeable
- * -- could be abused --
- */
- if (PyString_Check(base)) {
- return NPY_TRUE;
+ if (base == NULL || PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA)) {
+ /* there is no further base to test the writeable flag for */
+ return NPY_FALSE;
+ }
+ assert(!PyArray_CHKFLAGS(ap, NPY_ARRAY_OWNDATA));
}
+
#if defined(NPY_PY3K)
if (PyObject_GetBuffer(base, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
PyErr_Clear();
return NPY_FALSE;
}
PyBuffer_Release(&view);
+ /*
+ * The first call to PyObject_GetBuffer stores a reference to a struct
+ * _buffer_info_t (from buffer.c, with format, ndim, strides and shape) in
+ * a static dictionary, with id(base) as the key. Usually we release it
+ * after the call to PyBuffer_Release, via a call to
+ * _dealloc_cached_buffer_info, but in this case leave it in the cache to
+ * speed up future calls to _IsWriteable.
+ */
#else
if (PyObject_AsWriteBuffer(base, &dummy, &n) < 0) {
PyErr_Clear();
diff --git a/numpy/core/src/multiarray/common.h b/numpy/core/src/multiarray/common.h
index 2b8d3d3a4..487d530a1 100644
--- a/numpy/core/src/multiarray/common.h
+++ b/numpy/core/src/multiarray/common.h
@@ -149,7 +149,7 @@ check_and_adjust_axis_msg(int *axis, int ndim, PyObject *msg_prefix)
PyObject *exc;
if (AxisError_cls == NULL) {
- PyObject *mod = PyImport_ImportModule("numpy.core._internal");
+ PyObject *mod = PyImport_ImportModule("numpy.core._exceptions");
if (mod != NULL) {
AxisError_cls = PyObject_GetAttrString(mod, "AxisError");
@@ -182,6 +182,7 @@ check_and_adjust_axis(int *axis, int ndim)
/* used for some alignment checks */
#define _ALIGN(type) offsetof(struct {char c; type v;}, v)
+#define _UINT_ALIGN(type) npy_uint_alignment(sizeof(type))
/*
* Disable harmless compiler warning "4116: unnamed type definition in
* parentheses" which is caused by the _ALIGN macro.
@@ -201,6 +202,7 @@ npy_is_aligned(const void * p, const npy_uintp alignment)
* Assumes cast from pointer to uintp gives a sensible representation we
* can use bitwise & on (not required by C standard, but used by glibc).
* This test is faster than a direct modulo.
+ * Note alignment value of 0 is allowed and returns False.
*/
return ((npy_uintp)(p) & ((alignment) - 1)) == 0;
}
diff --git a/numpy/core/src/multiarray/compiled_base.c b/numpy/core/src/multiarray/compiled_base.c
index 1c27f8394..055d3e60f 100644
--- a/numpy/core/src/multiarray/compiled_base.c
+++ b/numpy/core/src/multiarray/compiled_base.c
@@ -328,6 +328,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
} else {
Py_XDECREF(values);
Py_XDECREF(mask);
+ PyArray_ResolveWritebackIfCopy(array);
Py_XDECREF(array);
Py_RETURN_NONE;
}
@@ -358,6 +359,7 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
fail:
Py_XDECREF(mask);
+ PyArray_ResolveWritebackIfCopy(array);
Py_XDECREF(array);
Py_XDECREF(values);
return NULL;
@@ -365,6 +367,18 @@ arr_insert(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
#define LIKELY_IN_CACHE_SIZE 8
+#ifdef __INTEL_COMPILER
+#pragma intel optimization_level 0
+#endif
+static NPY_INLINE npy_intp
+_linear_search(const npy_double key, const npy_double *arr, const npy_intp len, const npy_intp i0)
+{
+ npy_intp i;
+
+ for (i = i0; i < len && key >= arr[i]; i++);
+ return i - 1;
+}
+
/** @brief find index of a sorted array such that arr[i] <= key < arr[i + 1].
*
* If an starting index guess is in-range, the array values around this
@@ -404,10 +418,7 @@ binary_search_with_guess(const npy_double key, const npy_double *arr,
* From above we know key >= arr[0] when we start.
*/
if (len <= 4) {
- npy_intp i;
-
- for (i = 1; i < len && key >= arr[i]; ++i);
- return i - 1;
+ return _linear_search(key, arr, len, 1);
}
if (guess > len - 3) {
@@ -565,6 +576,7 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
if (lenxp <= lenx) {
slopes = PyArray_malloc((lenxp - 1) * sizeof(npy_double));
if (slopes == NULL) {
+ PyErr_NoMemory();
goto fail;
}
}
@@ -600,9 +612,18 @@ arr_interp(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
dres[i] = dy[j];
}
else {
- const npy_double slope = (slopes != NULL) ? slopes[j] :
- (dy[j+1] - dy[j]) / (dx[j+1] - dx[j]);
+ const npy_double slope =
+ (slopes != NULL) ? slopes[j] :
+ (dy[j+1] - dy[j]) / (dx[j+1] - dx[j]);
+
+ /* If we get nan in one direction, try the other */
dres[i] = slope*(x_val - dx[j]) + dy[j];
+ if (NPY_UNLIKELY(npy_isnan(dres[i]))) {
+ dres[i] = slope*(x_val - dx[j+1]) + dy[j+1];
+ if (NPY_UNLIKELY(npy_isnan(dres[i])) && dy[j] == dy[j+1]) {
+ dres[i] = dy[j];
+ }
+ }
}
}
@@ -734,6 +755,7 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
if (lenxp <= lenx) {
slopes = PyArray_malloc((lenxp - 1) * sizeof(npy_cdouble));
if (slopes == NULL) {
+ PyErr_NoMemory();
goto fail;
}
}
@@ -772,16 +794,32 @@ arr_interp_complex(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwdict)
dres[i] = dy[j];
}
else {
- if (slopes!=NULL) {
- dres[i].real = slopes[j].real*(x_val - dx[j]) + dy[j].real;
- dres[i].imag = slopes[j].imag*(x_val - dx[j]) + dy[j].imag;
+ npy_cdouble slope;
+ if (slopes != NULL) {
+ slope = slopes[j];
}
else {
const npy_double inv_dx = 1.0 / (dx[j+1] - dx[j]);
- dres[i].real = (dy[j+1].real - dy[j].real)*(x_val - dx[j])*
- inv_dx + dy[j].real;
- dres[i].imag = (dy[j+1].imag - dy[j].imag)*(x_val - dx[j])*
- inv_dx + dy[j].imag;
+ slope.real = (dy[j+1].real - dy[j].real) * inv_dx;
+ slope.imag = (dy[j+1].imag - dy[j].imag) * inv_dx;
+ }
+
+ /* If we get nan in one direction, try the other */
+ dres[i].real = slope.real*(x_val - dx[j]) + dy[j].real;
+ if (NPY_UNLIKELY(npy_isnan(dres[i].real))) {
+ dres[i].real = slope.real*(x_val - dx[j+1]) + dy[j+1].real;
+ if (NPY_UNLIKELY(npy_isnan(dres[i].real)) &&
+ dy[j].real == dy[j+1].real) {
+ dres[i].real = dy[j].real;
+ }
+ }
+ dres[i].imag = slope.imag*(x_val - dx[j]) + dy[j].imag;
+ if (NPY_UNLIKELY(npy_isnan(dres[i].imag))) {
+ dres[i].imag = slope.imag*(x_val - dx[j+1]) + dy[j+1].imag;
+ if (NPY_UNLIKELY(npy_isnan(dres[i].imag)) &&
+ dy[j].imag == dy[j+1].imag) {
+ dres[i].imag = dy[j].imag;
+ }
}
}
}
@@ -803,17 +841,63 @@ fail:
return NULL;
}
+static const char *EMPTY_SEQUENCE_ERR_MSG = "indices must be integral: the provided " \
+ "empty sequence was inferred as float. Wrap it with " \
+ "'np.array(indices, dtype=np.intp)'";
+
+static const char *NON_INTEGRAL_ERROR_MSG = "only int indices permitted";
+
+/* Convert obj to an ndarray with integer dtype or fail */
+static PyArrayObject *
+astype_anyint(PyObject *obj) {
+ PyArrayObject *ret;
+
+ if (!PyArray_Check(obj)) {
+ /* prefer int dtype */
+ PyArray_Descr *dtype_guess = NULL;
+ if (PyArray_DTypeFromObject(obj, NPY_MAXDIMS, &dtype_guess) < 0) {
+ return NULL;
+ }
+ if (dtype_guess == NULL) {
+ if (PySequence_Check(obj) && PySequence_Size(obj) == 0) {
+ PyErr_SetString(PyExc_TypeError, EMPTY_SEQUENCE_ERR_MSG);
+ }
+ return NULL;
+ }
+ ret = (PyArrayObject*)PyArray_FromAny(obj, dtype_guess, 0, 0, 0, NULL);
+ if (ret == NULL) {
+ return NULL;
+ }
+ }
+ else {
+ ret = (PyArrayObject *)obj;
+ Py_INCREF(ret);
+ }
+
+ if (!(PyArray_ISINTEGER(ret) || PyArray_ISBOOL(ret))) {
+ /* ensure dtype is int-based */
+ PyErr_SetString(PyExc_TypeError, NON_INTEGRAL_ERROR_MSG);
+ Py_DECREF(ret);
+ return NULL;
+ }
+
+ return ret;
+}
+
/*
* Converts a Python sequence into 'count' PyArrayObjects
*
- * seq - Input Python object, usually a tuple but any sequence works.
- * op - Where the arrays are placed.
- * count - How many arrays there should be (errors if it doesn't match).
- * paramname - The name of the parameter that produced 'seq'.
+ * seq - Input Python object, usually a tuple but any sequence works.
+ * Must have integral content.
+ * paramname - The name of the parameter that produced 'seq'.
+ * count - How many arrays there should be (errors if it doesn't match).
+ * op - Where the arrays are placed.
*/
-static int sequence_to_arrays(PyObject *seq,
- PyArrayObject **op, int count,
- char *paramname)
+static int int_sequence_to_arrays(PyObject *seq,
+ char *paramname,
+ int count,
+ PyArrayObject **op
+ )
{
int i;
@@ -827,30 +911,26 @@ static int sequence_to_arrays(PyObject *seq,
for (i = 0; i < count; ++i) {
PyObject *item = PySequence_GetItem(seq, i);
if (item == NULL) {
- while (--i >= 0) {
- Py_DECREF(op[i]);
- op[i] = NULL;
- }
- return -1;
+ goto fail;
}
-
- op[i] = (PyArrayObject *)PyArray_FROM_O(item);
+ op[i] = astype_anyint(item);
+ Py_DECREF(item);
if (op[i] == NULL) {
- while (--i >= 0) {
- Py_DECREF(op[i]);
- op[i] = NULL;
- }
- Py_DECREF(item);
- return -1;
+ goto fail;
}
-
- Py_DECREF(item);
}
return 0;
+
+fail:
+ while (--i >= 0) {
+ Py_XDECREF(op[i]);
+ op[i] = NULL;
+ }
+ return -1;
}
-/* Inner loop for unravel_index */
+/* Inner loop for ravel_multi_index */
static int
ravel_multi_index_loop(int ravel_ndim, npy_intp *ravel_dims,
npy_intp *ravel_strides,
@@ -862,6 +942,20 @@ ravel_multi_index_loop(int ravel_ndim, npy_intp *ravel_dims,
char invalid;
npy_intp j, m;
+ /*
+ * Check for 0-dimensional axes unless there is nothing to do.
+ * An empty array/shape cannot be indexed at all.
+ */
+ if (count != 0) {
+ for (i = 0; i < ravel_ndim; ++i) {
+ if (ravel_dims[i] == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot unravel if shape has zero entries (is empty).");
+ return NPY_FAIL;
+ }
+ }
+ }
+
NPY_BEGIN_ALLOW_THREADS;
invalid = 0;
while (count--) {
@@ -994,11 +1088,10 @@ arr_ravel_multi_index(PyObject *self, PyObject *args, PyObject *kwds)
}
/* Get the multi_index into op */
- if (sequence_to_arrays(coords0, op, dimensions.len, "multi_index") < 0) {
+ if (int_sequence_to_arrays(coords0, "multi_index", dimensions.len, op) < 0) {
goto fail;
}
-
for (i = 0; i < dimensions.len; ++i) {
op_flags[i] = NPY_ITER_READONLY|
NPY_ITER_ALIGNED;
@@ -1065,67 +1158,44 @@ fail:
return NULL;
}
-/* C-order inner loop for unravel_index */
-static int
-unravel_index_loop_corder(int unravel_ndim, npy_intp *unravel_dims,
- npy_intp unravel_size, npy_intp count,
- char *indices, npy_intp indices_stride,
- npy_intp *coords)
-{
- int i;
- char invalid;
- npy_intp val;
- NPY_BEGIN_ALLOW_THREADS;
- invalid = 0;
- while (count--) {
- val = *(npy_intp *)indices;
- if (val < 0 || val >= unravel_size) {
- invalid = 1;
- break;
- }
- for (i = unravel_ndim-1; i >= 0; --i) {
- coords[i] = val % unravel_dims[i];
- val /= unravel_dims[i];
- }
- coords += unravel_ndim;
- indices += indices_stride;
- }
- NPY_END_ALLOW_THREADS;
- if (invalid) {
- PyErr_Format(PyExc_ValueError,
- "index %" NPY_INTP_FMT " is out of bounds for array with size "
- "%" NPY_INTP_FMT,
- val, unravel_size
- );
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
-}
-
-/* Fortran-order inner loop for unravel_index */
+/*
+ * Inner loop for unravel_index
+ * order must be NPY_CORDER or NPY_FORTRANORDER
+ */
static int
-unravel_index_loop_forder(int unravel_ndim, npy_intp *unravel_dims,
- npy_intp unravel_size, npy_intp count,
- char *indices, npy_intp indices_stride,
- npy_intp *coords)
+unravel_index_loop(int unravel_ndim, npy_intp *unravel_dims,
+ npy_intp unravel_size, npy_intp count,
+ char *indices, npy_intp indices_stride,
+ npy_intp *coords, NPY_ORDER order)
{
- int i;
- char invalid;
- npy_intp val;
+ int i, idx;
+ int idx_start = (order == NPY_CORDER) ? unravel_ndim - 1: 0;
+ int idx_step = (order == NPY_CORDER) ? -1 : 1;
+ char invalid = 0;
+ npy_intp val = 0;
NPY_BEGIN_ALLOW_THREADS;
- invalid = 0;
+ /* NPY_KEEPORDER or NPY_ANYORDER have no meaning in this setting */
+ assert(order == NPY_CORDER || order == NPY_FORTRANORDER);
while (count--) {
val = *(npy_intp *)indices;
if (val < 0 || val >= unravel_size) {
invalid = 1;
break;
}
+ idx = idx_start;
for (i = 0; i < unravel_ndim; ++i) {
- *coords++ = val % unravel_dims[i];
- val /= unravel_dims[i];
+ /*
+ * Using a local seems to enable single-divide optimization
+ * but only if the / precedes the %
+ */
+ npy_intp tmp = val / unravel_dims[idx];
+ coords[idx] = val % unravel_dims[idx];
+ val = tmp;
+ idx += idx_step;
}
+ coords += unravel_ndim;
indices += indices_stride;
}
NPY_END_ALLOW_THREADS;
@@ -1144,11 +1214,12 @@ unravel_index_loop_forder(int unravel_ndim, npy_intp *unravel_dims,
NPY_NO_EXPORT PyObject *
arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
{
- PyObject *indices0 = NULL, *ret_tuple = NULL;
+ PyObject *indices0 = NULL;
+ PyObject *ret_tuple = NULL;
PyArrayObject *ret_arr = NULL;
PyArrayObject *indices = NULL;
PyArray_Descr *dtype = NULL;
- PyArray_Dims dimensions={0,0};
+ PyArray_Dims dimensions = {0, 0};
NPY_ORDER order = NPY_CORDER;
npy_intp unravel_size;
@@ -1156,7 +1227,32 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
int i, ret_ndim;
npy_intp ret_dims[NPY_MAXDIMS], ret_strides[NPY_MAXDIMS];
- char *kwlist[] = {"indices", "dims", "order", NULL};
+ char *kwlist[] = {"indices", "shape", "order", NULL};
+
+ /*
+ * TODO: remove this in favor of warning raised in the dispatcher when
+ * __array_function__ is enabled by default.
+ */
+
+ /*
+ * Continue to support the older "dims" argument in place
+ * of the "shape" argument. Issue an appropriate warning
+ * if "dims" is detected in keywords, then replace it with
+ * the new "shape" argument and continue processing as usual.
+ */
+ if (kwds) {
+ PyObject *dims_item, *shape_item;
+ dims_item = PyDict_GetItemString(kwds, "dims");
+ shape_item = PyDict_GetItemString(kwds, "shape");
+ if (dims_item != NULL && shape_item == NULL) {
+ if (DEPRECATE("'shape' argument should be"
+ " used instead of 'dims'") < 0) {
+ return NULL;
+ }
+ PyDict_SetItemString(kwds, "shape", dims_item);
+ PyDict_DelItemString(kwds, "dims");
+ }
+ }
if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO&|O&:unravel_index",
kwlist,
@@ -1166,17 +1262,17 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
goto fail;
}
- unravel_size = PyArray_MultiplyList(dimensions.ptr, dimensions.len);
-
- if (!PyArray_Check(indices0)) {
- indices = (PyArrayObject*)PyArray_FROM_O(indices0);
- if (indices == NULL) {
- goto fail;
- }
+ unravel_size = PyArray_OverflowMultiplyList(dimensions.ptr, dimensions.len);
+ if (unravel_size == -1) {
+ PyErr_SetString(PyExc_ValueError,
+ "dimensions are too large; arrays and shapes with "
+ "a total size greater than 'intp' are not supported.");
+ goto fail;
}
- else {
- indices = (PyArrayObject *)indices0;
- Py_INCREF(indices);
+
+ indices = astype_anyint(indices0);
+ if (indices == NULL) {
+ goto fail;
}
dtype = PyArray_DescrFromType(NPY_INTP);
@@ -1226,64 +1322,35 @@ arr_unravel_index(PyObject *self, PyObject *args, PyObject *kwds)
goto fail;
}
- if (order == NPY_CORDER) {
- if (NpyIter_GetIterSize(iter) != 0) {
- NpyIter_IterNextFunc *iternext;
- char **dataptr;
- npy_intp *strides;
- npy_intp *countptr, count;
- npy_intp *coordsptr = (npy_intp *)PyArray_DATA(ret_arr);
+ if (order != NPY_CORDER && order != NPY_FORTRANORDER) {
+ PyErr_SetString(PyExc_ValueError,
+ "only 'C' or 'F' order is permitted");
+ goto fail;
+ }
+ if (NpyIter_GetIterSize(iter) != 0) {
+ NpyIter_IterNextFunc *iternext;
+ char **dataptr;
+ npy_intp *strides;
+ npy_intp *countptr, count;
+ npy_intp *coordsptr = (npy_intp *)PyArray_DATA(ret_arr);
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
- goto fail;
- }
- dataptr = NpyIter_GetDataPtrArray(iter);
- strides = NpyIter_GetInnerStrideArray(iter);
- countptr = NpyIter_GetInnerLoopSizePtr(iter);
-
- do {
- count = *countptr;
- if (unravel_index_loop_corder(dimensions.len, dimensions.ptr,
- unravel_size, count, *dataptr, *strides,
- coordsptr) != NPY_SUCCEED) {
- goto fail;
- }
- coordsptr += count*dimensions.len;
- } while(iternext(iter));
+ iternext = NpyIter_GetIterNext(iter, NULL);
+ if (iternext == NULL) {
+ goto fail;
}
- }
- else if (order == NPY_FORTRANORDER) {
- if (NpyIter_GetIterSize(iter) != 0) {
- NpyIter_IterNextFunc *iternext;
- char **dataptr;
- npy_intp *strides;
- npy_intp *countptr, count;
- npy_intp *coordsptr = (npy_intp *)PyArray_DATA(ret_arr);
+ dataptr = NpyIter_GetDataPtrArray(iter);
+ strides = NpyIter_GetInnerStrideArray(iter);
+ countptr = NpyIter_GetInnerLoopSizePtr(iter);
- iternext = NpyIter_GetIterNext(iter, NULL);
- if (iternext == NULL) {
+ do {
+ count = *countptr;
+ if (unravel_index_loop(dimensions.len, dimensions.ptr,
+ unravel_size, count, *dataptr, *strides,
+ coordsptr, order) != NPY_SUCCEED) {
goto fail;
}
- dataptr = NpyIter_GetDataPtrArray(iter);
- strides = NpyIter_GetInnerStrideArray(iter);
- countptr = NpyIter_GetInnerLoopSizePtr(iter);
-
- do {
- count = *countptr;
- if (unravel_index_loop_forder(dimensions.len, dimensions.ptr,
- unravel_size, count, *dataptr, *strides,
- coordsptr) != NPY_SUCCEED) {
- goto fail;
- }
- coordsptr += count*dimensions.len;
- } while(iternext(iter));
- }
- }
- else {
- PyErr_SetString(PyExc_ValueError,
- "only 'C' or 'F' order is permitted");
- goto fail;
+ coordsptr += count * dimensions.len;
+ } while (iternext(iter));
}
@@ -1467,7 +1534,8 @@ pack_inner(const char *inptr,
npy_intp in_stride,
char *outptr,
npy_intp n_out,
- npy_intp out_stride)
+ npy_intp out_stride,
+ char order)
{
/*
* Loop through the elements of inptr.
@@ -1487,9 +1555,13 @@ pack_inner(const char *inptr,
vn_out -= (vn_out & 1);
for (index = 0; index < vn_out; index += 2) {
unsigned int r;
- /* swap as packbits is "big endian", note x86 can load unaligned */
- npy_uint64 a = npy_bswap8(*(npy_uint64*)inptr);
- npy_uint64 b = npy_bswap8(*(npy_uint64*)(inptr + 8));
+ npy_uint64 a = *(npy_uint64*)inptr;
+ npy_uint64 b = *(npy_uint64*)(inptr + 8);
+ if (order == 'b') {
+ a = npy_bswap8(a);
+ b = npy_bswap8(b);
+ }
+ /* note x86 can load unaligned */
__m128i v = _mm_set_epi64(_m_from_int64(b), _m_from_int64(a));
/* false -> 0x00 and true -> 0xFF (there is no cmpneq) */
v = _mm_cmpeq_epi8(v, zero);
@@ -1509,30 +1581,45 @@ pack_inner(const char *inptr,
if (remain == 0) { /* assumes n_in > 0 */
remain = 8;
}
- /* don't reset index to handle remainder of above block */
+ /* Don't reset index. Just handle remainder of above block */
for (; index < n_out; index++) {
- char build = 0;
+ unsigned char build = 0;
int i, maxi;
npy_intp j;
maxi = (index == n_out - 1) ? remain : 8;
- for (i = 0; i < maxi; i++) {
- build <<= 1;
- for (j = 0; j < element_size; j++) {
- build |= (inptr[j] != 0);
+ if (order == 'b') {
+ for (i = 0; i < maxi; i++) {
+ build <<= 1;
+ for (j = 0; j < element_size; j++) {
+ build |= (inptr[j] != 0);
+ }
+ inptr += in_stride;
+ }
+ if (index == n_out - 1) {
+ build <<= 8 - remain;
}
- inptr += in_stride;
}
- if (index == n_out - 1) {
- build <<= 8 - remain;
+ else
+ {
+ for (i = 0; i < maxi; i++) {
+ build >>= 1;
+ for (j = 0; j < element_size; j++) {
+ build |= (inptr[j] != 0) ? 128 : 0;
+ }
+ inptr += in_stride;
+ }
+ if (index == n_out - 1) {
+ build >>= 8 - remain;
+ }
}
- *outptr = build;
+ *outptr = (char)build;
outptr += out_stride;
}
}
static PyObject *
-pack_bits(PyObject *input, int axis)
+pack_bits(PyObject *input, int axis, char order)
{
PyArrayObject *inp;
PyArrayObject *new = NULL;
@@ -1550,6 +1637,7 @@ pack_bits(PyObject *input, int axis)
if (!PyArray_ISBOOL(inp) && !PyArray_ISINTEGER(inp)) {
PyErr_SetString(PyExc_TypeError,
"Expected an input array of integer or boolean data type");
+ Py_DECREF(inp);
goto fail;
}
@@ -1616,7 +1704,7 @@ pack_bits(PyObject *input, int axis)
pack_inner(PyArray_ITER_DATA(it), PyArray_ITEMSIZE(new),
PyArray_DIM(new, axis), PyArray_STRIDE(new, axis),
PyArray_ITER_DATA(ot), PyArray_DIM(out, axis),
- PyArray_STRIDE(out, axis));
+ PyArray_STRIDE(out, axis), order);
PyArray_ITER_NEXT(it);
PyArray_ITER_NEXT(ot);
}
@@ -1636,17 +1724,24 @@ fail:
}
static PyObject *
-unpack_bits(PyObject *input, int axis)
+unpack_bits(PyObject *input, int axis, PyObject *count_obj, char order)
{
static int unpack_init = 0;
- static char unpack_lookup[256][8];
+ /*
+ * lookuptable for bitorder big as it has been around longer
+ * bitorder little is handled via byteswapping in the loop
+ */
+ static union {
+ npy_uint8 bytes[8];
+ npy_uint64 uint64;
+ } unpack_lookup_big[256];
PyArrayObject *inp;
PyArrayObject *new = NULL;
PyArrayObject *out = NULL;
npy_intp outdims[NPY_MAXDIMS];
int i;
PyArrayIterObject *it, *ot;
- npy_intp n_in, in_stride, out_stride;
+ npy_intp count, in_n, in_tail, out_pad, in_stride, out_stride;
NPY_BEGIN_THREADS_DEF;
inp = (PyArrayObject *)PyArray_FROM_O(input);
@@ -1657,6 +1752,7 @@ unpack_bits(PyObject *input, int axis)
if (PyArray_TYPE(inp) != NPY_UBYTE) {
PyErr_SetString(PyExc_TypeError,
"Expected an input array of unsigned byte data type");
+ Py_DECREF(inp);
goto fail;
}
@@ -1674,20 +1770,37 @@ unpack_bits(PyObject *input, int axis)
newdim.ptr = &shape;
temp = (PyArrayObject *)PyArray_Newshape(new, &newdim, NPY_CORDER);
+ Py_DECREF(new);
if (temp == NULL) {
- goto fail;
+ return NULL;
}
- Py_DECREF(new);
new = temp;
}
/* Setup output shape */
- for (i=0; i<PyArray_NDIM(new); i++) {
+ for (i = 0; i < PyArray_NDIM(new); i++) {
outdims[i] = PyArray_DIM(new, i);
}
/* Multiply axis dimension by 8 */
- outdims[axis] <<= 3;
+ outdims[axis] *= 8;
+ if (count_obj != Py_None) {
+ count = PyArray_PyIntAsIntp(count_obj);
+ if (error_converting(count)) {
+ goto fail;
+ }
+ if (count < 0) {
+ outdims[axis] += count;
+ if (outdims[axis] < 0) {
+ PyErr_Format(PyExc_ValueError,
+ "-count larger than number of elements");
+ goto fail;
+ }
+ }
+ else {
+ outdims[axis] = count;
+ }
+ }
/* Create output array */
out = (PyArrayObject *)PyArray_NewFromDescr(
@@ -1697,6 +1810,7 @@ unpack_bits(PyObject *input, int axis)
if (out == NULL) {
goto fail;
}
+
/* Setup iterators to iterate over all but given axis */
it = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)new, &axis);
ot = (PyArrayIterObject *)PyArray_IterAllButAxis((PyObject *)out, &axis);
@@ -1706,34 +1820,39 @@ unpack_bits(PyObject *input, int axis)
goto fail;
}
- /* setup lookup table under GIL, big endian 0..256 as bytes */
+ /*
+ * setup lookup table under GIL, 256 8 byte blocks representing 8 bits
+ * expanded to 1/0 bytes
+ */
if (unpack_init == 0) {
- npy_uint64 j;
- npy_uint64 * unpack_lookup_64 = (npy_uint64 *)unpack_lookup;
+ npy_intp j;
for (j=0; j < 256; j++) {
- npy_uint64 v = 0;
- v |= (npy_uint64)((j & 1) == 1);
- v |= (npy_uint64)((j & 2) == 2) << 8;
- v |= (npy_uint64)((j & 4) == 4) << 16;
- v |= (npy_uint64)((j & 8) == 8) << 24;
- v |= (npy_uint64)((j & 16) == 16) << 32;
- v |= (npy_uint64)((j & 32) == 32) << 40;
- v |= (npy_uint64)((j & 64) == 64) << 48;
- v |= (npy_uint64)((j & 128) == 128) << 56;
-#if NPY_BYTE_ORDER == NPY_LITTLE_ENDIAN
- v = npy_bswap8(v);
-#endif
- unpack_lookup_64[j] = v;
+ npy_intp k;
+ for (k=0; k < 8; k++) {
+ npy_uint8 v = (j & (1 << k)) == (1 << k);
+ unpack_lookup_big[j].bytes[7 - k] = v;
+ }
}
unpack_init = 1;
}
- NPY_BEGIN_THREADS_THRESHOLDED(PyArray_DIM(new, axis));
+ count = PyArray_DIM(new, axis) * 8;
+ if (outdims[axis] > count) {
+ in_n = count / 8;
+ in_tail = 0;
+ out_pad = outdims[axis] - count;
+ }
+ else {
+ in_n = outdims[axis] / 8;
+ in_tail = outdims[axis] % 8;
+ out_pad = 0;
+ }
- n_in = PyArray_DIM(new, axis);
in_stride = PyArray_STRIDE(new, axis);
out_stride = PyArray_STRIDE(out, axis);
+ NPY_BEGIN_THREADS_THRESHOLDED(PyArray_Size((PyObject *)out) / 8);
+
while (PyArray_ITER_NOTDONE(it)) {
npy_intp index;
unsigned const char *inptr = PyArray_ITER_DATA(it);
@@ -1741,24 +1860,74 @@ unpack_bits(PyObject *input, int axis)
if (out_stride == 1) {
/* for unity stride we can just copy out of the lookup table */
- for (index = 0; index < n_in; index++) {
- memcpy(outptr, unpack_lookup[*inptr], 8);
- outptr += 8;
- inptr += in_stride;
+ if (order == 'b') {
+ for (index = 0; index < in_n; index++) {
+ npy_uint64 v = unpack_lookup_big[*inptr].uint64;
+ memcpy(outptr, &v, 8);
+ outptr += 8;
+ inptr += in_stride;
+ }
+ }
+ else {
+ for (index = 0; index < in_n; index++) {
+ npy_uint64 v = unpack_lookup_big[*inptr].uint64;
+ if (order != 'b') {
+ v = npy_bswap8(v);
+ }
+ memcpy(outptr, &v, 8);
+ outptr += 8;
+ inptr += in_stride;
+ }
+ }
+ /* Clean up the tail portion */
+ if (in_tail) {
+ npy_uint64 v = unpack_lookup_big[*inptr].uint64;
+ if (order != 'b') {
+ v = npy_bswap8(v);
+ }
+ memcpy(outptr, &v, in_tail);
+ }
+ /* Add padding */
+ else if (out_pad) {
+ memset(outptr, 0, out_pad);
}
}
else {
- for (index = 0; index < n_in; index++) {
- unsigned char mask = 128;
-
- for (i = 0; i < 8; i++) {
- *outptr = ((mask & (*inptr)) != 0);
+ if (order == 'b') {
+ for (index = 0; index < in_n; index++) {
+ for (i = 0; i < 8; i++) {
+ *outptr = ((*inptr & (128 >> i)) != 0);
+ outptr += out_stride;
+ }
+ inptr += in_stride;
+ }
+ /* Clean up the tail portion */
+ for (i = 0; i < in_tail; i++) {
+ *outptr = ((*inptr & (128 >> i)) != 0);
+ outptr += out_stride;
+ }
+ }
+ else {
+ for (index = 0; index < in_n; index++) {
+ for (i = 0; i < 8; i++) {
+ *outptr = ((*inptr & (1 << i)) != 0);
+ outptr += out_stride;
+ }
+ inptr += in_stride;
+ }
+ /* Clean up the tail portion */
+ for (i = 0; i < in_tail; i++) {
+ *outptr = ((*inptr & (1 << i)) != 0);
outptr += out_stride;
- mask >>= 1;
}
- inptr += in_stride;
+ }
+ /* Add padding */
+ for (index = 0; index < out_pad; index++) {
+ *outptr = 0;
+ outptr += out_stride;
}
}
+
PyArray_ITER_NEXT(it);
PyArray_ITER_NEXT(ot);
}
@@ -1782,25 +1951,49 @@ io_pack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
PyObject *obj;
int axis = NPY_MAXDIMS;
- static char *kwlist[] = {"in", "axis", NULL};
+ static char *kwlist[] = {"in", "axis", "bitorder", NULL};
+ char c = 'b';
+ const char * order_str = NULL;
- if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&:pack" , kwlist,
- &obj, PyArray_AxisConverter, &axis)) {
+ if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&s:pack" , kwlist,
+ &obj, PyArray_AxisConverter, &axis, &order_str)) {
return NULL;
}
- return pack_bits(obj, axis);
+ if (order_str != NULL) {
+ if (strncmp(order_str, "little", 6) == 0)
+ c = 'l';
+ else if (strncmp(order_str, "big", 3) == 0)
+ c = 'b';
+ else {
+ PyErr_SetString(PyExc_ValueError,
+ "'order' must be either 'little' or 'big'");
+ return NULL;
+ }
+ }
+ return pack_bits(obj, axis, c);
}
+
NPY_NO_EXPORT PyObject *
io_unpack(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
PyObject *obj;
int axis = NPY_MAXDIMS;
- static char *kwlist[] = {"in", "axis", NULL};
+ PyObject *count = Py_None;
+ static char *kwlist[] = {"in", "axis", "count", "bitorder", NULL};
+ const char * c = NULL;
- if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&:unpack" , kwlist,
- &obj, PyArray_AxisConverter, &axis)) {
+ if (!PyArg_ParseTupleAndKeywords( args, kwds, "O|O&Os:unpack" , kwlist,
+ &obj, PyArray_AxisConverter, &axis, &count, &c)) {
+ return NULL;
+ }
+ if (c == NULL) {
+ c = "b";
+ }
+ if (c[0] != 'l' && c[0] != 'b') {
+ PyErr_SetString(PyExc_ValueError,
+ "'order' must begin with 'l' or 'b'");
return NULL;
}
- return unpack_bits(obj, axis);
+ return unpack_bits(obj, axis, count, c[0]);
}
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 7e92e5991..4baa02052 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -16,6 +16,7 @@
#include "conversion_utils.h"
#include "alloc.h"
+#include "buffer.h"
static int
PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2);
@@ -115,8 +116,8 @@ PyArray_IntpConverter(PyObject *obj, PyArray_Dims *seq)
return NPY_FAIL;
}
if (len > NPY_MAXDIMS) {
- PyErr_Format(PyExc_ValueError, "sequence too large; "
- "cannot be greater than %d", NPY_MAXDIMS);
+ PyErr_Format(PyExc_ValueError, "maximum supported dimension for an ndarray is %d"
+ ", found %d", NPY_MAXDIMS, len);
return NPY_FAIL;
}
if (len > 0) {
@@ -185,6 +186,7 @@ PyArray_BufferConverter(PyObject *obj, PyArray_Chunk *buf)
* sticks around after the release.
*/
PyBuffer_Release(&view);
+ _dealloc_cached_buffer_info(obj);
/* Point to the base of the buffer object if present */
if (PyMemoryView_Check(obj)) {
@@ -391,6 +393,11 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
char *str;
PyObject *tmp = NULL;
+ if (obj == Py_None) {
+ *sortkind = NPY_QUICKSORT;
+ return NPY_SUCCEED;
+ }
+
if (PyUnicode_Check(obj)) {
obj = tmp = PyUnicode_AsASCIIString(obj);
if (obj == NULL) {
@@ -399,6 +406,7 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
}
*sortkind = NPY_QUICKSORT;
+
str = PyBytes_AsString(obj);
if (!str) {
Py_XDECREF(tmp);
@@ -417,11 +425,23 @@ PyArray_SortkindConverter(PyObject *obj, NPY_SORTKIND *sortkind)
*sortkind = NPY_HEAPSORT;
}
else if (str[0] == 'm' || str[0] == 'M') {
+ /*
+ * Mergesort is an alias for NPY_STABLESORT.
+ * That maintains backwards compatibility while
+ * allowing other types of stable sorts to be used.
+ */
*sortkind = NPY_MERGESORT;
}
else if (str[0] == 's' || str[0] == 'S') {
- /* mergesort is the only stable sorting method in numpy */
- *sortkind = NPY_MERGESORT;
+ /*
+ * NPY_STABLESORT is one of
+ *
+ * - mergesort
+ * - timsort
+ *
+ * Which one is used depends on the data type.
+ */
+ *sortkind = NPY_STABLESORT;
}
else {
PyErr_Format(PyExc_ValueError,
@@ -530,10 +550,9 @@ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val)
int ret;
tmp = PyUnicode_AsASCIIString(object);
if (tmp == NULL) {
- PyErr_SetString(PyExc_ValueError, "Invalid unicode string passed in "
- "for the array ordering. "
- "Please pass in 'C', 'F', 'A' "
- "or 'K' instead");
+ PyErr_SetString(PyExc_ValueError,
+ "Invalid unicode string passed in for the array ordering. "
+ "Please pass in 'C', 'F', 'A' or 'K' instead");
return NPY_FAIL;
}
ret = PyArray_OrderConverter(tmp, val);
@@ -541,38 +560,18 @@ PyArray_OrderConverter(PyObject *object, NPY_ORDER *val)
return ret;
}
else if (!PyBytes_Check(object) || PyBytes_GET_SIZE(object) < 1) {
- /* 2015-12-14, 1.11 */
- int ret = DEPRECATE("Non-string object detected for "
- "the array ordering. Please pass "
- "in 'C', 'F', 'A', or 'K' instead");
-
- if (ret < 0) {
- return -1;
- }
-
- if (PyObject_IsTrue(object)) {
- *val = NPY_FORTRANORDER;
- }
- else {
- *val = NPY_CORDER;
- }
- if (PyErr_Occurred()) {
- return NPY_FAIL;
- }
- return NPY_SUCCEED;
+ PyErr_SetString(PyExc_ValueError,
+ "Non-string object detected for the array ordering. "
+ "Please pass in 'C', 'F', 'A', or 'K' instead");
+ return NPY_FAIL;
}
else {
str = PyBytes_AS_STRING(object);
if (strlen(str) != 1) {
- /* 2015-12-14, 1.11 */
- int ret = DEPRECATE("Non length-one string passed "
- "in for the array ordering. "
- "Please pass in 'C', 'F', 'A', "
- "or 'K' instead");
-
- if (ret < 0) {
- return -1;
- }
+ PyErr_SetString(PyExc_ValueError,
+ "Non-string object detected for the array ordering. "
+ "Please pass in 'C', 'F', 'A', or 'K' instead");
+ return NPY_FAIL;
}
if (str[0] == 'C' || str[0] == 'c') {
diff --git a/numpy/core/src/multiarray/convert.c b/numpy/core/src/multiarray/convert.c
index e88582a51..aa4e40e66 100644
--- a/numpy/core/src/multiarray/convert.c
+++ b/numpy/core/src/multiarray/convert.c
@@ -543,35 +543,6 @@ PyArray_AssignZero(PyArrayObject *dst,
return retcode;
}
-/*
- * Fills an array with ones.
- *
- * dst: The destination array.
- * wheremask: If non-NULL, a boolean mask specifying where to set the values.
- *
- * Returns 0 on success, -1 on failure.
- */
-NPY_NO_EXPORT int
-PyArray_AssignOne(PyArrayObject *dst,
- PyArrayObject *wheremask)
-{
- npy_bool value;
- PyArray_Descr *bool_dtype;
- int retcode;
-
- /* Create a raw bool scalar with the value True */
- bool_dtype = PyArray_DescrFromType(NPY_BOOL);
- if (bool_dtype == NULL) {
- return -1;
- }
- value = 1;
-
- retcode = PyArray_AssignRawScalar(dst, bool_dtype, (char *)&value,
- wheremask, NPY_SAFE_CASTING);
-
- Py_DECREF(bool_dtype);
- return retcode;
-}
/*NUMPY_API
* Copy an array.
@@ -614,22 +585,6 @@ PyArray_View(PyArrayObject *self, PyArray_Descr *type, PyTypeObject *pytype)
}
dtype = PyArray_DESCR(self);
-
- if (type != NULL && !PyArray_EquivTypes(dtype, type) &&
- (PyArray_FLAGS(self) & NPY_ARRAY_WARN_ON_WRITE)) {
- const char *msg =
- "Numpy has detected that you may be viewing or writing to an array "
- "returned by selecting multiple fields in a structured array. \n\n"
- "This code may break in numpy 1.16 because this will return a view "
- "instead of a copy -- see release notes for details.";
- /* 2016-09-19, 1.12 */
- if (DEPRECATE_FUTUREWARNING(msg) < 0) {
- return NULL;
- }
- /* Only warn once per array */
- PyArray_CLEARFLAGS(self, NPY_ARRAY_WARN_ON_WRITE);
- }
-
flags = PyArray_FLAGS(self);
Py_INCREF(dtype);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 33a706412..025c66013 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -47,11 +47,10 @@ PyArray_CastToType(PyArrayObject *arr, PyArray_Descr *dtype, int is_f_order)
PyObject *out;
/* If the requested dtype is flexible, adapt it */
- PyArray_AdaptFlexibleDType((PyObject *)arr, PyArray_DESCR(arr), &dtype);
+ dtype = PyArray_AdaptFlexibleDType((PyObject *)arr, PyArray_DESCR(arr), dtype);
if (dtype == NULL) {
return NULL;
}
-
out = PyArray_NewFromDescr(Py_TYPE(arr), dtype,
PyArray_NDIM(arr),
PyArray_DIMS(arr),
@@ -128,9 +127,9 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
}
/*
- * This function calls Py_DECREF on flex_dtype, and replaces it with
- * a new dtype that has been adapted based on the values in data_dtype
- * and data_obj. If the flex_dtype is not flexible, it leaves it as is.
+ * This function returns a dtype based on flex_dtype and the values in
+ * data_dtype and data_obj. It also calls Py_DECREF on the flex_dtype. If the
+ * flex_dtype is not flexible, it returns it as-is.
*
* Usually, if data_obj is not an array, dtype should be the result
* given by the PyArray_GetArrayParamsFromObject function.
@@ -138,40 +137,37 @@ PyArray_GetCastFunc(PyArray_Descr *descr, int type_num)
* The data_obj may be NULL if just a dtype is known for the source.
*
* If *flex_dtype is NULL, returns immediately, without setting an
- * exception. This basically assumes an error was already set previously.
+ * exception, leaving any previous error handling intact.
*
* The current flexible dtypes include NPY_STRING, NPY_UNICODE, NPY_VOID,
* and NPY_DATETIME with generic units.
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT PyArray_Descr *
PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
- PyArray_Descr **flex_dtype)
+ PyArray_Descr *flex_dtype)
{
PyArray_DatetimeMetaData *meta;
+ PyArray_Descr *retval = NULL;
int flex_type_num;
- if (*flex_dtype == NULL) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_RuntimeError,
- "NumPy AdaptFlexibleDType was called with NULL flex_dtype "
- "but no error set");
- }
- return;
+ if (flex_dtype == NULL) {
+ return retval;
}
- flex_type_num = (*flex_dtype)->type_num;
+ flex_type_num = flex_dtype->type_num;
/* Flexible types with expandable size */
- if (PyDataType_ISUNSIZED(*flex_dtype)) {
+ if (PyDataType_ISUNSIZED(flex_dtype)) {
/* First replace the flex_dtype */
- PyArray_DESCR_REPLACE(*flex_dtype);
- if (*flex_dtype == NULL) {
- return;
+ retval = PyArray_DescrNew(flex_dtype);
+ Py_DECREF(flex_dtype);
+ if (retval == NULL) {
+ return retval;
}
if (data_dtype->type_num == flex_type_num ||
flex_type_num == NPY_VOID) {
- (*flex_dtype)->elsize = data_dtype->elsize;
+ (retval)->elsize = data_dtype->elsize;
}
else if (flex_type_num == NPY_STRING || flex_type_num == NPY_UNICODE) {
npy_intp size = 8;
@@ -199,7 +195,7 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
}
else if (data_dtype->elsize > 8 ||
data_dtype->elsize < 0) {
- /*
+ /*
* Element size should never be greater than 8 or
* less than 0 for integer type, but just in case...
*/
@@ -237,9 +233,8 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
PyObject *s = PyObject_Str(list);
if (s == NULL) {
Py_DECREF(list);
- Py_DECREF(*flex_dtype);
- *flex_dtype = NULL;
- return;
+ Py_DECREF(retval);
+ return NULL;
}
else {
size = PyObject_Length(s);
@@ -262,9 +257,16 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
list = PyArray_ToList((PyArrayObject *)data_obj);
result = PyArray_GetArrayParamsFromObject(
list,
- *flex_dtype,
+ retval,
0, &dtype,
&ndim, dims, &arr, NULL);
+ Py_DECREF(list);
+ Py_XDECREF(arr);
+ if (result < 0) {
+ Py_XDECREF(dtype);
+ Py_DECREF(retval);
+ return NULL;
+ }
if (result == 0 && dtype != NULL) {
if (flex_type_num == NPY_UNICODE) {
size = dtype->elsize / 4;
@@ -274,15 +276,12 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
}
}
Py_XDECREF(dtype);
- Py_XDECREF(arr);
- Py_DECREF(list);
}
else if (PyArray_IsPythonScalar(data_obj)) {
PyObject *s = PyObject_Str(data_obj);
if (s == NULL) {
- Py_DECREF(*flex_dtype);
- *flex_dtype = NULL;
- return;
+ Py_DECREF(retval);
+ return NULL;
}
else {
size = PyObject_Length(s);
@@ -301,9 +300,8 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
case NPY_DATETIME:
meta = get_datetime_metadata_from_dtype(data_dtype);
if (meta == NULL) {
- Py_DECREF(*flex_dtype);
- *flex_dtype = NULL;
- return;
+ Py_DECREF(retval);
+ return NULL;
}
size = get_datetime_iso_8601_strlen(0, meta->base);
break;
@@ -313,10 +311,10 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
}
if (flex_type_num == NPY_STRING) {
- (*flex_dtype)->elsize = size;
+ retval->elsize = size;
}
else if (flex_type_num == NPY_UNICODE) {
- (*flex_dtype)->elsize = size * 4;
+ retval->elsize = size * 4;
}
}
else {
@@ -326,18 +324,17 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
*/
PyErr_SetString(PyExc_TypeError,
"don't know how to adapt flex dtype");
- *flex_dtype = NULL;
- return;
+ Py_DECREF(retval);
+ return NULL;
}
}
/* Flexible type with generic time unit that adapts */
else if (flex_type_num == NPY_DATETIME ||
flex_type_num == NPY_TIMEDELTA) {
- meta = get_datetime_metadata_from_dtype(*flex_dtype);
+ meta = get_datetime_metadata_from_dtype(flex_dtype);
+ retval = flex_dtype;
if (meta == NULL) {
- Py_DECREF(*flex_dtype);
- *flex_dtype = NULL;
- return;
+ return NULL;
}
if (meta->base == NPY_FR_GENERIC) {
@@ -345,22 +342,24 @@ PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
data_dtype->type_num == NPY_TIMEDELTA) {
meta = get_datetime_metadata_from_dtype(data_dtype);
if (meta == NULL) {
- Py_DECREF(*flex_dtype);
- *flex_dtype = NULL;
- return;
+ return NULL;
}
- Py_DECREF(*flex_dtype);
- *flex_dtype = create_datetime_dtype(flex_type_num, meta);
+ retval = create_datetime_dtype(flex_type_num, meta);
+ Py_DECREF(flex_dtype);
}
else if (data_obj != NULL) {
/* Detect the unit from the input's data */
- Py_DECREF(*flex_dtype);
- *flex_dtype = find_object_datetime_type(data_obj,
+ retval = find_object_datetime_type(data_obj,
flex_type_num);
+ Py_DECREF(flex_dtype);
}
}
}
+ else {
+ retval = flex_dtype;
+ }
+ return retval;
}
/*
@@ -518,7 +517,7 @@ PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to)
* stringified value of the object.
*/
else if (to_type_num == NPY_STRING || to_type_num == NPY_UNICODE) {
- /*
+ /*
* Boolean value cast to string type is 5 characters max
* for string 'False'.
*/
@@ -531,7 +530,7 @@ PyArray_CanCastTo(PyArray_Descr *from, PyArray_Descr *to)
if (PyDataType_ISUNSIZED(to)) {
ret = 1;
}
- /*
+ /*
* Need at least 5 characters to convert from boolean
* to 'True' or 'False'.
*/
@@ -680,15 +679,82 @@ NPY_NO_EXPORT npy_bool
PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
NPY_CASTING casting)
{
- /* Fast path for unsafe casts or basic types */
- if (casting == NPY_UNSAFE_CASTING ||
- (NPY_LIKELY(from->type_num < NPY_OBJECT) &&
- NPY_LIKELY(from->type_num == to->type_num) &&
- NPY_LIKELY(from->byteorder == to->byteorder))) {
+ /*
+ * Fast paths for equality and for basic types.
+ */
+ if (from == to ||
+ ((NPY_LIKELY(PyDataType_ISNUMBER(from)) ||
+ PyDataType_ISOBJECT(from)) &&
+ NPY_LIKELY(from->type_num == to->type_num) &&
+ NPY_LIKELY(from->byteorder == to->byteorder))) {
+ return 1;
+ }
+ /*
+ * Cases with subarrays and fields need special treatment.
+ */
+ if (PyDataType_HASFIELDS(from)) {
+ /*
+ * If from is a structured data type, then it can be cast to a simple
+ * non-object one only for unsafe casting *and* if it has a single
+ * field; recurse just in case the single field is itself structured.
+ */
+ if (!PyDataType_HASFIELDS(to) && !PyDataType_ISOBJECT(to)) {
+ if (casting == NPY_UNSAFE_CASTING &&
+ PyDict_Size(from->fields) == 1) {
+ Py_ssize_t ppos = 0;
+ PyObject *tuple;
+ PyArray_Descr *field;
+ PyDict_Next(from->fields, &ppos, NULL, &tuple);
+ field = (PyArray_Descr *)PyTuple_GET_ITEM(tuple, 0);
+ /*
+ * For a subarray, we need to get the underlying type;
+ * since we already are casting unsafely, we can ignore
+ * the shape.
+ */
+ if (PyDataType_HASSUBARRAY(field)) {
+ field = field->subarray->base;
+ }
+ return PyArray_CanCastTypeTo(field, to, casting);
+ }
+ else {
+ return 0;
+ }
+ }
+ /*
+ * Casting from one structured data type to another depends on the fields;
+ * we pass that case on to the EquivTypenums case below.
+ *
+ * TODO: move that part up here? Need to check whether equivalent type
+ * numbers is an addition constraint that is needed.
+ *
+ * TODO/FIXME: For now, always allow structured to structured for unsafe
+ * casting; this is not correct, but needed since the treatment in can_cast
+ * below got out of sync with astype; see gh-13667.
+ */
+ if (casting == NPY_UNSAFE_CASTING) {
+ return 1;
+ }
+ }
+ else if (PyDataType_HASFIELDS(to)) {
+ /*
+ * If "from" is a simple data type and "to" has fields, then only
+ * unsafe casting works (and that works always, even to multiple fields).
+ */
+ return casting == NPY_UNSAFE_CASTING;
+ }
+ /*
+ * Everything else we consider castable for unsafe for now.
+ * FIXME: ensure what we do here is consistent with "astype",
+ * i.e., deal more correctly with subarrays and user-defined dtype.
+ */
+ else if (casting == NPY_UNSAFE_CASTING) {
return 1;
}
- /* Equivalent types can be cast with any value of 'casting' */
- else if (PyArray_EquivTypenums(from->type_num, to->type_num)) {
+ /*
+ * Equivalent simple types can be cast with any value of 'casting', but
+ * we need to be careful about structured to structured.
+ */
+ if (PyArray_EquivTypenums(from->type_num, to->type_num)) {
/* For complicated case, use EquivTypes (for now) */
if (PyTypeNum_ISUSERDEF(from->type_num) ||
from->subarray != NULL) {
@@ -1166,7 +1232,11 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
PyArray_Descr *ret = NULL;
PyArray_Descr *temp = PyArray_DescrNew(type1);
PyDataType_MAKEUNSIZED(temp);
- PyArray_AdaptFlexibleDType(NULL, type2, &temp);
+
+ temp = PyArray_AdaptFlexibleDType(NULL, type2, temp);
+ if (temp == NULL) {
+ return NULL;
+ }
if (temp->elsize > type1->elsize) {
ret = ensure_dtype_nbo(temp);
}
@@ -1204,7 +1274,10 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
PyArray_Descr *ret = NULL;
PyArray_Descr *temp = PyArray_DescrNew(type1);
PyDataType_MAKEUNSIZED(temp);
- PyArray_AdaptFlexibleDType(NULL, type2, &temp);
+ temp = PyArray_AdaptFlexibleDType(NULL, type2, temp);
+ if (temp == NULL) {
+ return NULL;
+ }
if (temp->elsize > type1->elsize) {
ret = ensure_dtype_nbo(temp);
}
@@ -1252,7 +1325,10 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
PyArray_Descr *ret = NULL;
PyArray_Descr *temp = PyArray_DescrNew(type2);
PyDataType_MAKEUNSIZED(temp);
- PyArray_AdaptFlexibleDType(NULL, type1, &temp);
+ temp = PyArray_AdaptFlexibleDType(NULL, type1, temp);
+ if (temp == NULL) {
+ return NULL;
+ }
if (temp->elsize > type2->elsize) {
ret = ensure_dtype_nbo(temp);
}
@@ -1269,7 +1345,10 @@ PyArray_PromoteTypes(PyArray_Descr *type1, PyArray_Descr *type2)
PyArray_Descr *ret = NULL;
PyArray_Descr *temp = PyArray_DescrNew(type2);
PyDataType_MAKEUNSIZED(temp);
- PyArray_AdaptFlexibleDType(NULL, type1, &temp);
+ temp = PyArray_AdaptFlexibleDType(NULL, type1, temp);
+ if (temp == NULL) {
+ return NULL;
+ }
if (temp->elsize > type2->elsize) {
ret = ensure_dtype_nbo(temp);
}
@@ -1688,46 +1767,22 @@ dtype_kind_to_simplified_ordering(char kind)
}
}
-/*NUMPY_API
- * Produces the result type of a bunch of inputs, using the UFunc
- * type promotion rules. Use this function when you have a set of
- * input arrays, and need to determine an output array dtype.
- *
- * If all the inputs are scalars (have 0 dimensions) or the maximum "kind"
- * of the scalars is greater than the maximum "kind" of the arrays, does
- * a regular type promotion.
- *
- * Otherwise, does a type promotion on the MinScalarType
- * of all the inputs. Data types passed directly are treated as array
- * types.
- *
+
+/*
+ * Determine if there is a mix of scalars and arrays/dtypes.
+ * If this is the case, the scalars should be handled as the minimum type
+ * capable of holding the value when the maximum "category" of the scalars
+ * surpasses the maximum "category" of the arrays/dtypes.
+ * If the scalars are of a lower or same category as the arrays, they may be
+ * demoted to a lower type within their category (the lowest type they can
+ * be cast to safely according to scalar casting rules).
*/
-NPY_NO_EXPORT PyArray_Descr *
-PyArray_ResultType(npy_intp narrs, PyArrayObject **arr,
- npy_intp ndtypes, PyArray_Descr **dtypes)
+NPY_NO_EXPORT int
+should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
+ npy_intp ndtypes, PyArray_Descr **dtypes)
{
- npy_intp i;
- int use_min_scalar;
+ int use_min_scalar = 0;
- /* If there's just one type, pass it through */
- if (narrs + ndtypes == 1) {
- PyArray_Descr *ret = NULL;
- if (narrs == 1) {
- ret = PyArray_DESCR(arr[0]);
- }
- else {
- ret = dtypes[0];
- }
- Py_INCREF(ret);
- return ret;
- }
-
- /*
- * Determine if there are any scalars, and if so, whether
- * the maximum "kind" of the scalars surpasses the maximum
- * "kind" of the arrays
- */
- use_min_scalar = 0;
if (narrs > 0) {
int all_scalars;
int max_scalar_kind = -1;
@@ -1736,7 +1791,7 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr,
all_scalars = (ndtypes > 0) ? 0 : 1;
/* Compute the maximum "kinds" and whether everything is scalar */
- for (i = 0; i < narrs; ++i) {
+ for (npy_intp i = 0; i < narrs; ++i) {
if (PyArray_NDIM(arr[i]) == 0) {
int kind = dtype_kind_to_simplified_ordering(
PyArray_DESCR(arr[i])->kind);
@@ -1757,7 +1812,7 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr,
* If the max scalar kind is bigger than the max array kind,
* finish computing the max array kind
*/
- for (i = 0; i < ndtypes; ++i) {
+ for (npy_intp i = 0; i < ndtypes; ++i) {
int kind = dtype_kind_to_simplified_ordering(dtypes[i]->kind);
if (kind > max_array_kind) {
max_array_kind = kind;
@@ -1769,6 +1824,44 @@ PyArray_ResultType(npy_intp narrs, PyArrayObject **arr,
use_min_scalar = 1;
}
}
+ return use_min_scalar;
+}
+
+
+/*NUMPY_API
+ * Produces the result type of a bunch of inputs, using the UFunc
+ * type promotion rules. Use this function when you have a set of
+ * input arrays, and need to determine an output array dtype.
+ *
+ * If all the inputs are scalars (have 0 dimensions) or the maximum "kind"
+ * of the scalars is greater than the maximum "kind" of the arrays, does
+ * a regular type promotion.
+ *
+ * Otherwise, does a type promotion on the MinScalarType
+ * of all the inputs. Data types passed directly are treated as array
+ * types.
+ *
+ */
+NPY_NO_EXPORT PyArray_Descr *
+PyArray_ResultType(npy_intp narrs, PyArrayObject **arr,
+ npy_intp ndtypes, PyArray_Descr **dtypes)
+{
+ npy_intp i;
+
+ /* If there's just one type, pass it through */
+ if (narrs + ndtypes == 1) {
+ PyArray_Descr *ret = NULL;
+ if (narrs == 1) {
+ ret = PyArray_DESCR(arr[0]);
+ }
+ else {
+ ret = dtypes[0];
+ }
+ Py_INCREF(ret);
+ return ret;
+ }
+
+ int use_min_scalar = should_use_min_scalar(narrs, arr, ndtypes, dtypes);
/* Loop through all the types, promoting them */
if (!use_min_scalar) {
diff --git a/numpy/core/src/multiarray/convert_datatype.h b/numpy/core/src/multiarray/convert_datatype.h
index bf77d699a..72867ead8 100644
--- a/numpy/core/src/multiarray/convert_datatype.h
+++ b/numpy/core/src/multiarray/convert_datatype.h
@@ -18,16 +18,28 @@ NPY_NO_EXPORT npy_bool
can_cast_scalar_to(PyArray_Descr *scal_type, char *scal_data,
PyArray_Descr *to, NPY_CASTING casting);
+NPY_NO_EXPORT int
+should_use_min_scalar(npy_intp narrs, PyArrayObject **arr,
+ npy_intp ndtypes, PyArray_Descr **dtypes);
+
/*
* This function calls Py_DECREF on flex_dtype, and replaces it with
* a new dtype that has been adapted based on the values in data_dtype
- * and data_obj. If the flex_dtype is not flexible, it leaves it as is.
+ * and data_obj. If the flex_dtype is not flexible, it returns it as-is.
+ *
+ * Usually, if data_obj is not an array, dtype should be the result
+ * given by the PyArray_GetArrayParamsFromObject function.
+ *
+ * The data_obj may be NULL if just a dtype is known for the source.
+ *
+ * If *flex_dtype is NULL, returns immediately, without setting an
+ * exception, leaving any previous error handling intact.
*
* The current flexible dtypes include NPY_STRING, NPY_UNICODE, NPY_VOID,
* and NPY_DATETIME with generic units.
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT PyArray_Descr *
PyArray_AdaptFlexibleDType(PyObject *data_obj, PyArray_Descr *data_dtype,
- PyArray_Descr **flex_dtype);
+ PyArray_Descr *flex_dtype);
#endif
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index aaaaeee82..5174bd889 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -11,7 +11,7 @@
#include "npy_config.h"
-#include "npy_import.h"
+#include "npy_ctypes.h"
#include "npy_pycompat.h"
#include "multiarraymodule.h"
@@ -40,9 +40,31 @@
* regards to the handling of text representations.
*/
+/*
+ * Scanning function for next element parsing and seperator skipping.
+ * These functions return:
+ * - 0 to indicate more data to read
+ * - -1 when reading stopped at the end of the string/file
+ * - -2 when reading stopped before the end was reached.
+ *
+ * The dtype specific parsing functions may set the python error state
+ * (they have to get the GIL first) additionally.
+ */
typedef int (*next_element)(void **, void *, PyArray_Descr *, void *);
typedef int (*skip_separator)(void **, const char *, void *);
+
+static npy_bool
+string_is_fully_read(char const* start, char const* end) {
+ if (end == NULL) {
+ return *start == '\0'; /* null terminated */
+ }
+ else {
+ return start >= end; /* fixed length */
+ }
+}
+
+
static int
fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype,
const char *end)
@@ -50,19 +72,23 @@ fromstr_next_element(char **s, void *dptr, PyArray_Descr *dtype,
char *e = *s;
int r = dtype->f->fromstr(*s, dptr, &e, dtype);
/*
- * fromstr always returns 0 for basic dtypes
- * s points to the end of the parsed string
- * if an error occurs s is not changed
+ * fromstr always returns 0 for basic dtypes; s points to the end of the
+ * parsed string. If s is not changed an error occurred or the end was
+ * reached.
*/
- if (*s == e) {
- /* Nothing read */
- return -1;
+ if (*s == e || r < 0) {
+ /* Nothing read, could be end of string or an error (or both) */
+ if (string_is_fully_read(*s, end)) {
+ return -1;
+ }
+ return -2;
}
*s = e;
if (end != NULL && *s > end) {
+ /* Stop the iteration if we read far enough */
return -1;
}
- return r;
+ return 0;
}
static int
@@ -75,9 +101,13 @@ fromfile_next_element(FILE **fp, void *dptr, PyArray_Descr *dtype,
if (r == 1) {
return 0;
}
- else {
+ else if (r == EOF) {
return -1;
}
+ else {
+ /* unable to read more, but EOF not reached indicating an error. */
+ return -2;
+ }
}
/*
@@ -143,9 +173,10 @@ fromstr_skip_separator(char **s, const char *sep, const char *end)
{
char *string = *s;
int result = 0;
+
while (1) {
char c = *string;
- if (c == '\0' || (end != NULL && string >= end)) {
+ if (string_is_fully_read(string, end)) {
result = -1;
break;
}
@@ -422,6 +453,10 @@ copy_and_swap(void *dst, void *src, int itemsize, npy_intp numitems,
}
}
+NPY_NO_EXPORT PyObject *
+_array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
+ npy_bool writeable, PyObject *context);
+
/*
* adapted from Numarray,
* a: destination array
@@ -442,11 +477,6 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
if (dst == NULL)
dst = a;
- /*
- * This code is to ensure that the sequence access below will
- * return a lower-dimensional sequence.
- */
-
/* INCREF on entry DECREF on exit */
Py_INCREF(s);
@@ -472,6 +502,11 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
return 0;
}
+ /*
+ * This code is to ensure that the sequence access below will
+ * return a lower-dimensional sequence.
+ */
+
if (dim > PyArray_NDIM(a)) {
PyErr_Format(PyExc_ValueError,
"setArrayFromSequence: sequence/array dimensions mismatch.");
@@ -482,6 +517,27 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
if (slen < 0) {
goto fail;
}
+ if (slen > 0) {
+ /* gh-13659: try __array__ before using s as a sequence */
+ PyObject *tmp = _array_from_array_like(s, /*dtype*/NULL, /*writeable*/0,
+ /*context*/NULL);
+ if (tmp == NULL) {
+ goto fail;
+ }
+ else if (tmp == Py_NotImplemented) {
+ Py_DECREF(tmp);
+ }
+ else {
+ int r = PyArray_CopyInto(dst, (PyArrayObject *)tmp);
+ Py_DECREF(tmp);
+ if (r < 0) {
+ goto fail;
+ }
+ Py_DECREF(s);
+ return 0;
+ }
+ }
+
/*
* Either the dimensions match, or the sequence has length 1 and can
* be broadcast to the destination.
@@ -743,16 +799,31 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
d[i] = buffer_view.shape[i];
}
PyBuffer_Release(&buffer_view);
+ _dealloc_cached_buffer_info(obj);
return 0;
}
+ else if (PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_BufferError) ||
+ PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ } else {
+ return -1;
+ }
+ }
else if (PyObject_GetBuffer(obj, &buffer_view, PyBUF_SIMPLE) == 0) {
d[0] = buffer_view.len;
*maxndim = 1;
PyBuffer_Release(&buffer_view);
+ _dealloc_cached_buffer_info(obj);
return 0;
}
- else {
- PyErr_Clear();
+ else if (PyErr_Occurred()) {
+ if (PyErr_ExceptionMatches(PyExc_BufferError) ||
+ PyErr_ExceptionMatches(PyExc_TypeError)) {
+ PyErr_Clear();
+ } else {
+ return -1;
+ }
}
}
@@ -896,6 +967,39 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
return 0;
}
+static PyObject *
+raise_memory_error(int nd, npy_intp *dims, PyArray_Descr *descr)
+{
+ static PyObject *exc_type = NULL;
+
+ npy_cache_import(
+ "numpy.core._exceptions", "_ArrayMemoryError",
+ &exc_type);
+ if (exc_type == NULL) {
+ goto fail;
+ }
+
+ PyObject *shape = PyArray_IntTupleFromIntp(nd, dims);
+ if (shape == NULL) {
+ goto fail;
+ }
+
+ /* produce an error object */
+ PyObject *exc_value = PyTuple_Pack(2, shape, (PyObject *)descr);
+ Py_DECREF(shape);
+ if (exc_value == NULL){
+ goto fail;
+ }
+ PyErr_SetObject(exc_type, exc_value);
+ Py_DECREF(exc_value);
+ return NULL;
+
+fail:
+ /* we couldn't raise the formatted exception for some reason */
+ PyErr_WriteUnraisable(NULL);
+ return PyErr_NoMemory();
+}
+
/*
* Generic new array creation routine.
* Internal variant with calloc argument for PyArray_Zeros.
@@ -904,13 +1008,14 @@ discover_dimensions(PyObject *obj, int *maxndim, npy_intp *d, int check_it,
* be decrefed.
*/
NPY_NO_EXPORT PyObject *
-PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
- npy_intp *dims, npy_intp *strides, void *data,
- int flags, PyObject *obj, PyObject *base, int zeroed,
- int allow_emptystring)
+PyArray_NewFromDescr_int(
+ PyTypeObject *subtype, PyArray_Descr *descr, int nd,
+ npy_intp const *dims, npy_intp const *strides, void *data,
+ int flags, PyObject *obj, PyObject *base, int zeroed,
+ int allow_emptystring)
{
PyArrayObject_fields *fa;
- int i, is_empty;
+ int i;
npy_intp nbytes;
if (descr->subarray) {
@@ -964,7 +1069,6 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
}
/* Check dimensions and multiply them to nbytes */
- is_empty = 0;
for (i = 0; i < nd; i++) {
npy_intp dim = dims[i];
@@ -973,7 +1077,6 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* Compare to PyArray_OverflowMultiplyList that
* returns 0 in this case.
*/
- is_empty = 1;
continue;
}
@@ -1031,7 +1134,9 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
goto fail;
}
fa->strides = fa->dimensions + nd;
- memcpy(fa->dimensions, dims, sizeof(npy_intp)*nd);
+ if (nd) {
+ memcpy(fa->dimensions, dims, sizeof(npy_intp)*nd);
+ }
if (strides == NULL) { /* fill it in */
_array_fill_strides(fa->strides, dims, nd, descr->elsize,
flags, &(fa->flags));
@@ -1041,7 +1146,9 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* we allow strides even when we create
* the memory, but be careful with this...
*/
- memcpy(fa->strides, strides, sizeof(npy_intp)*nd);
+ if (nd) {
+ memcpy(fa->strides, strides, sizeof(npy_intp)*nd);
+ }
}
}
else {
@@ -1056,8 +1163,8 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* (a.data) doesn't work as it should.
* Could probably just allocate a few bytes here. -- Chuck
*/
- if (is_empty) {
- nbytes = descr->elsize;
+ if (nbytes == 0) {
+ nbytes = descr->elsize ? descr->elsize : 1;
}
/*
* It is bad to have uninitialized OBJECT pointers
@@ -1070,8 +1177,7 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
data = npy_alloc_cache(nbytes);
}
if (data == NULL) {
- PyErr_NoMemory();
- goto fail;
+ return raise_memory_error(fa->nd, fa->dimensions, descr);
}
fa->flags |= NPY_ARRAY_OWNDATA;
@@ -1157,9 +1263,10 @@ PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
* true, dtype will be decrefed.
*/
NPY_NO_EXPORT PyObject *
-PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr,
- int nd, npy_intp *dims, npy_intp *strides, void *data,
- int flags, PyObject *obj)
+PyArray_NewFromDescr(
+ PyTypeObject *subtype, PyArray_Descr *descr,
+ int nd, npy_intp const *dims, npy_intp const *strides, void *data,
+ int flags, PyObject *obj)
{
return PyArray_NewFromDescrAndBase(
subtype, descr,
@@ -1173,7 +1280,7 @@ PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr,
NPY_NO_EXPORT PyObject *
PyArray_NewFromDescrAndBase(
PyTypeObject *subtype, PyArray_Descr *descr,
- int nd, npy_intp *dims, npy_intp *strides, void *data,
+ int nd, npy_intp const *dims, npy_intp const *strides, void *data,
int flags, PyObject *obj, PyObject *base)
{
return PyArray_NewFromDescr_int(subtype, descr, nd,
@@ -1181,9 +1288,9 @@ PyArray_NewFromDescrAndBase(
flags, obj, base, 0, 0);
}
-/*NUMPY_API
+/*
* Creates a new array with the same shape as the provided one,
- * with possible memory layout order and data type changes.
+ * with possible memory layout order, data type and shape changes.
*
* prototype - The array the new one should be like.
* order - NPY_CORDER - C-contiguous result.
@@ -1191,6 +1298,8 @@ PyArray_NewFromDescrAndBase(
* NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise.
* NPY_KEEPORDER - Keeps the axis ordering of prototype.
* dtype - If not NULL, overrides the data type of the result.
+ * ndim - If not 0 and dims not NULL, overrides the shape of the result.
+ * dims - If not NULL and ndim not 0, overrides the shape of the result.
* subok - If 1, use the prototype's array subtype, otherwise
* always create a base-class array.
*
@@ -1198,11 +1307,18 @@ PyArray_NewFromDescrAndBase(
* dtype->subarray is true, dtype will be decrefed.
*/
NPY_NO_EXPORT PyObject *
-PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
- PyArray_Descr *dtype, int subok)
+PyArray_NewLikeArrayWithShape(PyArrayObject *prototype, NPY_ORDER order,
+ PyArray_Descr *dtype, int ndim, npy_intp const *dims, int subok)
{
PyObject *ret = NULL;
- int ndim = PyArray_NDIM(prototype);
+
+ if (dims == NULL) {
+ ndim = PyArray_NDIM(prototype);
+ dims = PyArray_DIMS(prototype);
+ }
+ else if (order == NPY_KEEPORDER && (ndim != PyArray_NDIM(prototype))) {
+ order = NPY_CORDER;
+ }
/* If no override data type, use the one from the prototype */
if (dtype == NULL) {
@@ -1235,7 +1351,7 @@ PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
ret = PyArray_NewFromDescr(subok ? Py_TYPE(prototype) : &PyArray_Type,
dtype,
ndim,
- PyArray_DIMS(prototype),
+ dims,
NULL,
NULL,
order,
@@ -1244,11 +1360,10 @@ PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
/* KEEPORDER needs some analysis of the strides */
else {
npy_intp strides[NPY_MAXDIMS], stride;
- npy_intp *shape = PyArray_DIMS(prototype);
npy_stride_sort_item strideperm[NPY_MAXDIMS];
int idim;
- PyArray_CreateSortedStridePerm(PyArray_NDIM(prototype),
+ PyArray_CreateSortedStridePerm(ndim,
PyArray_STRIDES(prototype),
strideperm);
@@ -1257,14 +1372,14 @@ PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
for (idim = ndim-1; idim >= 0; --idim) {
npy_intp i_perm = strideperm[idim].perm;
strides[i_perm] = stride;
- stride *= shape[i_perm];
+ stride *= dims[i_perm];
}
/* Finally, allocate the array */
ret = PyArray_NewFromDescr(subok ? Py_TYPE(prototype) : &PyArray_Type,
dtype,
ndim,
- shape,
+ dims,
strides,
NULL,
0,
@@ -1275,12 +1390,36 @@ PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
}
/*NUMPY_API
+ * Creates a new array with the same shape as the provided one,
+ * with possible memory layout order and data type changes.
+ *
+ * prototype - The array the new one should be like.
+ * order - NPY_CORDER - C-contiguous result.
+ * NPY_FORTRANORDER - Fortran-contiguous result.
+ * NPY_ANYORDER - Fortran if prototype is Fortran, C otherwise.
+ * NPY_KEEPORDER - Keeps the axis ordering of prototype.
+ * dtype - If not NULL, overrides the data type of the result.
+ * subok - If 1, use the prototype's array subtype, otherwise
+ * always create a base-class array.
+ *
+ * NOTE: If dtype is not NULL, steals the dtype reference. On failure or when
+ * dtype->subarray is true, dtype will be decrefed.
+ */
+NPY_NO_EXPORT PyObject *
+PyArray_NewLikeArray(PyArrayObject *prototype, NPY_ORDER order,
+ PyArray_Descr *dtype, int subok)
+{
+ return PyArray_NewLikeArrayWithShape(prototype, order, dtype, 0, NULL, subok);
+}
+
+/*NUMPY_API
* Generic new array creation routine.
*/
NPY_NO_EXPORT PyObject *
-PyArray_New(PyTypeObject *subtype, int nd, npy_intp *dims, int type_num,
- npy_intp *strides, void *data, int itemsize, int flags,
- PyObject *obj)
+PyArray_New(
+ PyTypeObject *subtype, int nd, npy_intp const *dims, int type_num,
+ npy_intp const *strides, void *data, int itemsize, int flags,
+ PyObject *obj)
{
PyArray_Descr *descr;
PyObject *new;
@@ -1328,28 +1467,6 @@ _dtype_from_buffer_3118(PyObject *memoryview)
}
-/*
- * Call the python _is_from_ctypes
- */
-NPY_NO_EXPORT int
-_is_from_ctypes(PyObject *obj) {
- PyObject *ret_obj;
- static PyObject *py_func = NULL;
-
- npy_cache_import("numpy.core._internal", "_is_from_ctypes", &py_func);
-
- if (py_func == NULL) {
- return -1;
- }
- ret_obj = PyObject_CallFunctionObjArgs(py_func, obj, NULL);
- if (ret_obj == NULL) {
- return -1;
- }
-
- return PyObject_IsTrue(ret_obj);
-}
-
-
NPY_NO_EXPORT PyObject *
_array_from_buffer_3118(PyObject *memoryview)
{
@@ -1381,15 +1498,7 @@ _array_from_buffer_3118(PyObject *memoryview)
* Note that even if the above are fixed in master, we have to drop the
* early patch versions of python to actually make use of the fixes.
*/
-
- int is_ctypes = _is_from_ctypes(view->obj);
- if (is_ctypes < 0) {
- /* This error is not useful */
- PyErr_WriteUnraisable(view->obj);
- is_ctypes = 0;
- }
-
- if (!is_ctypes) {
+ if (!npy_ctypes_check(Py_TYPE(view->obj))) {
/* This object has no excuse for a broken PEP3118 buffer */
PyErr_Format(
PyExc_RuntimeError,
@@ -1416,6 +1525,7 @@ _array_from_buffer_3118(PyObject *memoryview)
* dimensions, so the array is now 0d.
*/
nd = 0;
+ Py_DECREF(descr);
descr = (PyArray_Descr *)PyObject_CallFunctionObjArgs(
(PyObject *)&PyArrayDescr_Type, Py_TYPE(view->obj), NULL);
if (descr == NULL) {
@@ -1486,6 +1596,90 @@ fail:
}
+
+/*
+ * Attempts to extract an array from an array-like object.
+ *
+ * array-like is defined as either
+ *
+ * * an object implementing the PEP 3118 buffer interface;
+ * * an object with __array_struct__ or __array_interface__ attributes;
+ * * an object with an __array__ function.
+ *
+ * Returns Py_NotImplemented if a given object is not array-like;
+ * PyArrayObject* in case of success and NULL in case of failure.
+ */
+NPY_NO_EXPORT PyObject *
+_array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
+ npy_bool writeable, PyObject *context) {
+ PyObject* tmp;
+
+ /* If op supports the PEP 3118 buffer interface */
+ if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
+ PyObject *memoryview = PyMemoryView_FromObject(op);
+ if (memoryview == NULL) {
+ PyErr_Clear();
+ }
+ else {
+ tmp = _array_from_buffer_3118(memoryview);
+ Py_DECREF(memoryview);
+ if (tmp == NULL) {
+ return NULL;
+ }
+
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *) tmp, "PEP 3118 buffer") < 0) {
+ Py_DECREF(tmp);
+ return NULL;
+ }
+
+ return tmp;
+ }
+ }
+
+ /* If op supports the __array_struct__ or __array_interface__ interface */
+ tmp = PyArray_FromStructInterface(op);
+ if (tmp == NULL) {
+ return NULL;
+ }
+ if (tmp == Py_NotImplemented) {
+ tmp = PyArray_FromInterface(op);
+ if (tmp == NULL) {
+ return NULL;
+ }
+ }
+
+ /*
+ * If op supplies the __array__ function.
+ * The documentation says this should produce a copy, so
+ * we skip this method if writeable is true, because the intent
+ * of writeable is to modify the operand.
+ * XXX: If the implementation is wrong, and/or if actual
+ * usage requires this behave differently,
+ * this should be changed!
+ */
+ if (!writeable && tmp == Py_NotImplemented) {
+ tmp = PyArray_FromArrayAttr(op, requested_dtype, context);
+ if (tmp == NULL) {
+ return NULL;
+ }
+ }
+
+ if (tmp != Py_NotImplemented) {
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *) tmp,
+ "array interface object") < 0) {
+ Py_DECREF(tmp);
+ return NULL;
+ }
+ return tmp;
+ }
+
+ Py_INCREF(Py_NotImplemented);
+ return Py_NotImplemented;
+}
+
+
/*NUMPY_API
* Retrieves the array parameters for viewing/converting an arbitrary
* PyObject* to a NumPy array. This allows the "innate type and shape"
@@ -1593,69 +1787,20 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
return 0;
}
- /* If op supports the PEP 3118 buffer interface */
- if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
-
- PyObject *memoryview = PyMemoryView_FromObject(op);
- if (memoryview == NULL) {
- PyErr_Clear();
- }
- else {
- PyObject *arr = _array_from_buffer_3118(memoryview);
- Py_DECREF(memoryview);
- if (arr == NULL) {
- return -1;
- }
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *)arr, "PEP 3118 buffer") < 0) {
- Py_DECREF(arr);
- return -1;
- }
- *out_arr = (PyArrayObject *)arr;
- return 0;
- }
- }
-
- /* If op supports the __array_struct__ or __array_interface__ interface */
- tmp = PyArray_FromStructInterface(op);
+ /* If op is an array-like */
+ tmp = _array_from_array_like(op, requested_dtype, writeable, context);
if (tmp == NULL) {
return -1;
}
- if (tmp == Py_NotImplemented) {
- tmp = PyArray_FromInterface(op);
- if (tmp == NULL) {
- return -1;
- }
- }
- if (tmp != Py_NotImplemented) {
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *)tmp,
- "array interface object") < 0) {
- Py_DECREF(tmp);
- return -1;
- }
- *out_arr = (PyArrayObject *)tmp;
- return (*out_arr) == NULL ? -1 : 0;
+ else if (tmp != Py_NotImplemented) {
+ *out_arr = (PyArrayObject*) tmp;
+ return 0;
}
-
- /*
- * If op supplies the __array__ function.
- * The documentation says this should produce a copy, so
- * we skip this method if writeable is true, because the intent
- * of writeable is to modify the operand.
- * XXX: If the implementation is wrong, and/or if actual
- * usage requires this behave differently,
- * this should be changed!
- */
- if (!writeable) {
- tmp = PyArray_FromArrayAttr(op, requested_dtype, context);
- if (tmp != Py_NotImplemented) {
- *out_arr = (PyArrayObject *)tmp;
- return (*out_arr) == NULL ? -1 : 0;
- }
+ else {
+ Py_DECREF(Py_NotImplemented);
}
- /* Try to treat op as a list of lists */
+ /* Try to treat op as a list of lists or array-like objects. */
if (!writeable && PySequence_Check(op)) {
int check_it, stop_at_string, stop_at_tuple, is_object;
int type_num, type;
@@ -1817,15 +1962,19 @@ PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
/* If the requested dtype is flexible, adapt it */
if (newtype != NULL) {
- PyArray_AdaptFlexibleDType(op,
+ newtype = PyArray_AdaptFlexibleDType(op,
(dtype == NULL) ? PyArray_DESCR(arr) : dtype,
- &newtype);
+ newtype);
+ if (newtype == NULL) {
+ return NULL;
+ }
}
/* If we got dimensions and dtype instead of an array */
if (arr == NULL) {
if ((flags & NPY_ARRAY_WRITEBACKIFCOPY) ||
(flags & NPY_ARRAY_UPDATEIFCOPY)) {
+ Py_DECREF(dtype);
Py_XDECREF(newtype);
PyErr_SetString(PyExc_TypeError,
"WRITEBACKIFCOPY used for non-array input.");
@@ -2030,7 +2179,7 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
newtype = oldtype;
Py_INCREF(oldtype);
}
- if (PyDataType_ISUNSIZED(newtype)) {
+ else if (PyDataType_ISUNSIZED(newtype)) {
PyArray_DESCR_REPLACE(newtype);
if (newtype == NULL) {
return NULL;
@@ -2134,12 +2283,15 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
*/
/* 2017-Nov-10 1.14 */
- if (DEPRECATE("NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and "
- "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, "
- "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively "
- "instead, and call PyArray_ResolveWritebackIfCopy before the "
- "array is deallocated, i.e. before the last call to Py_DECREF.") < 0)
+ if (DEPRECATE(
+ "NPY_ARRAY_UPDATEIFCOPY, NPY_ARRAY_INOUT_ARRAY, and "
+ "NPY_ARRAY_INOUT_FARRAY are deprecated, use NPY_WRITEBACKIFCOPY, "
+ "NPY_ARRAY_INOUT_ARRAY2, or NPY_ARRAY_INOUT_FARRAY2 respectively "
+ "instead, and call PyArray_ResolveWritebackIfCopy before the "
+ "array is deallocated, i.e. before the last call to Py_DECREF.") < 0) {
+ Py_DECREF(ret);
return NULL;
+ }
Py_INCREF(arr);
if (PyArray_SetWritebackIfCopyBase(ret, arr) < 0) {
Py_DECREF(ret);
@@ -2166,14 +2318,12 @@ PyArray_FromArray(PyArrayObject *arr, PyArray_Descr *newtype, int flags)
Py_DECREF(newtype);
if (needview) {
- PyArray_Descr *dtype = PyArray_DESCR(arr);
PyTypeObject *subtype = NULL;
if (flags & NPY_ARRAY_ENSUREARRAY) {
subtype = &PyArray_Type;
}
- Py_INCREF(dtype);
ret = (PyArrayObject *)PyArray_View(arr, NULL, subtype);
if (ret == NULL) {
return NULL;
@@ -2471,6 +2621,7 @@ PyArray_FromInterface(PyObject *origin)
* sticks around after the release.
*/
PyBuffer_Release(&view);
+ _dealloc_cached_buffer_info(base);
#else
res = PyObject_AsWriteBuffer(base, (void **)&data, &buffer_len);
if (res < 0) {
@@ -2484,7 +2635,7 @@ PyArray_FromInterface(PyObject *origin)
}
#endif
/* Get offset number from interface specification */
- attr = PyDict_GetItemString(origin, "offset");
+ attr = PyDict_GetItemString(iface, "offset");
if (attr) {
npy_longlong num = PyLong_AsLongLong(attr);
if (error_converting(num)) {
@@ -2500,6 +2651,11 @@ PyArray_FromInterface(PyObject *origin)
&PyArray_Type, dtype,
n, dims, NULL, data,
dataflags, NULL, base);
+ /*
+ * Ref to dtype was stolen by PyArray_NewFromDescrAndBase
+ * Prevent DECREFing dtype in fail codepath by setting to NULL
+ */
+ dtype = NULL;
if (ret == NULL) {
goto fail;
}
@@ -2537,7 +2693,9 @@ PyArray_FromInterface(PyObject *origin)
goto fail;
}
}
- memcpy(PyArray_STRIDES(ret), strides, n*sizeof(npy_intp));
+ if (n) {
+ memcpy(PyArray_STRIDES(ret), strides, n*sizeof(npy_intp));
+ }
}
PyArray_UpdateFlags(ret, NPY_ARRAY_UPDATE_ALL);
Py_DECREF(iface);
@@ -2626,61 +2784,30 @@ PyArray_DescrFromObject(PyObject *op, PyArray_Descr *mintype)
/* They all zero-out the memory as previously done */
/* steals reference to descr -- and enforces native byteorder on it.*/
+
/*NUMPY_API
- Like FromDimsAndData but uses the Descr structure instead of typecode
- as input.
+ Deprecated, use PyArray_NewFromDescr instead.
*/
NPY_NO_EXPORT PyObject *
-PyArray_FromDimsAndDataAndDescr(int nd, int *d,
+PyArray_FromDimsAndDataAndDescr(int NPY_UNUSED(nd), int *NPY_UNUSED(d),
PyArray_Descr *descr,
- char *data)
+ char *NPY_UNUSED(data))
{
- PyObject *ret;
- int i;
- npy_intp newd[NPY_MAXDIMS];
- char msg[] = "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.";
-
- if (DEPRECATE(msg) < 0) {
- /* 2009-04-30, 1.5 */
- return NULL;
- }
- if (!PyArray_ISNBO(descr->byteorder))
- descr->byteorder = '=';
- for (i = 0; i < nd; i++) {
- newd[i] = (npy_intp) d[i];
- }
- ret = PyArray_NewFromDescr(&PyArray_Type, descr,
- nd, newd,
- NULL, data,
- (data ? NPY_ARRAY_CARRAY : 0), NULL);
- return ret;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_FromDimsAndDataAndDescr: use PyArray_NewFromDescr.");
+ Py_DECREF(descr);
+ return NULL;
}
/*NUMPY_API
- Construct an empty array from dimensions and typenum
+ Deprecated, use PyArray_SimpleNew instead.
*/
NPY_NO_EXPORT PyObject *
-PyArray_FromDims(int nd, int *d, int type)
+PyArray_FromDims(int NPY_UNUSED(nd), int *NPY_UNUSED(d), int NPY_UNUSED(type))
{
- PyArrayObject *ret;
- char msg[] = "PyArray_FromDims: use PyArray_SimpleNew.";
-
- if (DEPRECATE(msg) < 0) {
- /* 2009-04-30, 1.5 */
- return NULL;
- }
- ret = (PyArrayObject *)PyArray_FromDimsAndDataAndDescr(nd, d,
- PyArray_DescrFromType(type),
- NULL);
- /*
- * Old FromDims set memory to zero --- some algorithms
- * relied on that. Better keep it the same. If
- * Object type, then it's already been set to zero, though.
- */
- if (ret && (PyArray_DESCR(ret)->type_num != NPY_OBJECT)) {
- memset(PyArray_DATA(ret), 0, PyArray_NBYTES(ret));
- }
- return (PyObject *)ret;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_FromDims: use PyArray_SimpleNew.");
+ return NULL;
}
/* end old calls */
@@ -2832,7 +2959,8 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src, NPY_ORDER order)
* contiguous strides, etc.
*/
if (PyArray_GetDTypeTransferFunction(
- IsUintAligned(src) && IsUintAligned(dst),
+ IsUintAligned(src) && IsAligned(src) &&
+ IsUintAligned(dst) && IsAligned(dst),
src_stride, dst_stride,
PyArray_DESCR(src), PyArray_DESCR(dst),
0,
@@ -2997,7 +3125,7 @@ PyArray_CheckAxis(PyArrayObject *arr, int *axis, int flags)
* accepts NULL type
*/
NPY_NO_EXPORT PyObject *
-PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
+PyArray_Zeros(int nd, npy_intp const *dims, PyArray_Descr *type, int is_f_order)
{
PyArrayObject *ret;
@@ -3032,10 +3160,10 @@ PyArray_Zeros(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
* Empty
*
* accepts NULL type
- * steals referenct to type
+ * steals a reference to type
*/
NPY_NO_EXPORT PyObject *
-PyArray_Empty(int nd, npy_intp *dims, PyArray_Descr *type, int is_f_order)
+PyArray_Empty(int nd, npy_intp const *dims, PyArray_Descr *type, int is_f_order)
{
PyArrayObject *ret;
@@ -3446,11 +3574,13 @@ PyArray_ArangeObj(PyObject *start, PyObject *stop, PyObject *step, PyArray_Descr
return NULL;
}
+/* This array creation function steals the reference to dtype. */
static PyArrayObject *
array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nread)
{
PyArrayObject *r;
npy_off_t start, numbytes;
+ int elsize;
if (num < 0) {
int fail = 0;
@@ -3477,27 +3607,29 @@ array_fromfile_binary(FILE *fp, PyArray_Descr *dtype, npy_intp num, size_t *nrea
}
num = numbytes / dtype->elsize;
}
+
/*
- * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype
- * even on success, so make sure it stays around until exit.
+ * Array creation may move sub-array dimensions from the dtype to array
+ * dimensions, so we need to use the original element size when reading.
*/
- Py_INCREF(dtype);
+ elsize = dtype->elsize;
+
r = (PyArrayObject *)PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &num,
NULL, NULL, 0, NULL);
if (r == NULL) {
- Py_DECREF(dtype);
return NULL;
}
+
NPY_BEGIN_ALLOW_THREADS;
- *nread = fread(PyArray_DATA(r), dtype->elsize, num, fp);
+ *nread = fread(PyArray_DATA(r), elsize, num, fp);
NPY_END_ALLOW_THREADS;
- Py_DECREF(dtype);
return r;
}
/*
* Create an array by reading from the given stream, using the passed
* next_element and skip_separator functions.
+ * As typical for array creation functions, it steals the reference to dtype.
*/
#define FROM_BUFFER_SIZE 4096
static PyArrayObject *
@@ -3509,6 +3641,7 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
npy_intp i;
char *dptr, *clean_sep, *tmp;
int err = 0;
+ int stop_reading_flag; /* -1 indicates end reached; -2 a parsing error */
npy_intp thisbuf = 0;
npy_intp size;
npy_intp bytes, totalbytes;
@@ -3516,10 +3649,11 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
size = (num >= 0) ? num : FROM_BUFFER_SIZE;
/*
- * When dtype->subarray is true, PyArray_NewFromDescr will decref dtype
- * even on success, so make sure it stays around until exit.
+ * Array creation may move sub-array dimensions from the dtype to array
+ * dimensions, so we need to use the original dtype when reading.
*/
Py_INCREF(dtype);
+
r = (PyArrayObject *)
PyArray_NewFromDescr(&PyArray_Type, dtype, 1, &size,
NULL, NULL, 0, NULL);
@@ -3527,6 +3661,7 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
Py_DECREF(dtype);
return NULL;
}
+
clean_sep = swab_separator(sep);
if (clean_sep == NULL) {
err = 1;
@@ -3536,9 +3671,9 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
NPY_BEGIN_ALLOW_THREADS;
totalbytes = bytes = size * dtype->elsize;
dptr = PyArray_DATA(r);
- for (i= 0; num < 0 || i < num; i++) {
- if (next(&stream, dptr, dtype, stream_data) < 0) {
- /* EOF */
+ for (i = 0; num < 0 || i < num; i++) {
+ stop_reading_flag = next(&stream, dptr, dtype, stream_data);
+ if (stop_reading_flag < 0) {
break;
}
*nread += 1;
@@ -3555,23 +3690,48 @@ array_from_text(PyArray_Descr *dtype, npy_intp num, char *sep, size_t *nread,
dptr = tmp + (totalbytes - bytes);
thisbuf = 0;
}
- if (skip_sep(&stream, clean_sep, stream_data) < 0) {
+ stop_reading_flag = skip_sep(&stream, clean_sep, stream_data);
+ if (stop_reading_flag < 0) {
+ if (num == i + 1) {
+ /* if we read as much as requested sep is optional */
+ stop_reading_flag = -1;
+ }
break;
}
}
if (num < 0) {
- tmp = PyDataMem_RENEW(PyArray_DATA(r), PyArray_MAX(*nread,1)*dtype->elsize);
- if (tmp == NULL) {
- err = 1;
- }
- else {
- PyArray_DIMS(r)[0] = *nread;
- ((PyArrayObject_fields *)r)->data = tmp;
+ const size_t nsize = PyArray_MAX(*nread,1)*dtype->elsize;
+
+ if (nsize != 0) {
+ tmp = PyDataMem_RENEW(PyArray_DATA(r), nsize);
+ if (tmp == NULL) {
+ err = 1;
+ }
+ else {
+ PyArray_DIMS(r)[0] = *nread;
+ ((PyArrayObject_fields *)r)->data = tmp;
+ }
}
}
NPY_END_ALLOW_THREADS;
+
free(clean_sep);
+ if (stop_reading_flag == -2) {
+ if (PyErr_Occurred()) {
+ /* If an error is already set (unlikely), do not create new one */
+ Py_DECREF(r);
+ Py_DECREF(dtype);
+ return NULL;
+ }
+ /* 2019-09-12, NumPy 1.18 */
+ if (DEPRECATE(
+ "string or file could not be read to its end due to unmatched "
+ "data; this will raise a ValueError in the future.") < 0) {
+ goto fail;
+ }
+ }
+
fail:
Py_DECREF(dtype);
if (err == 1) {
@@ -3590,9 +3750,8 @@ fail:
* Given a ``FILE *`` pointer ``fp``, and a ``PyArray_Descr``, return an
* array corresponding to the data encoded in that file.
*
- * If the dtype is NULL, the default array type is used (double).
- * If non-null, the reference is stolen and if dtype->subarray is true dtype
- * will be decrefed even on success.
+ * The reference to `dtype` is stolen (it is possible that the passed in
+ * dtype is not held on to).
*
* The number of elements to read is given as ``num``; if it is < 0, then
* then as many as possible are read.
@@ -3640,7 +3799,6 @@ PyArray_FromFile(FILE *fp, PyArray_Descr *dtype, npy_intp num, char *sep)
(skip_separator) fromfile_skip_separator, NULL);
}
if (ret == NULL) {
- Py_DECREF(dtype);
return NULL;
}
if (((npy_intp) nread) < num) {
@@ -3687,32 +3845,12 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
Py_DECREF(type);
return NULL;
}
- if (Py_TYPE(buf)->tp_as_buffer == NULL
-#if defined(NPY_PY3K)
- || Py_TYPE(buf)->tp_as_buffer->bf_getbuffer == NULL
-#else
- || (Py_TYPE(buf)->tp_as_buffer->bf_getwritebuffer == NULL
- && Py_TYPE(buf)->tp_as_buffer->bf_getreadbuffer == NULL)
-#endif
- ) {
- PyObject *newbuf;
- newbuf = PyObject_GetAttr(buf, npy_ma_str_buffer);
- if (newbuf == NULL) {
- Py_DECREF(type);
- return NULL;
- }
- buf = newbuf;
- }
- else {
- Py_INCREF(buf);
- }
#if defined(NPY_PY3K)
if (PyObject_GetBuffer(buf, &view, PyBUF_WRITABLE|PyBUF_SIMPLE) < 0) {
writeable = 0;
PyErr_Clear();
if (PyObject_GetBuffer(buf, &view, PyBUF_SIMPLE) < 0) {
- Py_DECREF(buf);
Py_DECREF(type);
return NULL;
}
@@ -3726,12 +3864,12 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
* sticks around after the release.
*/
PyBuffer_Release(&view);
+ _dealloc_cached_buffer_info(buf);
#else
if (PyObject_AsWriteBuffer(buf, (void *)&data, &ts) == -1) {
writeable = 0;
PyErr_Clear();
if (PyObject_AsReadBuffer(buf, (void *)&data, &ts) == -1) {
- Py_DECREF(buf);
Py_DECREF(type);
return NULL;
}
@@ -3742,7 +3880,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
PyErr_Format(PyExc_ValueError,
"offset must be non-negative and no greater than buffer "\
"length (%" NPY_INTP_FMT ")", (npy_intp)ts);
- Py_DECREF(buf);
Py_DECREF(type);
return NULL;
}
@@ -3751,12 +3888,17 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
s = (npy_intp)ts - offset;
n = (npy_intp)count;
itemsize = type->elsize;
- if (n < 0 ) {
+ if (n < 0) {
+ if (itemsize == 0) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot determine count if itemsize is 0");
+ Py_DECREF(type);
+ return NULL;
+ }
if (s % itemsize != 0) {
PyErr_SetString(PyExc_ValueError,
"buffer size must be a multiple"\
" of element size");
- Py_DECREF(buf);
Py_DECREF(type);
return NULL;
}
@@ -3767,7 +3909,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
PyErr_SetString(PyExc_ValueError,
"buffer is smaller than requested"\
" size");
- Py_DECREF(buf);
Py_DECREF(type);
return NULL;
}
@@ -3777,7 +3918,6 @@ PyArray_FromBuffer(PyObject *buf, PyArray_Descr *type,
&PyArray_Type, type,
1, &n, NULL, data,
NPY_ARRAY_DEFAULT, NULL, buf);
- Py_DECREF(buf);
if (ret == NULL) {
return NULL;
}
@@ -3859,6 +3999,11 @@ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr *dtype,
return NULL;
}
}
+ /*
+ * NewFromDescr may replace dtype to absorb subarray shape
+ * into the array, so get size beforehand.
+ */
+ npy_intp size_to_copy = num*dtype->elsize;
ret = (PyArrayObject *)
PyArray_NewFromDescr(&PyArray_Type, dtype,
1, &num, NULL, NULL,
@@ -3866,14 +4011,14 @@ PyArray_FromString(char *data, npy_intp slen, PyArray_Descr *dtype,
if (ret == NULL) {
return NULL;
}
- memcpy(PyArray_DATA(ret), data, num*dtype->elsize);
+ memcpy(PyArray_DATA(ret), data, size_to_copy);
}
else {
/* read from character-based string */
size_t nread = 0;
char *end;
- if (dtype->f->scanfunc == NULL) {
+ if (dtype->f->fromstr == NULL) {
PyErr_SetString(PyExc_ValueError,
"don't know how to read " \
"character strings with that " \
@@ -3917,7 +4062,16 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
"Must specify length when using variable-size data-type.");
goto done;
}
- elcount = (count < 0) ? 0 : count;
+ if (count < 0) {
+ elcount = PyObject_LengthHint(obj, 0);
+ if (elcount < 0) {
+ goto done;
+ }
+ }
+ else {
+ elcount = count;
+ }
+
elsize = dtype->elsize;
/*
@@ -3938,7 +4092,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
}
for (i = 0; (i < count || count == -1) &&
(value = PyIter_Next(iter)); i++) {
- if (i >= elcount) {
+ if (i >= elcount && elsize != 0) {
npy_intp nbytes;
/*
Grow PyArray_DATA(ret):
@@ -3984,9 +4138,9 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
* Realloc the data so that don't keep extra memory tied up
* (assuming realloc is reasonably good about reusing space...)
*/
- if (i == 0) {
+ if (i == 0 || elsize == 0) {
/* The size cannot be zero for PyDataMem_RENEW. */
- i = 1;
+ goto done;
}
new_data = PyDataMem_RENEW(PyArray_DATA(ret), i * elsize);
if (new_data == NULL) {
@@ -4026,7 +4180,7 @@ PyArray_FromIter(PyObject *obj, PyArray_Descr *dtype, npy_intp count)
*/
NPY_NO_EXPORT void
-_array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
+_array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t itemsize,
int inflag, int *objflags)
{
int i;
diff --git a/numpy/core/src/multiarray/ctors.h b/numpy/core/src/multiarray/ctors.h
index e9a2532da..4768e4efd 100644
--- a/numpy/core/src/multiarray/ctors.h
+++ b/numpy/core/src/multiarray/ctors.h
@@ -2,24 +2,33 @@
#define _NPY_ARRAY_CTORS_H_
NPY_NO_EXPORT PyObject *
-PyArray_NewFromDescr(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
- npy_intp *dims, npy_intp *strides, void *data,
- int flags, PyObject *obj);
+PyArray_NewFromDescr(
+ PyTypeObject *subtype, PyArray_Descr *descr, int nd,
+ npy_intp const *dims, npy_intp const *strides, void *data,
+ int flags, PyObject *obj);
NPY_NO_EXPORT PyObject *
PyArray_NewFromDescrAndBase(
- PyTypeObject *subtype, PyArray_Descr *descr,
- int nd, npy_intp *dims, npy_intp *strides, void *data,
+ PyTypeObject *subtype, PyArray_Descr *descr, int nd,
+ npy_intp const *dims, npy_intp const *strides, void *data,
int flags, PyObject *obj, PyObject *base);
NPY_NO_EXPORT PyObject *
-PyArray_NewFromDescr_int(PyTypeObject *subtype, PyArray_Descr *descr, int nd,
- npy_intp *dims, npy_intp *strides, void *data,
- int flags, PyObject *obj, PyObject *base, int zeroed,
- int allow_emptystring);
+PyArray_NewFromDescr_int(
+ PyTypeObject *subtype, PyArray_Descr *descr, int nd,
+ npy_intp const *dims, npy_intp const *strides, void *data,
+ int flags, PyObject *obj, PyObject *base, int zeroed,
+ int allow_emptystring);
-NPY_NO_EXPORT PyObject *PyArray_New(PyTypeObject *, int nd, npy_intp *,
- int, npy_intp *, void *, int, int, PyObject *);
+NPY_NO_EXPORT PyObject *
+PyArray_NewLikeArrayWithShape(
+ PyArrayObject *prototype, NPY_ORDER order,
+ PyArray_Descr *dtype, int ndim, npy_intp const *dims, int subok);
+
+NPY_NO_EXPORT PyObject *
+PyArray_New(
+ PyTypeObject *, int nd, npy_intp const *,
+ int, npy_intp const*, void *, int, int, PyObject *);
NPY_NO_EXPORT PyObject *
PyArray_FromAny(PyObject *op, PyArray_Descr *newtype, int min_depth,
@@ -64,7 +73,7 @@ PyArray_CopyAsFlat(PyArrayObject *dst, PyArrayObject *src,
/* FIXME: remove those from here */
NPY_NO_EXPORT void
-_array_fill_strides(npy_intp *strides, npy_intp *dims, int nd, size_t itemsize,
+_array_fill_strides(npy_intp *strides, npy_intp const *dims, int nd, size_t itemsize,
int inflag, int *objflags);
NPY_NO_EXPORT void
diff --git a/numpy/core/src/multiarray/datetime.c b/numpy/core/src/multiarray/datetime.c
index 7f837901c..d21bb9776 100644
--- a/numpy/core/src/multiarray/datetime.c
+++ b/numpy/core/src/multiarray/datetime.c
@@ -27,6 +27,40 @@
#include "datetime_strings.h"
/*
+ * Computes the python `ret, d = divmod(d, unit)`.
+ *
+ * Note that GCC is smart enough at -O2 to eliminate the `if(*d < 0)` branch
+ * for subsequent calls to this command - it is able to deduce that `*d >= 0`.
+ */
+static inline
+npy_int64 extract_unit_64(npy_int64 *d, npy_int64 unit) {
+ assert(unit > 0);
+ npy_int64 div = *d / unit;
+ npy_int64 mod = *d % unit;
+ if (mod < 0) {
+ mod += unit;
+ div -= 1;
+ }
+ assert(mod >= 0);
+ *d = mod;
+ return div;
+}
+
+static inline
+npy_int32 extract_unit_32(npy_int32 *d, npy_int32 unit) {
+ assert(unit > 0);
+ npy_int32 div = *d / unit;
+ npy_int32 mod = *d % unit;
+ if (mod < 0) {
+ mod += unit;
+ div -= 1;
+ }
+ assert(mod >= 0);
+ *d = mod;
+ return div;
+}
+
+/*
* Imports the PyDateTime functions so we can create these objects.
* This is called during module initialization
*/
@@ -160,17 +194,7 @@ days_to_yearsdays(npy_int64 *days_)
npy_int64 year;
/* Break down the 400 year cycle to get the year and day within the year */
- if (days >= 0) {
- year = 400 * (days / days_per_400years);
- days = days % days_per_400years;
- }
- else {
- year = 400 * ((days - (days_per_400years - 1)) / days_per_400years);
- days = days % days_per_400years;
- if (days < 0) {
- days += days_per_400years;
- }
- }
+ year = 400 * extract_unit_64(&days, days_per_400years);
/* Work out the year/day within the 400 year cycle */
if (days >= 366) {
@@ -386,7 +410,8 @@ convert_datetimestruct_to_datetime(PyArray_DatetimeMetaData *meta,
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT npy_datetime
-PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d)
+PyArray_DatetimeStructToDatetime(
+ NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_datetimestruct *NPY_UNUSED(d))
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_DatetimeStructToDatetime function has "
@@ -400,7 +425,8 @@ PyArray_DatetimeStructToDatetime(NPY_DATETIMEUNIT fr, npy_datetimestruct *d)
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT npy_datetime
-PyArray_TimedeltaStructToTimedelta(NPY_DATETIMEUNIT fr, npy_timedeltastruct *d)
+PyArray_TimedeltaStructToTimedelta(
+ NPY_DATETIMEUNIT NPY_UNUSED(fr), npy_timedeltastruct *NPY_UNUSED(d))
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_TimedeltaStructToTimedelta function has "
@@ -416,7 +442,7 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta,
npy_datetime dt,
npy_datetimestruct *out)
{
- npy_int64 perday;
+ npy_int64 days;
/* Initialize the output to all zeros */
memset(out, 0, sizeof(npy_datetimestruct));
@@ -451,14 +477,8 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta,
break;
case NPY_FR_M:
- if (dt >= 0) {
- out->year = 1970 + dt / 12;
- out->month = dt % 12 + 1;
- }
- else {
- out->year = 1969 + (dt + 1) / 12;
- out->month = 12 + (dt + 1)% 12;
- }
+ out->year = 1970 + extract_unit_64(&dt, 12);
+ out->month = dt + 1;
break;
case NPY_FR_W:
@@ -471,171 +491,96 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta,
break;
case NPY_FR_h:
- perday = 24LL;
-
- if (dt >= 0) {
- set_datetimestruct_days(dt / perday, out);
- dt = dt % perday;
- }
- else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
- dt = (perday-1) + (dt + 1) % perday;
- }
+ days = extract_unit_64(&dt, 24LL);
+ set_datetimestruct_days(days, out);
out->hour = (int)dt;
break;
case NPY_FR_m:
- perday = 24LL * 60;
-
- if (dt >= 0) {
- set_datetimestruct_days(dt / perday, out);
- dt = dt % perday;
- }
- else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
- dt = (perday-1) + (dt + 1) % perday;
- }
- out->hour = (int)(dt / 60);
- out->min = (int)(dt % 60);
+ days = extract_unit_64(&dt, 60LL*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 60LL);
+ out->min = (int)dt;
break;
case NPY_FR_s:
- perday = 24LL * 60 * 60;
-
- if (dt >= 0) {
- set_datetimestruct_days(dt / perday, out);
- dt = dt % perday;
- }
- else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
- dt = (perday-1) + (dt + 1) % perday;
- }
- out->hour = (int)(dt / (60*60));
- out->min = (int)((dt / 60) % 60);
- out->sec = (int)(dt % 60);
+ days = extract_unit_64(&dt, 60LL*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 60LL*60);
+ out->min = (int)extract_unit_64(&dt, 60LL);
+ out->sec = (int)dt;
break;
case NPY_FR_ms:
- perday = 24LL * 60 * 60 * 1000;
-
- if (dt >= 0) {
- set_datetimestruct_days(dt / perday, out);
- dt = dt % perday;
- }
- else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
- dt = (perday-1) + (dt + 1) % perday;
- }
- out->hour = (int)(dt / (60*60*1000LL));
- out->min = (int)((dt / (60*1000LL)) % 60);
- out->sec = (int)((dt / 1000LL) % 60);
- out->us = (int)((dt % 1000LL) * 1000);
+ days = extract_unit_64(&dt, 1000LL*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL);
+ out->us = (int)(dt * 1000);
break;
case NPY_FR_us:
- perday = 24LL * 60LL * 60LL * 1000LL * 1000LL;
-
- if (dt >= 0) {
- set_datetimestruct_days(dt / perday, out);
- dt = dt % perday;
- }
- else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
- dt = (perday-1) + (dt + 1) % perday;
- }
- out->hour = (int)(dt / (60*60*1000000LL));
- out->min = (int)((dt / (60*1000000LL)) % 60);
- out->sec = (int)((dt / 1000000LL) % 60);
- out->us = (int)(dt % 1000000LL);
+ days = extract_unit_64(&dt, 1000LL*1000*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000);
+ out->us = (int)dt;
break;
case NPY_FR_ns:
- perday = 24LL * 60LL * 60LL * 1000LL * 1000LL * 1000LL;
-
- if (dt >= 0) {
- set_datetimestruct_days(dt / perday, out);
- dt = dt % perday;
- }
- else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
- dt = (perday-1) + (dt + 1) % perday;
- }
- out->hour = (int)(dt / (60*60*1000000000LL));
- out->min = (int)((dt / (60*1000000000LL)) % 60);
- out->sec = (int)((dt / 1000000000LL) % 60);
- out->us = (int)((dt / 1000LL) % 1000000LL);
- out->ps = (int)((dt % 1000LL) * 1000);
+ days = extract_unit_64(&dt, 1000LL*1000*1000*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000);
+ out->us = (int)extract_unit_64(&dt, 1000LL);
+ out->ps = (int)(dt * 1000);
break;
case NPY_FR_ps:
- perday = 24LL * 60 * 60 * 1000 * 1000 * 1000 * 1000;
-
- if (dt >= 0) {
- set_datetimestruct_days(dt / perday, out);
- dt = dt % perday;
- }
- else {
- set_datetimestruct_days((dt - (perday-1)) / perday, out);
- dt = (perday-1) + (dt + 1) % perday;
- }
- out->hour = (int)(dt / (60*60*1000000000000LL));
- out->min = (int)((dt / (60*1000000000000LL)) % 60);
- out->sec = (int)((dt / 1000000000000LL) % 60);
- out->us = (int)((dt / 1000000LL) % 1000000LL);
- out->ps = (int)(dt % 1000000LL);
+ days = extract_unit_64(&dt, 1000LL*1000*1000*1000*60*60*24);
+ set_datetimestruct_days(days, out);
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*60*60);
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000);
+ out->us = (int)extract_unit_64(&dt, 1000LL*1000);
+ out->ps = (int)(dt);
break;
case NPY_FR_fs:
/* entire range is only +- 2.6 hours */
- if (dt >= 0) {
- out->hour = (int)(dt / (60*60*1000000000000000LL));
- out->min = (int)((dt / (60*1000000000000000LL)) % 60);
- out->sec = (int)((dt / 1000000000000000LL) % 60);
- out->us = (int)((dt / 1000000000LL) % 1000000LL);
- out->ps = (int)((dt / 1000LL) % 1000000LL);
- out->as = (int)((dt % 1000LL) * 1000);
- }
- else {
- npy_datetime minutes;
-
- minutes = dt / (60*1000000000000000LL);
- dt = dt % (60*1000000000000000LL);
- if (dt < 0) {
- dt += (60*1000000000000000LL);
- --minutes;
- }
- /* Offset the negative minutes */
- add_minutes_to_datetimestruct(out, minutes);
- out->sec = (int)((dt / 1000000000000000LL) % 60);
- out->us = (int)((dt / 1000000000LL) % 1000000LL);
- out->ps = (int)((dt / 1000LL) % 1000000LL);
- out->as = (int)((dt % 1000LL) * 1000);
- }
+ out->hour = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*60*60);
+ if (out->hour < 0) {
+ out->year = 1969;
+ out->month = 12;
+ out->day = 31;
+ out->hour += 24;
+ assert(out->hour >= 0);
+ }
+ out->min = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*60);
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000);
+ out->us = (int)extract_unit_64(&dt, 1000LL*1000*1000);
+ out->ps = (int)extract_unit_64(&dt, 1000LL);
+ out->as = (int)(dt * 1000);
break;
case NPY_FR_as:
/* entire range is only +- 9.2 seconds */
- if (dt >= 0) {
- out->sec = (int)((dt / 1000000000000000000LL) % 60);
- out->us = (int)((dt / 1000000000000LL) % 1000000LL);
- out->ps = (int)((dt / 1000000LL) % 1000000LL);
- out->as = (int)(dt % 1000000LL);
- }
- else {
- npy_datetime seconds;
-
- seconds = dt / 1000000000000000000LL;
- dt = dt % 1000000000000000000LL;
- if (dt < 0) {
- dt += 1000000000000000000LL;
- --seconds;
- }
- /* Offset the negative seconds */
- add_seconds_to_datetimestruct(out, seconds);
- out->us = (int)((dt / 1000000000000LL) % 1000000LL);
- out->ps = (int)((dt / 1000000LL) % 1000000LL);
- out->as = (int)(dt % 1000000LL);
- }
+ out->sec = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000*1000*1000);
+ if (out->sec < 0) {
+ out->year = 1969;
+ out->month = 12;
+ out->day = 31;
+ out->hour = 23;
+ out->min = 59;
+ out->sec += 60;
+ assert(out->sec >= 0);
+ }
+ out->us = (int)extract_unit_64(&dt, 1000LL*1000*1000*1000);
+ out->ps = (int)extract_unit_64(&dt, 1000LL*1000);
+ out->as = (int)dt;
break;
default:
@@ -655,8 +600,9 @@ convert_datetime_to_datetimestruct(PyArray_DatetimeMetaData *meta,
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr,
- npy_datetimestruct *result)
+PyArray_DatetimeToDatetimeStruct(
+ npy_datetime NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr),
+ npy_datetimestruct *result)
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_DatetimeToDatetimeStruct function has "
@@ -676,8 +622,9 @@ PyArray_DatetimeToDatetimeStruct(npy_datetime val, NPY_DATETIMEUNIT fr,
* TO BE REMOVED - NOT USED INTERNALLY.
*/
NPY_NO_EXPORT void
-PyArray_TimedeltaToTimedeltaStruct(npy_timedelta val, NPY_DATETIMEUNIT fr,
- npy_timedeltastruct *result)
+PyArray_TimedeltaToTimedeltaStruct(
+ npy_timedelta NPY_UNUSED(val), NPY_DATETIMEUNIT NPY_UNUSED(fr),
+ npy_timedeltastruct *result)
{
PyErr_SetString(PyExc_RuntimeError,
"The NumPy PyArray_TimedeltaToTimedeltaStruct function has "
@@ -1887,6 +1834,7 @@ convert_datetime_metadata_tuple_to_datetime_metadata(PyObject *tuple,
return -1;
}
equal_one = PyObject_RichCompareBool(event, one, Py_EQ);
+ Py_DECREF(one);
if (equal_one == -1) {
return -1;
}
@@ -2067,20 +2015,8 @@ add_seconds_to_datetimestruct(npy_datetimestruct *dts, int seconds)
int minutes;
dts->sec += seconds;
- if (dts->sec < 0) {
- minutes = dts->sec / 60;
- dts->sec = dts->sec % 60;
- if (dts->sec < 0) {
- --minutes;
- dts->sec += 60;
- }
- add_minutes_to_datetimestruct(dts, minutes);
- }
- else if (dts->sec >= 60) {
- minutes = dts->sec / 60;
- dts->sec = dts->sec % 60;
- add_minutes_to_datetimestruct(dts, minutes);
- }
+ minutes = extract_unit_32(&dts->sec, 60);
+ add_minutes_to_datetimestruct(dts, minutes);
}
/*
@@ -2092,28 +2028,13 @@ add_minutes_to_datetimestruct(npy_datetimestruct *dts, int minutes)
{
int isleap;
- /* MINUTES */
dts->min += minutes;
- while (dts->min < 0) {
- dts->min += 60;
- dts->hour--;
- }
- while (dts->min >= 60) {
- dts->min -= 60;
- dts->hour++;
- }
- /* HOURS */
- while (dts->hour < 0) {
- dts->hour += 24;
- dts->day--;
- }
- while (dts->hour >= 24) {
- dts->hour -= 24;
- dts->day++;
- }
+ /* propagate invalid minutes into hour and day changes */
+ dts->hour += extract_unit_32(&dts->min, 60);
+ dts->day += extract_unit_32(&dts->hour, 24);
- /* DAYS */
+ /* propagate invalid days into month and year changes */
if (dts->day < 1) {
dts->month--;
if (dts->month < 1) {
@@ -2305,6 +2226,7 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
if (DEPRECATE(
"parsing timezone aware datetimes is deprecated; "
"this will raise an error in the future") < 0) {
+ Py_DECREF(tmp);
return -1;
}
@@ -2321,10 +2243,14 @@ convert_pydatetime_to_datetimestruct(PyObject *obj, npy_datetimestruct *out,
* which contains the value we want.
*/
tmp = PyObject_CallMethod(offset, "total_seconds", "");
+ Py_DECREF(offset);
if (tmp == NULL) {
return -1;
}
- seconds_offset = PyInt_AsLong(tmp);
+ /* Rounding here is no worse than the integer division below.
+ * Only whole minute offsets are supported by numpy anyway.
+ */
+ seconds_offset = (int)PyFloat_AsDouble(tmp);
if (error_converting(seconds_offset)) {
Py_DECREF(tmp);
return -1;
@@ -2468,6 +2394,9 @@ convert_pyobject_to_datetime(PyArray_DatetimeMetaData *meta, PyObject *obj,
return -1;
}
*out = PyLong_AsLongLong(obj);
+ if (error_converting(*out)) {
+ return -1;
+ }
return 0;
}
/* Datetime scalar */
@@ -2666,6 +2595,9 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
}
*out = PyLong_AsLongLong(obj);
+ if (error_converting(*out)) {
+ return -1;
+ }
return 0;
}
/* Timedelta scalar */
@@ -2845,6 +2777,19 @@ convert_pyobject_to_timedelta(PyArray_DatetimeMetaData *meta, PyObject *obj,
*out = NPY_DATETIME_NAT;
return 0;
}
+ else if (PyArray_IsScalar(obj, Integer)) {
+ /* Use the default unit if none was specified */
+ if (meta->base == NPY_FR_ERROR) {
+ meta->base = NPY_DATETIME_DEFAULTUNIT;
+ meta->num = 1;
+ }
+
+ *out = PyLong_AsLongLong(obj);
+ if (error_converting(*out)) {
+ return -1;
+ }
+ return 0;
+ }
else {
PyErr_SetString(PyExc_ValueError,
"Could not convert object to NumPy timedelta");
@@ -2916,7 +2861,6 @@ convert_datetime_to_pyobject(npy_datetime dt, PyArray_DatetimeMetaData *meta)
NPY_NO_EXPORT PyObject *
convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta)
{
- PyObject *ret = NULL;
npy_timedelta value;
int days = 0, seconds = 0, useconds = 0;
@@ -2946,54 +2890,47 @@ convert_timedelta_to_pyobject(npy_timedelta td, PyArray_DatetimeMetaData *meta)
/* Convert to days/seconds/useconds */
switch (meta->base) {
case NPY_FR_W:
- value *= 7;
+ days = value * 7;
break;
case NPY_FR_D:
+ days = value;
break;
case NPY_FR_h:
- seconds = (int)((value % 24) * (60*60));
- value = value / 24;
+ days = extract_unit_64(&value, 24ULL);
+ seconds = value*60*60;
break;
case NPY_FR_m:
- seconds = (int)(value % (24*60)) * 60;
- value = value / (24*60);
+ days = extract_unit_64(&value, 60ULL*24);
+ seconds = value*60;
break;
case NPY_FR_s:
- seconds = (int)(value % (24*60*60));
- value = value / (24*60*60);
+ days = extract_unit_64(&value, 60ULL*60*24);
+ seconds = value;
break;
case NPY_FR_ms:
- useconds = (int)(value % 1000) * 1000;
- value = value / 1000;
- seconds = (int)(value % (24*60*60));
- value = value / (24*60*60);
+ days = extract_unit_64(&value, 1000ULL*60*60*24);
+ seconds = extract_unit_64(&value, 1000ULL);
+ useconds = value*1000;
break;
case NPY_FR_us:
- useconds = (int)(value % (1000*1000));
- value = value / (1000*1000);
- seconds = (int)(value % (24*60*60));
- value = value / (24*60*60);
+ days = extract_unit_64(&value, 1000ULL*1000*60*60*24);
+ seconds = extract_unit_64(&value, 1000ULL*1000);
+ useconds = value;
break;
default:
+ // unreachable, handled by the `if` above
+ assert(NPY_FALSE);
break;
}
/*
- * 'value' represents days, and seconds/useconds are filled.
- *
* If it would overflow the datetime.timedelta days, return a raw int
*/
- if (value < -999999999 || value > 999999999) {
+ if (days < -999999999 || days > 999999999) {
return PyLong_FromLongLong(td);
}
else {
- days = (int)value;
- ret = PyDelta_FromDSU(days, seconds, useconds);
- if (ret == NULL) {
- return NULL;
- }
+ return PyDelta_FromDSU(days, seconds, useconds);
}
-
- return ret;
}
/*
@@ -3158,7 +3095,7 @@ is_any_numpy_datetime_or_timedelta(PyObject *obj)
*/
NPY_NO_EXPORT int
convert_pyobjects_to_datetimes(int count,
- PyObject **objs, int *type_nums,
+ PyObject **objs, const int *type_nums,
NPY_CASTING casting,
npy_int64 *out_values,
PyArray_DatetimeMetaData *inout_meta)
@@ -3812,18 +3749,26 @@ recursive_find_object_timedelta64_type(PyObject *obj,
* single object using [()], but not by using
* __getitem__(integer) approaches
*/
- PyObject *item, *meth, *args;
+ PyObject *item, *args;
- meth = PyObject_GetAttrString(obj, "__getitem__");
- args = Py_BuildValue("(())");
- item = PyObject_CallObject(meth, args);
+ args = PyTuple_New(0);
+ if (args == NULL) {
+ return 0;
+ }
+ item = PyObject_GetItem(obj, args);
+ Py_DECREF(args);
+ if (item == NULL) {
+ return 0;
+ }
/*
* NOTE: may need other type checks here in the future
* for expanded 0 D datetime array conversions?
*/
if (PyDelta_Check(item)) {
+ Py_DECREF(item);
return delta_checker(meta);
}
+ Py_DECREF(item);
}
}
}
diff --git a/numpy/core/src/multiarray/datetime_busday.c b/numpy/core/src/multiarray/datetime_busday.c
index c04a6c125..cdeb65d0e 100644
--- a/numpy/core/src/multiarray/datetime_busday.c
+++ b/numpy/core/src/multiarray/datetime_busday.c
@@ -48,7 +48,7 @@ get_day_of_week(npy_datetime date)
*/
static int
is_holiday(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
@@ -88,7 +88,7 @@ is_holiday(npy_datetime date,
*/
static npy_datetime *
find_earliest_holiday_on_or_after(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
@@ -127,7 +127,7 @@ find_earliest_holiday_on_or_after(npy_datetime date,
*/
static npy_datetime *
find_earliest_holiday_after(npy_datetime date,
- npy_datetime *holidays_begin, npy_datetime *holidays_end)
+ npy_datetime *holidays_begin, const npy_datetime *holidays_end)
{
npy_datetime *trial;
@@ -159,7 +159,7 @@ static int
apply_business_day_roll(npy_datetime date, npy_datetime *out,
int *out_day_of_week,
NPY_BUSDAY_ROLL roll,
- npy_bool *weekmask,
+ const npy_bool *weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
int day_of_week;
@@ -361,7 +361,7 @@ apply_business_day_offset(npy_datetime date, npy_int64 offset,
static int
apply_business_day_count(npy_datetime date_begin, npy_datetime date_end,
npy_int64 *out,
- npy_bool *weekmask, int busdays_in_weekmask,
+ const npy_bool *weekmask, int busdays_in_weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
npy_int64 count, whole_weeks;
@@ -722,7 +722,7 @@ finish:
*/
NPY_NO_EXPORT PyArrayObject *
is_business_day(PyArrayObject *dates, PyArrayObject *out,
- npy_bool *weekmask, int busdays_in_weekmask,
+ const npy_bool *weekmask, int busdays_in_weekmask,
npy_datetime *holidays_begin, npy_datetime *holidays_end)
{
PyArray_DatetimeMetaData temp_meta;
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 439980877..23d140cf6 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -10,7 +10,7 @@
#include "numpy/arrayscalars.h"
#include "npy_config.h"
-
+#include "npy_ctypes.h"
#include "npy_pycompat.h"
#include "_datetime.h"
@@ -19,6 +19,7 @@
#include "descriptor.h"
#include "alloc.h"
#include "assert.h"
+#include "buffer.h"
/*
* offset: A starting offset.
@@ -41,109 +42,87 @@ static PyObject *typeDict = NULL; /* Must be explicitly loaded */
static PyArray_Descr *
_use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag);
-
-/*
- * Returns value of PyMapping_GetItemString but as a borrowed reference instead
- * of a new reference.
- */
-static PyObject *
-Borrowed_PyMapping_GetItemString(PyObject *o, char *key)
-{
- PyObject *ret = PyMapping_GetItemString(o, key);
- Py_XDECREF(ret);
- return ret;
-}
-
-/*
- * Creates a dtype object from ctypes inputs.
- *
- * Returns a new reference to a dtype object, or NULL
- * if this is not possible. When it returns NULL, it does
- * not set a Python exception.
- */
static PyArray_Descr *
-_arraydescr_fromctypes(PyObject *obj)
+_arraydescr_from_ctypes_type(PyTypeObject *type)
{
- PyObject *dtypedescr;
- PyArray_Descr *newdescr;
- int ret;
+ PyObject *_numpy_dtype_ctypes;
+ PyObject *res;
- /* Understand basic ctypes */
- dtypedescr = PyObject_GetAttrString(obj, "_type_");
- PyErr_Clear();
- if (dtypedescr) {
- ret = PyArray_DescrConverter(dtypedescr, &newdescr);
- Py_DECREF(dtypedescr);
- if (ret == NPY_SUCCEED) {
- PyObject *length;
- /* Check for ctypes arrays */
- length = PyObject_GetAttrString(obj, "_length_");
- PyErr_Clear();
- if (length) {
- /* derived type */
- PyObject *newtup;
- PyArray_Descr *derived;
- newtup = Py_BuildValue("N(N)", newdescr, length);
- ret = PyArray_DescrConverter(newtup, &derived);
- Py_DECREF(newtup);
- if (ret == NPY_SUCCEED) {
- return derived;
- }
- PyErr_Clear();
- return NULL;
- }
- return newdescr;
- }
- PyErr_Clear();
+ /* Call the python function of the same name. */
+ _numpy_dtype_ctypes = PyImport_ImportModule("numpy.core._dtype_ctypes");
+ if (_numpy_dtype_ctypes == NULL) {
return NULL;
}
- /* Understand ctypes structures --
- bit-fields are not supported
- automatically aligns */
- dtypedescr = PyObject_GetAttrString(obj, "_fields_");
- PyErr_Clear();
- if (dtypedescr) {
- ret = PyArray_DescrAlignConverter(dtypedescr, &newdescr);
- Py_DECREF(dtypedescr);
- if (ret == NPY_SUCCEED) {
- return newdescr;
- }
- PyErr_Clear();
+ res = PyObject_CallMethod(_numpy_dtype_ctypes, "dtype_from_ctypes_type", "O", (PyObject *)type);
+ Py_DECREF(_numpy_dtype_ctypes);
+ if (res == NULL) {
+ return NULL;
}
- return NULL;
+ /*
+ * sanity check that dtype_from_ctypes_type returned the right type,
+ * since getting it wrong would give segfaults.
+ */
+ if (!PyObject_TypeCheck(res, &PyArrayDescr_Type)) {
+ Py_DECREF(res);
+ PyErr_BadInternalCall();
+ return NULL;
+ }
+
+ return (PyArray_Descr *)res;
}
/*
- * This function creates a dtype object when:
- * - The object has a "dtype" attribute, and it can be converted
- * to a dtype object.
- * - The object is a ctypes type object, including array
- * and structure types.
+ * This function creates a dtype object when the object has a "dtype" attribute,
+ * and it can be converted to a dtype object.
*
* Returns a new reference to a dtype object, or NULL
- * if this is not possible. When it returns NULL, it does
- * not set a Python exception.
+ * if this is not possible.
+ * When the return value is true, the dtype attribute should have been used
+ * and parsed. Currently the only failure mode for a 1 return is a
+ * RecursionError and the descriptor is set to NULL.
+ * When the return value is false, no error will be set.
*/
-NPY_NO_EXPORT PyArray_Descr *
-_arraydescr_fromobj(PyObject *obj)
+int
+_arraydescr_from_dtype_attr(PyObject *obj, PyArray_Descr **newdescr)
{
PyObject *dtypedescr;
- PyArray_Descr *newdescr = NULL;
int ret;
/* For arbitrary objects that have a "dtype" attribute */
dtypedescr = PyObject_GetAttrString(obj, "dtype");
- PyErr_Clear();
- if (dtypedescr != NULL) {
- ret = PyArray_DescrConverter(dtypedescr, &newdescr);
+ if (dtypedescr == NULL) {
+ /*
+ * This can be reached due to recursion limit being hit while fetching
+ * the attribute (tested for py3.7). This removes the custom message.
+ */
+ goto fail;
+ }
+
+ if (Py_EnterRecursiveCall(
+ " while trying to convert the given data type from its "
+ "`.dtype` attribute.") != 0) {
Py_DECREF(dtypedescr);
- if (ret == NPY_SUCCEED) {
- return newdescr;
- }
+ return 1;
+ }
+
+ ret = PyArray_DescrConverter(dtypedescr, newdescr);
+
+ Py_DECREF(dtypedescr);
+ Py_LeaveRecursiveCall();
+ if (ret != NPY_SUCCEED) {
+ goto fail;
+ }
+
+ return 1;
+
+ fail:
+ /* Ignore all but recursion errors, to give ctypes a full try. */
+ if (!PyErr_ExceptionMatches(PyExc_RecursionError)) {
PyErr_Clear();
+ return 0;
}
- return _arraydescr_fromctypes(obj);
+ return 1;
}
/*
@@ -170,7 +149,7 @@ array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args)
arg == '|' || arg == '=')
static int
-_check_for_commastring(char *type, Py_ssize_t len)
+_check_for_commastring(const char *type, Py_ssize_t len)
{
Py_ssize_t i;
int sqbracket;
@@ -286,6 +265,9 @@ _convert_from_tuple(PyObject *obj, int align)
return NULL;
}
PyArray_DESCR_REPLACE(type);
+ if (type == NULL) {
+ return NULL;
+ }
if (type->type_num == NPY_UNICODE) {
type->elsize = itemsize << 2;
}
@@ -319,15 +301,22 @@ _convert_from_tuple(PyObject *obj, int align)
"invalid shape in fixed-type tuple.");
goto fail;
}
- /*
- * If (type, 1) was given, it is equivalent to type...
- * or (type, ()) was given it is equivalent to type...
- */
- if ((shape.len == 1
- && shape.ptr[0] == 1
- && PyNumber_Check(val))
- || (shape.len == 0
- && PyTuple_Check(val))) {
+ /* if (type, ()) was given it is equivalent to type... */
+ if (shape.len == 0 && PyTuple_Check(val)) {
+ npy_free_cache_dim_obj(shape);
+ return type;
+ }
+ /* (type, 1) use to be equivalent to type, but is deprecated */
+ if (shape.len == 1
+ && shape.ptr[0] == 1
+ && PyNumber_Check(val)) {
+ /* 2019-05-20, 1.17 */
+ if (DEPRECATE_FUTUREWARNING(
+ "Passing (type, 1) or '1type' as a synonym of type is "
+ "deprecated; in a future version of numpy, it will be "
+ "understood as (type, (1,)) / '(1,)type'.") < 0) {
+ goto fail;
+ }
npy_free_cache_dim_obj(shape);
return type;
}
@@ -509,9 +498,6 @@ _convert_from_array_descr(PyObject *obj, int align)
else {
ret = PyArray_DescrConverter(PyTuple_GET_ITEM(item, 1), &conv);
}
- if (ret == NPY_FAIL) {
- PyObject_Print(PyTuple_GET_ITEM(item, 1), stderr, 0);
- }
}
else if (PyTuple_GET_SIZE(item) == 3) {
newobj = PyTuple_GetSlice(item, 1, 3);
@@ -529,6 +515,7 @@ _convert_from_array_descr(PyObject *obj, int align)
if (ret == NPY_FAIL) {
goto fail;
}
+
if ((PyDict_GetItem(fields, name) != NULL)
|| (title
&& PyBaseString_Check(title)
@@ -541,6 +528,7 @@ _convert_from_array_descr(PyObject *obj, int align)
#if defined(NPY_PY3K)
Py_DECREF(name);
#endif
+ Py_DECREF(conv);
goto fail;
}
dtypeflags |= (conv->flags & NPY_FROM_FIELDS);
@@ -863,9 +851,11 @@ _use_inherit(PyArray_Descr *type, PyObject *newobj, int *errflag)
else if (new->elsize != conv->elsize) {
PyErr_SetString(PyExc_ValueError,
"mismatch in size of old and new data-descriptor");
+ Py_DECREF(new);
goto fail;
}
else if (invalid_union_object_dtype(new, conv)) {
+ Py_DECREF(new);
goto fail;
}
@@ -1024,8 +1014,11 @@ _convert_from_dict(PyObject *obj, int align)
{
PyArray_Descr *new;
PyObject *fields = NULL;
- PyObject *names, *offsets, *descrs, *titles, *tmp;
- PyObject *metadata;
+ PyObject *names = NULL;
+ PyObject *offsets= NULL;
+ PyObject *descrs = NULL;
+ PyObject *titles = NULL;
+ PyObject *metadata, *tmp;
int n, i;
int totalsize, itemsize;
int maxalign = 0;
@@ -1040,19 +1033,27 @@ _convert_from_dict(PyObject *obj, int align)
/*
* Use PyMapping_GetItemString to support dictproxy objects as well.
*/
- names = Borrowed_PyMapping_GetItemString(obj, "names");
- descrs = Borrowed_PyMapping_GetItemString(obj, "formats");
- if (!names || !descrs) {
+ names = PyMapping_GetItemString(obj, "names");
+ if (names == NULL) {
+ Py_DECREF(fields);
+ /* XXX should check this is a KeyError */
+ PyErr_Clear();
+ return _use_fields_dict(obj, align);
+ }
+ descrs = PyMapping_GetItemString(obj, "formats");
+ if (descrs == NULL) {
Py_DECREF(fields);
+ /* XXX should check this is a KeyError */
PyErr_Clear();
+ Py_DECREF(names);
return _use_fields_dict(obj, align);
}
n = PyObject_Length(names);
- offsets = Borrowed_PyMapping_GetItemString(obj, "offsets");
+ offsets = PyMapping_GetItemString(obj, "offsets");
if (!offsets) {
PyErr_Clear();
}
- titles = Borrowed_PyMapping_GetItemString(obj, "titles");
+ titles = PyMapping_GetItemString(obj, "titles");
if (!titles) {
PyErr_Clear();
}
@@ -1070,7 +1071,7 @@ _convert_from_dict(PyObject *obj, int align)
* If a property 'aligned' is in the dict, it overrides the align flag
* to be True if it not already true.
*/
- tmp = Borrowed_PyMapping_GetItemString(obj, "aligned");
+ tmp = PyMapping_GetItemString(obj, "aligned");
if (tmp == NULL) {
PyErr_Clear();
} else {
@@ -1078,11 +1079,13 @@ _convert_from_dict(PyObject *obj, int align)
align = 1;
}
else if (tmp != Py_False) {
+ Py_DECREF(tmp);
PyErr_SetString(PyExc_ValueError,
"NumPy dtype descriptor includes 'aligned' entry, "
"but its value is neither True nor False");
- return NULL;
+ goto fail;
}
+ Py_DECREF(tmp);
}
totalsize = 0;
@@ -1238,14 +1241,18 @@ _convert_from_dict(PyObject *obj, int align)
}
new->elsize = totalsize;
if (!PyTuple_Check(names)) {
- names = PySequence_Tuple(names);
- }
- else {
- Py_INCREF(names);
+ Py_SETREF(names, PySequence_Tuple(names));
+ if (names == NULL) {
+ Py_DECREF(new);
+ goto fail;
+ }
}
new->names = names;
new->fields = fields;
new->flags = dtypeflags;
+ /* new takes responsibility for DECREFing names, fields */
+ names = NULL;
+ fields = NULL;
/*
* If the fields weren't in order, and there was an OBJECT type,
@@ -1254,7 +1261,7 @@ _convert_from_dict(PyObject *obj, int align)
if (has_out_of_order_fields && PyDataType_REFCHK(new)) {
if (validate_object_field_overlap(new) < 0) {
Py_DECREF(new);
- return NULL;
+ goto fail;
}
}
@@ -1264,14 +1271,15 @@ _convert_from_dict(PyObject *obj, int align)
}
/* Override the itemsize if provided */
- tmp = Borrowed_PyMapping_GetItemString(obj, "itemsize");
+ tmp = PyMapping_GetItemString(obj, "itemsize");
if (tmp == NULL) {
PyErr_Clear();
} else {
itemsize = (int)PyArray_PyIntAsInt(tmp);
+ Py_DECREF(tmp);
if (error_converting(itemsize)) {
Py_DECREF(new);
- return NULL;
+ goto fail;
}
/* Make sure the itemsize isn't made too small */
if (itemsize < new->elsize) {
@@ -1280,7 +1288,7 @@ _convert_from_dict(PyObject *obj, int align)
"cannot override to smaller itemsize of %d",
(int)new->elsize, (int)itemsize);
Py_DECREF(new);
- return NULL;
+ goto fail;
}
/* If align is set, make sure the alignment divides into the size */
if (align && itemsize % new->alignment != 0) {
@@ -1289,30 +1297,43 @@ _convert_from_dict(PyObject *obj, int align)
"which is not divisible into the specified itemsize %d",
(int)new->alignment, (int)itemsize);
Py_DECREF(new);
- return NULL;
+ goto fail;
}
/* Set the itemsize */
new->elsize = itemsize;
}
/* Add the metadata if provided */
- metadata = Borrowed_PyMapping_GetItemString(obj, "metadata");
+ metadata = PyMapping_GetItemString(obj, "metadata");
if (metadata == NULL) {
PyErr_Clear();
}
else if (new->metadata == NULL) {
new->metadata = metadata;
- Py_XINCREF(new->metadata);
}
- else if (PyDict_Merge(new->metadata, metadata, 0) == -1) {
- Py_DECREF(new);
- return NULL;
+ else {
+ int ret = PyDict_Merge(new->metadata, metadata, 0);
+ Py_DECREF(metadata);
+ if (ret < 0) {
+ Py_DECREF(new);
+ goto fail;
+ }
}
+
+ Py_XDECREF(fields);
+ Py_XDECREF(names);
+ Py_XDECREF(descrs);
+ Py_XDECREF(offsets);
+ Py_XDECREF(titles);
return new;
fail:
Py_XDECREF(fields);
+ Py_XDECREF(names);
+ Py_XDECREF(descrs);
+ Py_XDECREF(offsets);
+ Py_XDECREF(titles);
return NULL;
}
@@ -1364,7 +1385,6 @@ NPY_NO_EXPORT int
PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
{
int check_num = NPY_NOTYPE + 10;
- PyObject *item;
int elsize = 0;
char endian = '=';
@@ -1423,10 +1443,25 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
check_num = NPY_VOID;
}
else {
- *at = _arraydescr_fromobj(obj);
- if (*at) {
+ if (_arraydescr_from_dtype_attr(obj, at)) {
+ /*
+ * Using dtype attribute, *at may be NULL if a
+ * RecursionError occurred.
+ */
+ if (*at == NULL) {
+ goto error;
+ }
return NPY_SUCCEED;
}
+ /*
+ * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
+ * type might override the dtype if numpy does not otherwise
+ * support it.
+ */
+ if (npy_ctypes_check((PyTypeObject *)obj)) {
+ *at = _arraydescr_from_ctypes_type((PyTypeObject *)obj);
+ return *at ? NPY_SUCCEED : NPY_FAIL;
+ }
}
goto finish;
}
@@ -1509,7 +1544,8 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
/* A typecode like 'd' */
if (len == 1) {
- check_num = type[0];
+ /* Python byte string characters are unsigned */
+ check_num = (unsigned char) type[0];
}
/* A kind + size like 'f8' */
else {
@@ -1596,12 +1632,24 @@ PyArray_DescrConverter(PyObject *obj, PyArray_Descr **at)
goto fail;
}
else {
- *at = _arraydescr_fromobj(obj);
- if (*at) {
+ if (_arraydescr_from_dtype_attr(obj, at)) {
+ /*
+ * Using dtype attribute, *at may be NULL if a
+ * RecursionError occurred.
+ */
+ if (*at == NULL) {
+ goto error;
+ }
return NPY_SUCCEED;
}
- if (PyErr_Occurred()) {
- return NPY_FAIL;
+ /*
+ * Note: this comes after _arraydescr_from_dtype_attr because the ctypes
+ * type might override the dtype if numpy does not otherwise
+ * support it.
+ */
+ if (npy_ctypes_check(Py_TYPE(obj))) {
+ *at = _arraydescr_from_ctypes_type(Py_TYPE(obj));
+ return *at ? NPY_SUCCEED : NPY_FAIL;
}
goto fail;
}
@@ -1615,16 +1663,22 @@ finish:
PyErr_Clear();
/* Now check to see if the object is registered in typeDict */
if (typeDict != NULL) {
- item = PyDict_GetItem(typeDict, obj);
+ PyObject *item = NULL;
#if defined(NPY_PY3K)
- if (!item && PyBytes_Check(obj)) {
+ if (PyBytes_Check(obj)) {
PyObject *tmp;
tmp = PyUnicode_FromEncodedObject(obj, "ascii", "strict");
- if (tmp != NULL) {
- item = PyDict_GetItem(typeDict, tmp);
- Py_DECREF(tmp);
+ if (tmp == NULL) {
+ goto fail;
}
+ item = PyDict_GetItem(typeDict, tmp);
+ Py_DECREF(tmp);
}
+ else {
+ item = PyDict_GetItem(typeDict, obj);
+ }
+#else
+ item = PyDict_GetItem(typeDict, obj);
#endif
if (item) {
/* Check for a deprecated Numeric-style typecode */
@@ -1660,6 +1714,9 @@ finish:
if (PyDataType_ISUNSIZED(*at) && (*at)->elsize != elsize) {
PyArray_DESCR_REPLACE(*at);
+ if (*at == NULL) {
+ goto error;
+ }
(*at)->elsize = elsize;
}
if (endian != '=' && PyArray_ISNBO(endian)) {
@@ -1668,6 +1725,9 @@ finish:
if (endian != '=' && (*at)->byteorder != '|'
&& (*at)->byteorder != endian) {
PyArray_DESCR_REPLACE(*at);
+ if (*at == NULL) {
+ goto error;
+ }
(*at)->byteorder = endian;
}
return NPY_SUCCEED;
@@ -1728,6 +1788,7 @@ PyArray_DescrNew(PyArray_Descr *base)
newdescr->c_metadata = NPY_AUXDATA_CLONE(base->c_metadata);
if (newdescr->c_metadata == NULL) {
PyErr_NoMemory();
+ /* TODO: This seems wrong, as the old fields get decref'd? */
Py_DECREF(newdescr);
return NULL;
}
@@ -1770,6 +1831,7 @@ arraydescr_dealloc(PyArray_Descr *self)
Py_INCREF(self);
return;
}
+ _dealloc_cached_buffer_info((PyObject*)self);
Py_XDECREF(self->typeobj);
Py_XDECREF(self->names);
Py_XDECREF(self->fields);
@@ -3220,7 +3282,7 @@ arraydescr_richcompare(PyArray_Descr *self, PyObject *other, int cmp_op)
}
static int
-descr_nonzero(PyObject *self)
+descr_nonzero(PyObject *NPY_UNUSED(self))
{
/* `bool(np.dtype(...)) == True` for all dtypes. Needed to override default
* nonzero implementation, which checks if `len(object) > 0`. */
@@ -3335,12 +3397,126 @@ static PyObject *
_subscript_by_index(PyArray_Descr *self, Py_ssize_t i)
{
PyObject *name = PySequence_GetItem(self->names, i);
+ PyObject *ret;
if (name == NULL) {
PyErr_Format(PyExc_IndexError,
"Field index %zd out of range.", i);
return NULL;
}
- return _subscript_by_name(self, name);
+ ret = _subscript_by_name(self, name);
+ Py_DECREF(name);
+ return ret;
+}
+
+static npy_bool
+_is_list_of_strings(PyObject *obj)
+{
+ int seqlen, i;
+ if (!PyList_CheckExact(obj)) {
+ return NPY_FALSE;
+ }
+ seqlen = PyList_GET_SIZE(obj);
+ for (i = 0; i < seqlen; i++) {
+ PyObject *item = PyList_GET_ITEM(obj, i);
+ if (!PyBaseString_Check(item)) {
+ return NPY_FALSE;
+ }
+ }
+
+ return NPY_TRUE;
+}
+
+NPY_NO_EXPORT PyArray_Descr *
+arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind)
+{
+ int seqlen, i;
+ PyObject *fields = NULL;
+ PyObject *names = NULL;
+ PyArray_Descr *view_dtype;
+
+ seqlen = PySequence_Size(ind);
+ if (seqlen == -1) {
+ return NULL;
+ }
+
+ fields = PyDict_New();
+ if (fields == NULL) {
+ goto fail;
+ }
+ names = PyTuple_New(seqlen);
+ if (names == NULL) {
+ goto fail;
+ }
+
+ for (i = 0; i < seqlen; i++) {
+ PyObject *name;
+ PyObject *tup;
+
+ name = PySequence_GetItem(ind, i);
+ if (name == NULL) {
+ goto fail;
+ }
+
+ /* Let the names tuple steal a reference now, so we don't need to
+ * decref name if an error occurs further on.
+ */
+ PyTuple_SET_ITEM(names, i, name);
+
+ tup = PyDict_GetItem(self->fields, name);
+ if (tup == NULL) {
+ PyErr_SetObject(PyExc_KeyError, name);
+ goto fail;
+ }
+
+ /* disallow use of titles as index */
+ if (PyTuple_Size(tup) == 3) {
+ PyObject *title = PyTuple_GET_ITEM(tup, 2);
+ int titlecmp = PyObject_RichCompareBool(title, name, Py_EQ);
+ if (titlecmp < 0) {
+ goto fail;
+ }
+ if (titlecmp == 1) {
+ /* if title == name, we were given a title, not a field name */
+ PyErr_SetString(PyExc_KeyError,
+ "cannot use field titles in multi-field index");
+ goto fail;
+ }
+ if (PyDict_SetItem(fields, title, tup) < 0) {
+ goto fail;
+ }
+ }
+ /* disallow duplicate field indices */
+ if (PyDict_Contains(fields, name)) {
+ PyObject *msg = NULL;
+ PyObject *fmt = PyUString_FromString(
+ "duplicate field of name {!r}");
+ if (fmt != NULL) {
+ msg = PyObject_CallMethod(fmt, "format", "O", name);
+ Py_DECREF(fmt);
+ }
+ PyErr_SetObject(PyExc_ValueError, msg);
+ Py_XDECREF(msg);
+ goto fail;
+ }
+ if (PyDict_SetItem(fields, name, tup) < 0) {
+ goto fail;
+ }
+ }
+
+ view_dtype = PyArray_DescrNewFromType(NPY_VOID);
+ if (view_dtype == NULL) {
+ goto fail;
+ }
+ view_dtype->elsize = self->elsize;
+ view_dtype->names = names;
+ view_dtype->fields = fields;
+ view_dtype->flags = self->flags;
+ return view_dtype;
+
+fail:
+ Py_XDECREF(fields);
+ Py_XDECREF(names);
+ return NULL;
}
static PyObject *
@@ -3353,6 +3529,9 @@ descr_subscript(PyArray_Descr *self, PyObject *op)
if (PyBaseString_Check(op)) {
return _subscript_by_name(self, op);
}
+ else if (_is_list_of_strings(op)) {
+ return (PyObject *)arraydescr_field_subset_view(self, op);
+ }
else {
Py_ssize_t i = PyArray_PyIntAsIntp(op);
if (error_converting(i)) {
@@ -3360,7 +3539,8 @@ descr_subscript(PyArray_Descr *self, PyObject *op)
PyObject *err = PyErr_Occurred();
if (PyErr_GivenExceptionMatches(err, PyExc_TypeError)) {
PyErr_SetString(PyExc_TypeError,
- "Field key must be an integer, string, or unicode.");
+ "Field key must be an integer field offset, "
+ "single field name, or list of field names.");
}
return NULL;
}
diff --git a/numpy/core/src/multiarray/descriptor.h b/numpy/core/src/multiarray/descriptor.h
index 5a3e4b15f..6024c5e77 100644
--- a/numpy/core/src/multiarray/descriptor.h
+++ b/numpy/core/src/multiarray/descriptor.h
@@ -7,13 +7,25 @@ NPY_NO_EXPORT PyObject *arraydescr_protocol_descr_get(PyArray_Descr *self);
NPY_NO_EXPORT PyObject *
array_set_typeDict(PyObject *NPY_UNUSED(ignored), PyObject *args);
-NPY_NO_EXPORT PyArray_Descr *
-_arraydescr_fromobj(PyObject *obj);
+int
+_arraydescr_from_dtype_attr(PyObject *obj, PyArray_Descr **newdescr);
NPY_NO_EXPORT int
is_dtype_struct_simple_unaligned_layout(PyArray_Descr *dtype);
+/*
+ * Filter the fields of a dtype to only those in the list of strings, ind.
+ *
+ * No type checking is performed on the input.
+ *
+ * Raises:
+ * ValueError - if a field is repeated
+ * KeyError - if an invalid field name (or any field title) is used
+ */
+NPY_NO_EXPORT PyArray_Descr *
+arraydescr_field_subset_view(PyArray_Descr *self, PyObject *ind);
+
extern NPY_NO_EXPORT char *_datetime_strings[];
#endif
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index 14dfa71c2..1694596e9 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -1,31 +1,33 @@
/*
* Copyright (c) 2014 Ryan Juckett
- * http://www.ryanjuckett.com/
*
- * This software is provided 'as-is', without any express or implied
- * warranty. In no event will the authors be held liable for any damages
- * arising from the use of this software.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
*
- * Permission is granted to anyone to use this software for any purpose,
- * including commercial applications, and to alter it and redistribute it
- * freely, subject to the following restrictions:
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * 1. The origin of this software must not be misrepresented; you must not
- * claim that you wrote the original software. If you use this software
- * in a product, an acknowledgment in the product documentation would be
- * appreciated but is not required.
- *
- * 2. Altered source versions must be plainly marked as such, and must not be
- * misrepresented as being the original software.
- *
- * 3. This notice may not be removed or altered from any source
- * distribution.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
/*
* This file contains a modified version of Ryan Juckett's Dragon4
- * implementation, which has been ported from C++ to C and which has
+ * implementation, obtained from http://www.ryanjuckett.com,
+ * which has been ported from C++ to C and which has
* modifications specific to printing floats in numpy.
+ *
+ * Ryan Juckett's original code was under the Zlib license; he gave numpy
+ * permission to include it under the MIT license instead.
*/
#include "dragon4.h"
@@ -874,7 +876,7 @@ BigInt_Pow2(BigInt *result, npy_uint32 exponent)
result->length = blockIdx + 1;
bitIdx = (exponent % 32);
- result->blocks[blockIdx] |= (1 << bitIdx);
+ result->blocks[blockIdx] |= ((npy_uint32)1 << bitIdx);
}
/*
diff --git a/numpy/core/src/multiarray/dragon4.h b/numpy/core/src/multiarray/dragon4.h
index 2b8b4cef4..3a99bde6c 100644
--- a/numpy/core/src/multiarray/dragon4.h
+++ b/numpy/core/src/multiarray/dragon4.h
@@ -1,31 +1,33 @@
/*
* Copyright (c) 2014 Ryan Juckett
- * http://www.ryanjuckett.com/
*
- * This software is provided 'as-is', without any express or implied
- * warranty. In no event will the authors be held liable for any damages
- * arising from the use of this software.
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
*
- * Permission is granted to anyone to use this software for any purpose,
- * including commercial applications, and to alter it and redistribute it
- * freely, subject to the following restrictions:
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
*
- * 1. The origin of this software must not be misrepresented; you must not
- * claim that you wrote the original software. If you use this software
- * in a product, an acknowledgment in the product documentation would be
- * appreciated but is not required.
- *
- * 2. Altered source versions must be plainly marked as such, and must not be
- * misrepresented as being the original software.
- *
- * 3. This notice may not be removed or altered from any source
- * distribution.
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
*/
/*
* This file contains a modified version of Ryan Juckett's Dragon4
- * implementation, which has been ported from C++ to C and which has
+ * implementation, obtained from http://www.ryanjuckett.com,
+ * which has been ported from C++ to C and which has
* modifications specific to printing floats in numpy.
+ *
+ * Ryan Juckett's original code was under the Zlib license; he gave numpy
+ * permission to include it under the MIT license instead.
*/
#ifndef _NPY_DRAGON4_H_
diff --git a/numpy/core/src/multiarray/dtype_transfer.c b/numpy/core/src/multiarray/dtype_transfer.c
index 97d899ce0..ef0dd4a01 100644
--- a/numpy/core/src/multiarray/dtype_transfer.c
+++ b/numpy/core/src/multiarray/dtype_transfer.c
@@ -26,6 +26,7 @@
#include "_datetime.h"
#include "datetime_strings.h"
#include "descriptor.h"
+#include "array_assign.h"
#include "shape.h"
#include "lowlevel_strided_loops.h"
@@ -51,6 +52,20 @@
#endif
/**********************************************/
+#if NPY_DT_DBG_TRACING
+/*
+ * Thin wrapper around print that ignores exceptions
+ */
+static void
+_safe_print(PyObject *obj)
+{
+ if (PyObject_Print(obj, stdout, 0) < 0) {
+ PyErr_Clear();
+ printf("<error during print>");
+ }
+}
+#endif
+
/*
* Returns a transfer function which DECREFs any references in src_type.
*
@@ -1042,9 +1057,9 @@ get_nbo_cast_datetime_transfer_function(int aligned,
#if NPY_DT_DBG_TRACING
printf("Dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ _safe_print((PyObject *)src_dtype);
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ _safe_print((PyObject *)dst_dtype);
printf("\n");
printf("has conversion fraction %lld/%lld\n", num, denom);
#endif
@@ -1089,9 +1104,9 @@ get_nbo_datetime_to_string_transfer_function(int aligned,
#if NPY_DT_DBG_TRACING
printf("Dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ _safe_print((PyObject *)src_dtype);
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ _safe_print((PyObject *)dst_dtype);
printf("\n");
#endif
@@ -1112,7 +1127,7 @@ get_datetime_to_unicode_transfer_function(int aligned,
/* Get an ASCII string data type, adapted to match the UNICODE one */
str_dtype = PyArray_DescrFromType(NPY_STRING);
- PyArray_AdaptFlexibleDType(NULL, dst_dtype, &str_dtype);
+ str_dtype = PyArray_AdaptFlexibleDType(NULL, dst_dtype, str_dtype);
if (str_dtype == NULL) {
return NPY_FAIL;
}
@@ -1211,9 +1226,9 @@ get_nbo_string_to_datetime_transfer_function(int aligned,
#if NPY_DT_DBG_TRACING
printf("Dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ _safe_print((PyObject *)src_dtype);
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ _safe_print((PyObject *)dst_dtype);
printf("\n");
#endif
@@ -1234,7 +1249,7 @@ get_unicode_to_datetime_transfer_function(int aligned,
/* Get an ASCII string data type, adapted to match the UNICODE one */
str_dtype = PyArray_DescrFromType(NPY_STRING);
- PyArray_AdaptFlexibleDType(NULL, src_dtype, &str_dtype);
+ str_dtype = PyArray_AdaptFlexibleDType(NULL, src_dtype, str_dtype);
if (str_dtype == NULL) {
return NPY_FAIL;
}
@@ -1557,12 +1572,30 @@ get_cast_transfer_function(int aligned,
src_dtype,
&tobuffer, &todata);
-
- /* Get the copy/swap operation to dst */
- PyArray_GetDTypeCopySwapFn(aligned,
- dst_itemsize, dst_stride,
- dst_dtype,
- &frombuffer, &fromdata);
+ if (!PyDataType_REFCHK(dst_dtype)) {
+ /* Copying from buffer is a simple copy/swap operation */
+ PyArray_GetDTypeCopySwapFn(aligned,
+ dst_itemsize, dst_stride,
+ dst_dtype,
+ &frombuffer, &fromdata);
+ }
+ else {
+ /*
+ * Since the buffer is initialized to NULL, need to move the
+ * references in order to DECREF the existing data.
+ */
+ /* Object types cannot be byte swapped */
+ assert(PyDataType_ISNOTSWAPPED(dst_dtype));
+ /* The loop already needs the python api if this is reached */
+ assert(*out_needs_api);
+
+ if (PyArray_GetDTypeTransferFunction(
+ aligned, dst_itemsize, dst_stride,
+ dst_dtype, dst_dtype, 1,
+ &frombuffer, &fromdata, out_needs_api) != NPY_SUCCEED) {
+ return NPY_FAIL;
+ }
+ }
if (frombuffer == NULL || tobuffer == NULL) {
NPY_AUXDATA_FREE(castdata);
@@ -1986,6 +2019,7 @@ typedef struct {
_subarray_broadcast_offsetrun offsetruns;
} _subarray_broadcast_data;
+
/* transfer data free function */
static void _subarray_broadcast_data_free(NpyAuxData *data)
{
@@ -2341,7 +2375,7 @@ get_subarray_transfer_function(int aligned,
/* Get the subarray shapes and sizes */
if (PyDataType_HASSUBARRAY(src_dtype)) {
- if (!(PyArray_IntpConverter(src_dtype->subarray->shape,
+ if (!(PyArray_IntpConverter(src_dtype->subarray->shape,
&src_shape))) {
PyErr_SetString(PyExc_ValueError,
"invalid subarray shape");
@@ -2351,7 +2385,7 @@ get_subarray_transfer_function(int aligned,
src_dtype = src_dtype->subarray->base;
}
if (PyDataType_HASSUBARRAY(dst_dtype)) {
- if (!(PyArray_IntpConverter(dst_dtype->subarray->shape,
+ if (!(PyArray_IntpConverter(dst_dtype->subarray->shape,
&dst_shape))) {
npy_free_cache_dim_obj(src_shape);
PyErr_SetString(PyExc_ValueError,
@@ -3303,7 +3337,7 @@ get_decsrcref_transfer_function(int aligned,
/* If there are subarrays, need to wrap it */
else if (PyDataType_HASSUBARRAY(src_dtype)) {
PyArray_Dims src_shape = {NULL, -1};
- npy_intp src_size = 1;
+ npy_intp src_size;
PyArray_StridedUnaryOp *stransfer;
NpyAuxData *data;
@@ -3421,9 +3455,13 @@ PyArray_GetDTypeTransferFunction(int aligned,
#if NPY_DT_DBG_TRACING
printf("Calculating dtype transfer from ");
- PyObject_Print((PyObject *)src_dtype, stdout, 0);
+ if (PyObject_Print((PyObject *)src_dtype, stdout, 0) < 0) {
+ return NPY_FAIL;
+ }
printf(" to ");
- PyObject_Print((PyObject *)dst_dtype, stdout, 0);
+ if (PyObject_Print((PyObject *)dst_dtype, stdout, 0) < 0) {
+ return NPY_FAIL;
+ }
printf("\n");
#endif
@@ -3747,11 +3785,15 @@ PyArray_CastRawArrays(npy_intp count,
return NPY_SUCCEED;
}
- /* Check data alignment */
- aligned = (((npy_intp)src | src_stride) &
- (src_dtype->alignment - 1)) == 0 &&
- (((npy_intp)dst | dst_stride) &
- (dst_dtype->alignment - 1)) == 0;
+ /* Check data alignment, both uint and true */
+ aligned = raw_array_is_aligned(1, &count, dst, &dst_stride,
+ npy_uint_alignment(dst_dtype->elsize)) &&
+ raw_array_is_aligned(1, &count, dst, &dst_stride,
+ dst_dtype->alignment) &&
+ raw_array_is_aligned(1, &count, src, &src_stride,
+ npy_uint_alignment(src_dtype->elsize)) &&
+ raw_array_is_aligned(1, &count, src, &src_stride,
+ src_dtype->alignment);
/* Get the function to do the casting */
if (PyArray_GetDTypeTransferFunction(aligned,
diff --git a/numpy/core/src/multiarray/einsum.c.src b/numpy/core/src/multiarray/einsum.c.src
index 1765982a0..e7bbc3d0b 100644
--- a/numpy/core/src/multiarray/einsum.c.src
+++ b/numpy/core/src/multiarray/einsum.c.src
@@ -1992,12 +1992,13 @@ parse_output_subscripts(char *subscripts, int length,
/*
- * When there's just one operand and no reduction, we
- * can return a view into op. This calculates the view
- * if possible.
+ * When there's just one operand and no reduction we can return a view
+ * into 'op'. This calculates the view and stores it in 'ret', if
+ * possible. Returns -1 on error, 0 otherwise. Note that a 0 return
+ * does not mean that a view was successfully created.
*/
static int
-get_single_op_view(PyArrayObject *op, int iop, char *labels,
+get_single_op_view(PyArrayObject *op, char *labels,
int ndim_output, char *output_labels,
PyArrayObject **ret)
{
@@ -2052,13 +2053,11 @@ get_single_op_view(PyArrayObject *op, int iop, char *labels,
}
/* Update the dimensions and strides of the output */
i = out_label - output_labels;
- if (new_dims[i] != 0 &&
- new_dims[i] != PyArray_DIM(op, idim)) {
+ if (new_dims[i] != 0 && new_dims[i] != PyArray_DIM(op, idim)) {
PyErr_Format(PyExc_ValueError,
- "dimensions in operand %d for collapsing "
+ "dimensions in single operand for collapsing "
"index '%c' don't match (%d != %d)",
- iop, label, (int)new_dims[i],
- (int)PyArray_DIM(op, idim));
+ label, (int)new_dims[i], (int)PyArray_DIM(op, idim));
return -1;
}
new_dims[i] = PyArray_DIM(op, idim);
@@ -2086,80 +2085,108 @@ get_single_op_view(PyArrayObject *op, int iop, char *labels,
return 0;
}
+
+/*
+ * The char type may be either signed or unsigned, we need it to be
+ * signed here.
+ */
+static int
+_any_labels_are_negative(signed char *labels, int ndim)
+{
+ int idim;
+
+ for (idim = 0; idim < ndim; ++idim) {
+ if (labels[idim] < 0) {
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Given the labels for an operand array, returns a view of the array
+ * with all repeated labels collapsed into a single dimension along
+ * the corresponding diagonal. The labels are also updated to match
+ * the dimensions of the new array. If no label is repeated, the
+ * original array is reference increased and returned unchanged.
+ */
static PyArrayObject *
get_combined_dims_view(PyArrayObject *op, int iop, char *labels)
{
npy_intp new_strides[NPY_MAXDIMS];
npy_intp new_dims[NPY_MAXDIMS];
- int idim, ndim, icombine, combineoffset;
+ int idim, icombine;
int icombinemap[NPY_MAXDIMS];
-
+ int ndim = PyArray_NDIM(op);
PyArrayObject *ret = NULL;
- ndim = PyArray_NDIM(op);
+ /* A fast path to avoid unnecessary calculations. */
+ if (!_any_labels_are_negative((signed char *)labels, ndim)) {
+ Py_INCREF(op);
- /* Initialize the dimensions and strides to zero */
- for (idim = 0; idim < ndim; ++idim) {
- new_dims[idim] = 0;
- new_strides[idim] = 0;
+ return op;
}
- /* Copy the dimensions and strides, except when collapsing */
+ /* Combine repeated labels. */
icombine = 0;
- for (idim = 0; idim < ndim; ++idim) {
+ for(idim = 0; idim < ndim; ++idim) {
/*
* The char type may be either signed or unsigned, we
* need it to be signed here.
*/
int label = (signed char)labels[idim];
- /* If this label says to merge axes, get the actual label */
- if (label < 0) {
- combineoffset = label;
- label = labels[idim+label];
- }
- else {
- combineoffset = 0;
- if (icombine != idim) {
- labels[icombine] = labels[idim];
- }
+ npy_intp dim = PyArray_DIM(op, idim);
+ npy_intp stride = PyArray_STRIDE(op, idim);
+
+ /* A label seen for the first time, add it to the op view. */
+ if (label >= 0) {
+ /*
+ * icombinemap maps dimensions in the original array to
+ * their position in the combined dimensions view.
+ */
icombinemap[idim] = icombine;
+ new_dims[icombine] = dim;
+ new_strides[icombine] = stride;
+ ++icombine;
}
- /* If the label is 0, it's an unlabeled broadcast dimension */
- if (label == 0) {
- new_dims[icombine] = PyArray_DIM(op, idim);
- new_strides[icombine] = PyArray_STRIDE(op, idim);
- }
+ /* A repeated label, find the original one and merge them. */
else {
- /* Update the combined axis dimensions and strides */
- int i = icombinemap[idim + combineoffset];
- if (combineoffset < 0 && new_dims[i] != 0 &&
- new_dims[i] != PyArray_DIM(op, idim)) {
+ int i = icombinemap[idim + label];
+
+ icombinemap[idim] = -1;
+ if (new_dims[i] != dim) {
+ char orig_label = labels[idim + label];
PyErr_Format(PyExc_ValueError,
- "dimensions in operand %d for collapsing "
- "index '%c' don't match (%d != %d)",
- iop, label, (int)new_dims[i],
- (int)PyArray_DIM(op, idim));
+ "dimensions in operand %d for collapsing "
+ "index '%c' don't match (%d != %d)",
+ iop, orig_label, (int)new_dims[i], (int)dim);
return NULL;
}
- new_dims[i] = PyArray_DIM(op, idim);
- new_strides[i] += PyArray_STRIDE(op, idim);
+ new_strides[i] += stride;
}
+ }
- /* If the label didn't say to combine axes, increment dest i */
- if (combineoffset == 0) {
- icombine++;
+ /* Overwrite labels to match the new operand view. */
+ for (idim = 0; idim < ndim; ++idim) {
+ int i = icombinemap[idim];
+
+ if (i >= 0) {
+ labels[i] = labels[idim];
}
}
- /* The compressed number of dimensions */
+ /* The number of dimensions of the combined view. */
ndim = icombine;
+ /* Create a view of the operand with the compressed dimensions. */
Py_INCREF(PyArray_DESCR(op));
ret = (PyArrayObject *)PyArray_NewFromDescrAndBase(
Py_TYPE(op), PyArray_DESCR(op),
ndim, new_dims, new_strides, PyArray_DATA(op),
PyArray_ISWRITEABLE(op) ? NPY_ARRAY_WRITEABLE : 0,
(PyObject *)op, (PyObject *)op);
+
return ret;
}
@@ -2620,6 +2647,24 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
return NULL;
}
+ /*
+ * If there's just one operand and no output parameter,
+ * first try remapping the axes to the output to return
+ * a view instead of a copy.
+ */
+ if (nop == 1 && out == NULL) {
+ ret = NULL;
+
+ if (get_single_op_view(op_in[0], op_labels[0], ndim_output,
+ output_labels, &ret) < 0) {
+ return NULL;
+ }
+
+ if (ret != NULL) {
+ return ret;
+ }
+ }
+
/* Set all the op references to NULL */
for (iop = 0; iop < nop; ++iop) {
op[iop] = NULL;
@@ -2631,53 +2676,10 @@ PyArray_EinsteinSum(char *subscripts, npy_intp nop,
*/
for (iop = 0; iop < nop; ++iop) {
char *labels = op_labels[iop];
- int combine, ndim;
-
- ndim = PyArray_NDIM(op_in[iop]);
- /*
- * If there's just one operand and no output parameter,
- * first try remapping the axes to the output to return
- * a view instead of a copy.
- */
- if (iop == 0 && nop == 1 && out == NULL) {
- ret = NULL;
-
- if (get_single_op_view(op_in[iop], iop, labels,
- ndim_output, output_labels,
- &ret) < 0) {
- return NULL;
- }
-
- if (ret != NULL) {
- return ret;
- }
- }
-
- /*
- * Check whether any dimensions need to be combined
- *
- * The char type may be either signed or unsigned, we
- * need it to be signed here.
- */
- combine = 0;
- for (idim = 0; idim < ndim; ++idim) {
- if ((signed char)labels[idim] < 0) {
- combine = 1;
- }
- }
-
- /* If any dimensions are combined, create a view which combines them */
- if (combine) {
- op[iop] = get_combined_dims_view(op_in[iop], iop, labels);
- if (op[iop] == NULL) {
- goto fail;
- }
- }
- /* No combining needed */
- else {
- Py_INCREF(op_in[iop]);
- op[iop] = op_in[iop];
+ op[iop] = get_combined_dims_view(op_in[iop], iop, labels);
+ if (op[iop] == NULL) {
+ goto fail;
}
}
diff --git a/numpy/core/src/multiarray/flagsobject.c b/numpy/core/src/multiarray/flagsobject.c
index 85ea49fb4..a66b9d40d 100644
--- a/numpy/core/src/multiarray/flagsobject.c
+++ b/numpy/core/src/multiarray/flagsobject.c
@@ -7,6 +7,7 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
+#include "arrayobject.h"
#include "numpy/arrayscalars.h"
#include "npy_config.h"
@@ -147,7 +148,7 @@ _UpdateContiguousFlags(PyArrayObject *ap)
if (PyArray_STRIDES(ap)[i] != sd) {
is_c_contig = 0;
break;
- }
+ }
/* contiguous, if it got this far */
if (dim == 0) {
break;
@@ -201,21 +202,36 @@ arrayflags_dealloc(PyArrayFlagsObject *self)
static PyObject * \
arrayflags_ ## lower ## _get(PyArrayFlagsObject *self) \
{ \
- PyObject *item; \
- item = ((self->flags & (UPPER)) == (UPPER)) ? Py_True : Py_False; \
- Py_INCREF(item); \
- return item; \
+ return PyBool_FromLong((self->flags & (UPPER)) == (UPPER)); \
+ }
+
+static char *msg = "future versions will not create a writeable "
+ "array from broadcast_array. Set the writable flag explicitly to "
+ "avoid this warning.";
+
+#define _define_get_warn(UPPER, lower) \
+ static PyObject * \
+ arrayflags_ ## lower ## _get(PyArrayFlagsObject *self) \
+ { \
+ if (self->flags & NPY_ARRAY_WARN_ON_WRITE) { \
+ if (PyErr_Warn(PyExc_FutureWarning, msg) < 0) {\
+ return NULL; \
+ } \
+ }\
+ return PyBool_FromLong((self->flags & (UPPER)) == (UPPER)); \
}
+
_define_get(NPY_ARRAY_C_CONTIGUOUS, contiguous)
_define_get(NPY_ARRAY_F_CONTIGUOUS, fortran)
_define_get(NPY_ARRAY_WRITEBACKIFCOPY, writebackifcopy)
_define_get(NPY_ARRAY_OWNDATA, owndata)
_define_get(NPY_ARRAY_ALIGNED, aligned)
-_define_get(NPY_ARRAY_WRITEABLE, writeable)
-_define_get(NPY_ARRAY_ALIGNED|
+_define_get(NPY_ARRAY_WRITEABLE, writeable_no_warn)
+_define_get_warn(NPY_ARRAY_WRITEABLE, writeable)
+_define_get_warn(NPY_ARRAY_ALIGNED|
NPY_ARRAY_WRITEABLE, behaved)
-_define_get(NPY_ARRAY_ALIGNED|
+_define_get_warn(NPY_ARRAY_ALIGNED|
NPY_ARRAY_WRITEABLE|
NPY_ARRAY_C_CONTIGUOUS, carray)
@@ -398,6 +414,40 @@ arrayflags_writeable_set(PyArrayFlagsObject *self, PyObject *obj)
return 0;
}
+static int
+arrayflags_warn_on_write_set(PyArrayFlagsObject *self, PyObject *obj)
+{
+ /*
+ * This code should go away in a future release, so do not mangle the
+ * array_setflags function with an extra kwarg
+ */
+ int ret;
+ if (obj == NULL) {
+ PyErr_SetString(PyExc_AttributeError,
+ "Cannot delete flags _warn_on_write attribute");
+ return -1;
+ }
+ ret = PyObject_IsTrue(obj);
+ if (ret > 0) {
+ if (!(PyArray_FLAGS((PyArrayObject*)self->arr) & NPY_ARRAY_WRITEABLE)) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot set '_warn_on_write' flag when 'writable' is "
+ "False");
+ return -1;
+ }
+ PyArray_ENABLEFLAGS((PyArrayObject*)self->arr, NPY_ARRAY_WARN_ON_WRITE);
+ }
+ else if (ret < 0) {
+ return -1;
+ }
+ else {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot clear '_warn_on_write', set "
+ "writeable True to clear this private flag");
+ return -1;
+ }
+ return 0;
+}
static PyGetSetDef arrayflags_getsets[] = {
{"contiguous",
@@ -436,6 +486,14 @@ static PyGetSetDef arrayflags_getsets[] = {
(getter)arrayflags_writeable_get,
(setter)arrayflags_writeable_set,
NULL, NULL},
+ {"_writeable_no_warn",
+ (getter)arrayflags_writeable_no_warn_get,
+ (setter)NULL,
+ NULL, NULL},
+ {"_warn_on_write",
+ (getter)NULL,
+ (setter)arrayflags_warn_on_write_set,
+ NULL, NULL},
{"fnc",
(getter)arrayflags_fnc_get,
NULL,
@@ -623,7 +681,7 @@ arrayflags_setitem(PyArrayFlagsObject *self, PyObject *ind, PyObject *item)
((n==1) && (strncmp(key, "U", n) == 0))) {
return arrayflags_updateifcopy_set(self, item);
}
- else if (((n==14) && (strncmp(key, "WRITEBACKIFCOPY", n) == 0)) ||
+ else if (((n==15) && (strncmp(key, "WRITEBACKIFCOPY", n) == 0)) ||
((n==1) && (strncmp(key, "X", n) == 0))) {
return arrayflags_writebackifcopy_set(self, item);
}
@@ -648,19 +706,25 @@ static PyObject *
arrayflags_print(PyArrayFlagsObject *self)
{
int fl = self->flags;
+ const char *_warn_on_write = "";
+ if (fl & NPY_ARRAY_WARN_ON_WRITE) {
+ _warn_on_write = " (with WARN_ON_WRITE=True)";
+ }
return PyUString_FromFormat(
" %s : %s\n %s : %s\n"
+ " %s : %s\n %s : %s%s\n"
" %s : %s\n %s : %s\n"
- " %s : %s\n %s : %s\n"
- " %s : %s",
+ " %s : %s\n",
"C_CONTIGUOUS", _torf_(fl, NPY_ARRAY_C_CONTIGUOUS),
"F_CONTIGUOUS", _torf_(fl, NPY_ARRAY_F_CONTIGUOUS),
"OWNDATA", _torf_(fl, NPY_ARRAY_OWNDATA),
"WRITEABLE", _torf_(fl, NPY_ARRAY_WRITEABLE),
+ _warn_on_write,
"ALIGNED", _torf_(fl, NPY_ARRAY_ALIGNED),
"WRITEBACKIFCOPY", _torf_(fl, NPY_ARRAY_WRITEBACKIFCOPY),
- "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY));
+ "UPDATEIFCOPY", _torf_(fl, NPY_ARRAY_UPDATEIFCOPY)
+ );
}
static int
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index cae4273ff..116e37ce5 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -20,6 +20,7 @@
#include "arrayobject.h"
#include "mem_overlap.h"
#include "alloc.h"
+#include "buffer.h"
/******************* array attribute get and set routines ******************/
@@ -72,15 +73,17 @@ array_shape_set(PyArrayObject *self, PyObject *val)
((PyArrayObject_fields *)self)->nd = nd;
if (nd > 0) {
/* create new dimensions and strides */
- ((PyArrayObject_fields *)self)->dimensions = npy_alloc_cache_dim(3*nd);
+ ((PyArrayObject_fields *)self)->dimensions = npy_alloc_cache_dim(2 * nd);
if (PyArray_DIMS(self) == NULL) {
Py_DECREF(ret);
PyErr_SetString(PyExc_MemoryError,"");
return -1;
}
((PyArrayObject_fields *)self)->strides = PyArray_DIMS(self) + nd;
- memcpy(PyArray_DIMS(self), PyArray_DIMS(ret), nd*sizeof(npy_intp));
- memcpy(PyArray_STRIDES(self), PyArray_STRIDES(ret), nd*sizeof(npy_intp));
+ if (nd) {
+ memcpy(PyArray_DIMS(self), PyArray_DIMS(ret), nd*sizeof(npy_intp));
+ memcpy(PyArray_STRIDES(self), PyArray_STRIDES(ret), nd*sizeof(npy_intp));
+ }
}
else {
((PyArrayObject_fields *)self)->dimensions = NULL;
@@ -143,6 +146,7 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
offset = PyArray_BYTES(self) - (char *)view.buf;
numbytes = view.len + offset;
PyBuffer_Release(&view);
+ _dealloc_cached_buffer_info((PyObject*)new);
}
#else
if (PyArray_BASE(new) &&
@@ -170,7 +174,9 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
"compatible with available memory");
goto fail;
}
- memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len);
+ if (newstrides.len) {
+ memcpy(PyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp)*newstrides.len);
+ }
PyArray_UpdateFlags(self, NPY_ARRAY_C_CONTIGUOUS | NPY_ARRAY_F_CONTIGUOUS |
NPY_ARRAY_ALIGNED);
npy_free_cache_dim_obj(newstrides);
@@ -184,14 +190,9 @@ array_strides_set(PyArrayObject *self, PyObject *obj)
static PyObject *
-array_priority_get(PyArrayObject *self)
+array_priority_get(PyArrayObject *NPY_UNUSED(self))
{
- if (PyArray_CheckExact(self)) {
- return PyFloat_FromDouble(NPY_PRIORITY);
- }
- else {
- return PyFloat_FromDouble(NPY_PRIORITY);
- }
+ return PyFloat_FromDouble(NPY_PRIORITY);
}
static PyObject *
@@ -376,6 +377,7 @@ array_data_set(PyArrayObject *self, PyObject *op)
* sticks around after the release.
*/
PyBuffer_Release(&view);
+ _dealloc_cached_buffer_info(op);
#else
if (PyObject_AsWriteBuffer(op, &buf, &buf_len) < 0) {
PyErr_Clear();
@@ -666,8 +668,10 @@ array_struct_get(PyArrayObject *self)
return PyErr_NoMemory();
}
inter->strides = inter->shape + PyArray_NDIM(self);
- memcpy(inter->shape, PyArray_DIMS(self), sizeof(npy_intp)*PyArray_NDIM(self));
- memcpy(inter->strides, PyArray_STRIDES(self), sizeof(npy_intp)*PyArray_NDIM(self));
+ if (PyArray_NDIM(self)) {
+ memcpy(inter->shape, PyArray_DIMS(self), sizeof(npy_intp)*PyArray_NDIM(self));
+ memcpy(inter->strides, PyArray_STRIDES(self), sizeof(npy_intp)*PyArray_NDIM(self));
+ }
}
else {
inter->shape = NULL;
diff --git a/numpy/core/src/multiarray/hashdescr.c b/numpy/core/src/multiarray/hashdescr.c
index 8465093b9..0b23b6c21 100644
--- a/numpy/core/src/multiarray/hashdescr.c
+++ b/numpy/core/src/multiarray/hashdescr.c
@@ -36,17 +36,17 @@ static int _array_descr_builtin(PyArray_Descr* descr, PyObject *l);
*/
static char _normalize_byteorder(char byteorder)
{
- switch(byteorder) {
- case '=':
- if (PyArray_GetEndianness() == NPY_CPU_BIG) {
- return '>';
- }
- else {
- return '<';
- }
- default:
- return byteorder;
- }
+ switch(byteorder) {
+ case '=':
+ if (PyArray_GetEndianness() == NPY_CPU_BIG) {
+ return '>';
+ }
+ else {
+ return '<';
+ }
+ default:
+ return byteorder;
+ }
}
/*
diff --git a/numpy/core/src/multiarray/item_selection.c b/numpy/core/src/multiarray/item_selection.c
index de54ca1b3..a6ac902d3 100644
--- a/numpy/core/src/multiarray/item_selection.c
+++ b/numpy/core/src/multiarray/item_selection.c
@@ -45,7 +45,7 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
indices = NULL;
self = (PyArrayObject *)PyArray_CheckAxis(self0, &axis,
- NPY_ARRAY_CARRAY);
+ NPY_ARRAY_CARRAY_RO);
if (self == NULL) {
return NULL;
}
@@ -98,6 +98,10 @@ PyArray_TakeFrom(PyArrayObject *self0, PyObject *indices0, int axis,
goto fail;
}
+ if (arrays_overlap(out, self)) {
+ flags |= NPY_ARRAY_ENSURECOPY;
+ }
+
if (clipmode == NPY_RAISE) {
/*
* we need to make sure and get a copy
@@ -261,6 +265,7 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
npy_intp i, chunk, ni, max_item, nv, tmp;
char *src, *dest;
int copied = 0;
+ int overlap = 0;
indices = NULL;
values = NULL;
@@ -274,24 +279,6 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
return NULL;
}
- if (!PyArray_ISCONTIGUOUS(self)) {
- PyArrayObject *obj;
- int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY;
-
- if (clipmode == NPY_RAISE) {
- flags |= NPY_ARRAY_ENSURECOPY;
- }
- Py_INCREF(PyArray_DESCR(self));
- obj = (PyArrayObject *)PyArray_FromArray(self,
- PyArray_DESCR(self), flags);
- if (obj != self) {
- copied = 1;
- }
- self = obj;
- }
- max_item = PyArray_SIZE(self);
- dest = PyArray_DATA(self);
- chunk = PyArray_DESCR(self)->elsize;
indices = (PyArrayObject *)PyArray_ContiguousFromAny(indices0,
NPY_INTP, 0, 0);
if (indices == NULL) {
@@ -308,6 +295,25 @@ PyArray_PutTo(PyArrayObject *self, PyObject* values0, PyObject *indices0,
if (nv <= 0) {
goto finish;
}
+
+ overlap = arrays_overlap(self, values) || arrays_overlap(self, indices);
+ if (overlap || !PyArray_ISCONTIGUOUS(self)) {
+ PyArrayObject *obj;
+ int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY |
+ NPY_ARRAY_ENSURECOPY;
+
+ Py_INCREF(PyArray_DESCR(self));
+ obj = (PyArrayObject *)PyArray_FromArray(self,
+ PyArray_DESCR(self), flags);
+ if (obj != self) {
+ copied = 1;
+ }
+ self = obj;
+ }
+ max_item = PyArray_SIZE(self);
+ dest = PyArray_DATA(self);
+ chunk = PyArray_DESCR(self)->elsize;
+
if (PyDataType_REFCHK(PyArray_DESCR(self))) {
switch(clipmode) {
case NPY_RAISE:
@@ -434,10 +440,11 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
PyArray_FastPutmaskFunc *func;
PyArrayObject *mask, *values;
PyArray_Descr *dtype;
- npy_intp i, j, chunk, ni, max_item, nv;
+ npy_intp i, j, chunk, ni, nv;
char *src, *dest;
npy_bool *mask_data;
int copied = 0;
+ int overlap = 0;
mask = NULL;
values = NULL;
@@ -447,29 +454,14 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
"be an array");
return NULL;
}
- if (!PyArray_ISCONTIGUOUS(self)) {
- PyArrayObject *obj;
- dtype = PyArray_DESCR(self);
- Py_INCREF(dtype);
- obj = (PyArrayObject *)PyArray_FromArray(self, dtype,
- NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY);
- if (obj != self) {
- copied = 1;
- }
- self = obj;
- }
-
- max_item = PyArray_SIZE(self);
- dest = PyArray_DATA(self);
- chunk = PyArray_DESCR(self)->elsize;
mask = (PyArrayObject *)PyArray_FROM_OTF(mask0, NPY_BOOL,
NPY_ARRAY_CARRAY | NPY_ARRAY_FORCECAST);
if (mask == NULL) {
goto fail;
}
ni = PyArray_SIZE(mask);
- if (ni != max_item) {
+ if (ni != PyArray_SIZE(self)) {
PyErr_SetString(PyExc_ValueError,
"putmask: mask and data must be "
"the same size");
@@ -491,6 +483,27 @@ PyArray_PutMask(PyArrayObject *self, PyObject* values0, PyObject* mask0)
}
src = PyArray_DATA(values);
+ overlap = arrays_overlap(self, values) || arrays_overlap(self, mask);
+ if (overlap || !PyArray_ISCONTIGUOUS(self)) {
+ int flags = NPY_ARRAY_CARRAY | NPY_ARRAY_WRITEBACKIFCOPY;
+ PyArrayObject *obj;
+
+ if (overlap) {
+ flags |= NPY_ARRAY_ENSURECOPY;
+ }
+
+ dtype = PyArray_DESCR(self);
+ Py_INCREF(dtype);
+ obj = (PyArrayObject *)PyArray_FromArray(self, dtype, flags);
+ if (obj != self) {
+ copied = 1;
+ }
+ self = obj;
+ }
+
+ chunk = PyArray_DESCR(self)->elsize;
+ dest = PyArray_DATA(self);
+
if (PyDataType_REFCHK(PyArray_DESCR(self))) {
for (i = 0, j = 0; i < ni; i++, j++) {
if (j >= nv) {
@@ -594,7 +607,8 @@ PyArray_Repeat(PyArrayObject *aop, PyObject *op, int axis)
else {
for (j = 0; j < n; j++) {
if (counts[j] < 0) {
- PyErr_SetString(PyExc_ValueError, "count < 0");
+ PyErr_SetString(PyExc_ValueError,
+ "repeats may not contain negative values.");
goto fail;
}
total += counts[j];
@@ -712,6 +726,13 @@ PyArray_Choose(PyArrayObject *ip, PyObject *op, PyArrayObject *out,
"choose: invalid shape for output array.");
goto fail;
}
+
+ for (i = 0; i < n; i++) {
+ if (arrays_overlap(out, mps[i])) {
+ flags |= NPY_ARRAY_ENSURECOPY;
+ }
+ }
+
if (clipmode == NPY_RAISE) {
/*
* we need to make sure and get a copy
@@ -1105,7 +1126,7 @@ fail:
NPY_NO_EXPORT int
PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
{
- PyArray_SortFunc *sort;
+ PyArray_SortFunc *sort = NULL;
int n = PyArray_NDIM(op);
if (check_and_adjust_axis(&axis, n) < 0) {
@@ -1122,6 +1143,7 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
}
sort = PyArray_DESCR(op)->f->sort[which];
+
if (sort == NULL) {
if (PyArray_DESCR(op)->f->compare) {
switch (which) {
@@ -1132,8 +1154,8 @@ PyArray_Sort(PyArrayObject *op, int axis, NPY_SORTKIND which)
case NPY_HEAPSORT:
sort = npy_heapsort;
break;
- case NPY_MERGESORT:
- sort = npy_mergesort;
+ case NPY_STABLESORT:
+ sort = npy_timsort;
break;
}
}
@@ -1263,16 +1285,11 @@ NPY_NO_EXPORT PyObject *
PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
{
PyArrayObject *op2;
- PyArray_ArgSortFunc *argsort;
+ PyArray_ArgSortFunc *argsort = NULL;
PyObject *ret;
- if (which < 0 || which >= NPY_NSORTS) {
- PyErr_SetString(PyExc_ValueError,
- "not a valid sort kind");
- return NULL;
- }
-
argsort = PyArray_DESCR(op)->f->argsort[which];
+
if (argsort == NULL) {
if (PyArray_DESCR(op)->f->compare) {
switch (which) {
@@ -1283,8 +1300,8 @@ PyArray_ArgSort(PyArrayObject *op, int axis, NPY_SORTKIND which)
case NPY_HEAPSORT:
argsort = npy_aheapsort;
break;
- case NPY_MERGESORT:
- argsort = npy_amergesort;
+ case NPY_STABLESORT:
+ argsort = npy_atimsort;
break;
}
}
@@ -1319,7 +1336,11 @@ PyArray_ArgPartition(PyArrayObject *op, PyArrayObject *ktharray, int axis,
PyArray_ArgSortFunc *argsort;
PyObject *ret;
- if (which < 0 || which >= NPY_NSELECTS) {
+ /*
+ * As a C-exported function, enum NPY_SELECTKIND loses its enum property.
+ * Check the values to make sure they are in range.
+ */
+ if ((int)which < 0 || (int)which >= NPY_NSELECTS) {
PyErr_SetString(PyExc_ValueError,
"not a valid partition kind");
return NULL;
@@ -1425,7 +1446,7 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
goto fail;
}
}
- if (!PyArray_DESCR(mps[i])->f->argsort[NPY_MERGESORT]
+ if (!PyArray_DESCR(mps[i])->f->argsort[NPY_STABLESORT]
&& !PyArray_DESCR(mps[i])->f->compare) {
PyErr_Format(PyExc_TypeError,
"item %zd type does not have compare function", i);
@@ -1439,8 +1460,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
/* Now we can check the axis */
nd = PyArray_NDIM(mps[0]);
- if ((nd == 0) || (PyArray_SIZE(mps[0]) == 1)) {
- /* single element case */
+ if ((nd == 0) || (PyArray_SIZE(mps[0]) <= 1)) {
+ /* empty/single element case */
ret = (PyArrayObject *)PyArray_NewFromDescr(
&PyArray_Type, PyArray_DescrFromType(NPY_INTP),
PyArray_NDIM(mps[0]), PyArray_DIMS(mps[0]), NULL, NULL,
@@ -1449,7 +1470,9 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
if (ret == NULL) {
goto fail;
}
- *((npy_intp *)(PyArray_DATA(ret))) = 0;
+ if (PyArray_SIZE(mps[0]) > 0) {
+ *((npy_intp *)(PyArray_DATA(ret))) = 0;
+ }
goto finish;
}
if (check_and_adjust_axis(&axis, nd) < 0) {
@@ -1499,16 +1522,28 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
char *valbuffer, *indbuffer;
int *swaps;
- valbuffer = PyDataMem_NEW(N * maxelsize);
+ assert(N > 0); /* Guaranteed and assumed by indbuffer */
+ npy_intp valbufsize = N * maxelsize;
+ if (NPY_UNLIKELY(valbufsize) == 0) {
+ valbufsize = 1; /* Ensure allocation is not empty */
+ }
+
+ valbuffer = PyDataMem_NEW(valbufsize);
if (valbuffer == NULL) {
goto fail;
}
indbuffer = PyDataMem_NEW(N * sizeof(npy_intp));
if (indbuffer == NULL) {
+ PyDataMem_FREE(valbuffer);
+ goto fail;
+ }
+ swaps = malloc(NPY_LIKELY(n > 0) ? n * sizeof(int) : 1);
+ if (swaps == NULL) {
+ PyDataMem_FREE(valbuffer);
PyDataMem_FREE(indbuffer);
goto fail;
}
- swaps = malloc(n*sizeof(int));
+
for (j = 0; j < n; j++) {
swaps[j] = PyArray_ISBYTESWAPPED(mps[j]);
}
@@ -1521,9 +1556,9 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
int rcode;
elsize = PyArray_DESCR(mps[j])->elsize;
astride = PyArray_STRIDES(mps[j])[axis];
- argsort = PyArray_DESCR(mps[j])->f->argsort[NPY_MERGESORT];
+ argsort = PyArray_DESCR(mps[j])->f->argsort[NPY_STABLESORT];
if(argsort == NULL) {
- argsort = npy_amergesort;
+ argsort = npy_atimsort;
}
_unaligned_strided_byte_copy(valbuffer, (npy_intp) elsize,
its[j]->dataptr, astride, N, elsize);
@@ -1537,8 +1572,8 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
#else
if (rcode < 0) {
#endif
- npy_free_cache(valbuffer, N * maxelsize);
- npy_free_cache(indbuffer, N * sizeof(npy_intp));
+ PyDataMem_FREE(valbuffer);
+ PyDataMem_FREE(indbuffer);
free(swaps);
goto fail;
}
@@ -1560,9 +1595,9 @@ PyArray_LexSort(PyObject *sort_keys, int axis)
}
for (j = 0; j < n; j++) {
int rcode;
- argsort = PyArray_DESCR(mps[j])->f->argsort[NPY_MERGESORT];
+ argsort = PyArray_DESCR(mps[j])->f->argsort[NPY_STABLESORT];
if(argsort == NULL) {
- argsort = npy_amergesort;
+ argsort = npy_atimsort;
}
rcode = argsort(its[j]->dataptr,
(npy_intp *)rit->dataptr, N, mps[j]);
@@ -2183,14 +2218,59 @@ PyArray_Nonzero(PyArrayObject *self)
PyArrayObject *ret = NULL;
PyObject *ret_tuple;
npy_intp ret_dims[2];
- PyArray_NonzeroFunc *nonzero = PyArray_DESCR(self)->f->nonzero;
+
+ PyArray_NonzeroFunc *nonzero;
+ PyArray_Descr *dtype;
+
npy_intp nonzero_count;
+ npy_intp added_count = 0;
+ int needs_api;
+ int is_bool;
NpyIter *iter;
NpyIter_IterNextFunc *iternext;
NpyIter_GetMultiIndexFunc *get_multi_index;
char **dataptr;
- int is_empty = 0;
+
+ dtype = PyArray_DESCR(self);
+ nonzero = dtype->f->nonzero;
+ needs_api = PyDataType_FLAGCHK(dtype, NPY_NEEDS_PYAPI);
+
+ /* Special case - nonzero(zero_d) is nonzero(atleast_1d(zero_d)) */
+ if (ndim == 0) {
+ char const* msg;
+ if (PyArray_ISBOOL(self)) {
+ msg =
+ "Calling nonzero on 0d arrays is deprecated, as it behaves "
+ "surprisingly. Use `atleast_1d(cond).nonzero()` if the old "
+ "behavior was intended. If the context of this warning is of "
+ "the form `arr[nonzero(cond)]`, just use `arr[cond]`.";
+ }
+ else {
+ msg =
+ "Calling nonzero on 0d arrays is deprecated, as it behaves "
+ "surprisingly. Use `atleast_1d(arr).nonzero()` if the old "
+ "behavior was intended.";
+ }
+ if (DEPRECATE(msg) < 0) {
+ return NULL;
+ }
+
+ static npy_intp const zero_dim_shape[1] = {1};
+ static npy_intp const zero_dim_strides[1] = {0};
+
+ Py_INCREF(PyArray_DESCR(self)); /* array creation steals reference */
+ PyArrayObject *self_1d = (PyArrayObject *)PyArray_NewFromDescrAndBase(
+ Py_TYPE(self), PyArray_DESCR(self),
+ 1, zero_dim_shape, zero_dim_strides, PyArray_BYTES(self),
+ PyArray_FLAGS(self), (PyObject *)self, (PyObject *)self);
+ if (self_1d == NULL) {
+ return NULL;
+ }
+ ret_tuple = PyArray_Nonzero(self_1d);
+ Py_DECREF(self_1d);
+ return ret_tuple;
+ }
/*
* First count the number of non-zeros in 'self'.
@@ -2200,9 +2280,11 @@ PyArray_Nonzero(PyArrayObject *self)
return NULL;
}
+ is_bool = PyArray_ISBOOL(self);
+
/* Allocate the result as a 2D array */
ret_dims[0] = nonzero_count;
- ret_dims[1] = (ndim == 0) ? 1 : ndim;
+ ret_dims[1] = ndim;
ret = (PyArrayObject *)PyArray_NewFromDescr(
&PyArray_Type, PyArray_DescrFromType(NPY_INTP),
2, ret_dims, NULL, NULL,
@@ -2212,11 +2294,11 @@ PyArray_Nonzero(PyArrayObject *self)
}
/* If it's a one-dimensional result, don't use an iterator */
- if (ndim <= 1) {
+ if (ndim == 1) {
npy_intp * multi_index = (npy_intp *)PyArray_DATA(ret);
char * data = PyArray_BYTES(self);
- npy_intp stride = (ndim == 0) ? 0 : PyArray_STRIDE(self, 0);
- npy_intp count = (ndim == 0) ? 1 : PyArray_DIM(self, 0);
+ npy_intp stride = PyArray_STRIDE(self, 0);
+ npy_intp count = PyArray_DIM(self, 0);
NPY_BEGIN_THREADS_DEF;
/* nothing to do */
@@ -2224,10 +2306,12 @@ PyArray_Nonzero(PyArrayObject *self)
goto finish;
}
- NPY_BEGIN_THREADS_THRESHOLDED(count);
+ if (!needs_api) {
+ NPY_BEGIN_THREADS_THRESHOLDED(count);
+ }
/* avoid function call for bool */
- if (PyArray_ISBOOL(self)) {
+ if (is_bool) {
/*
* use fast memchr variant for sparse data, see gh-4370
* the fast bool count is followed by this sparse path is faster
@@ -2260,8 +2344,14 @@ PyArray_Nonzero(PyArrayObject *self)
npy_intp j;
for (j = 0; j < count; ++j) {
if (nonzero(data, self)) {
+ if (++added_count > nonzero_count) {
+ break;
+ }
*multi_index++ = j;
}
+ if (needs_api && PyErr_Occurred()) {
+ break;
+ }
data += stride;
}
}
@@ -2302,6 +2392,8 @@ PyArray_Nonzero(PyArrayObject *self)
Py_DECREF(ret);
return NULL;
}
+
+ needs_api = NpyIter_IterationNeedsAPI(iter);
NPY_BEGIN_THREADS_NDITER(iter);
@@ -2310,7 +2402,7 @@ PyArray_Nonzero(PyArrayObject *self)
multi_index = (npy_intp *)PyArray_DATA(ret);
/* Get the multi-index for each non-zero element */
- if (PyArray_ISBOOL(self)) {
+ if (is_bool) {
/* avoid function call for bool */
do {
if (**dataptr != 0) {
@@ -2322,9 +2414,15 @@ PyArray_Nonzero(PyArrayObject *self)
else {
do {
if (nonzero(*dataptr, self)) {
+ if (++added_count > nonzero_count) {
+ break;
+ }
get_multi_index(iter, multi_index);
multi_index += ndim;
}
+ if (needs_api && PyErr_Occurred()) {
+ break;
+ }
} while(iternext(iter));
}
@@ -2334,9 +2432,18 @@ PyArray_Nonzero(PyArrayObject *self)
NpyIter_Deallocate(iter);
finish:
- /* Treat zero-dimensional as shape (1,) */
- if (ndim == 0) {
- ndim = 1;
+ if (PyErr_Occurred()) {
+ Py_DECREF(ret);
+ return NULL;
+ }
+
+ /* if the `nonzero()` function was executed, check for a miscount due to side effects */
+ if (!is_bool && added_count != nonzero_count) {
+ PyErr_SetString(PyExc_RuntimeError,
+ "number of non-zero array elements "
+ "changed during function execution.");
+ Py_DECREF(ret);
+ return NULL;
}
ret_tuple = PyTuple_New(ndim);
@@ -2345,18 +2452,11 @@ finish:
return NULL;
}
- for (i = 0; i < PyArray_NDIM(ret); ++i) {
- if (PyArray_DIMS(ret)[i] == 0) {
- is_empty = 1;
- break;
- }
- }
-
/* Create views into ret, one for each dimension */
for (i = 0; i < ndim; ++i) {
npy_intp stride = ndim * NPY_SIZEOF_INTP;
/* the result is an empty array, the view must point to valid memory */
- npy_intp data_offset = is_empty ? 0 : i * NPY_SIZEOF_INTP;
+ npy_intp data_offset = nonzero_count == 0 ? 0 : i * NPY_SIZEOF_INTP;
PyArrayObject *view = (PyArrayObject *)PyArray_NewFromDescrAndBase(
Py_TYPE(ret), PyArray_DescrFromType(NPY_INTP),
@@ -2379,7 +2479,7 @@ finish:
* array of values, which must be of length PyArray_NDIM(self).
*/
NPY_NO_EXPORT PyObject *
-PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index)
+PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index)
{
int idim, ndim = PyArray_NDIM(self);
char *data = PyArray_DATA(self);
@@ -2392,7 +2492,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index)
npy_intp ind = multi_index[idim];
if (check_and_adjust_index(&ind, shapevalue, idim, NULL) < 0) {
- return NULL;
+ return NULL;
}
data += ind * strides[idim];
}
@@ -2407,7 +2507,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index)
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index,
+PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index,
PyObject *obj)
{
int idim, ndim = PyArray_NDIM(self);
diff --git a/numpy/core/src/multiarray/item_selection.h b/numpy/core/src/multiarray/item_selection.h
index 90bb5100d..2276b4db7 100644
--- a/numpy/core/src/multiarray/item_selection.h
+++ b/numpy/core/src/multiarray/item_selection.h
@@ -15,7 +15,7 @@ count_boolean_trues(int ndim, char *data, npy_intp *ashape, npy_intp *astrides);
* array of values, which must be of length PyArray_NDIM(self).
*/
NPY_NO_EXPORT PyObject *
-PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index);
+PyArray_MultiIndexGetItem(PyArrayObject *self, const npy_intp *multi_index);
/*
* Sets a single item in the array, based on a single multi-index
@@ -24,7 +24,7 @@ PyArray_MultiIndexGetItem(PyArrayObject *self, npy_intp *multi_index);
* Returns 0 on success, -1 on failure.
*/
NPY_NO_EXPORT int
-PyArray_MultiIndexSetItem(PyArrayObject *self, npy_intp *multi_index,
+PyArray_MultiIndexSetItem(PyArrayObject *self, const npy_intp *multi_index,
PyObject *obj);
#endif
diff --git a/numpy/core/src/multiarray/iterators.c b/numpy/core/src/multiarray/iterators.c
index 3e3248f53..e66bb36aa 100644
--- a/numpy/core/src/multiarray/iterators.c
+++ b/numpy/core/src/multiarray/iterators.c
@@ -92,121 +92,13 @@ parse_index_entry(PyObject *op, npy_intp *step_size,
}
-/*
- * Parses an index that has no fancy indexing. Populates
- * out_dimensions, out_strides, and out_offset.
- */
-NPY_NO_EXPORT int
-parse_index(PyArrayObject *self, PyObject *op,
- npy_intp *out_dimensions,
- npy_intp *out_strides,
- npy_intp *out_offset,
- int check_index)
-{
- int i, j, n;
- int nd_old, nd_new, n_add, n_ellipsis;
- npy_intp n_steps, start, offset, step_size;
- PyObject *op1 = NULL;
- int is_slice;
-
- if (PySlice_Check(op) || op == Py_Ellipsis || op == Py_None) {
- n = 1;
- op1 = op;
- Py_INCREF(op);
- /* this relies on the fact that n==1 for loop below */
- is_slice = 1;
- }
- else {
- if (!PySequence_Check(op)) {
- PyErr_SetString(PyExc_IndexError,
- "index must be either an int "
- "or a sequence");
- return -1;
- }
- n = PySequence_Length(op);
- is_slice = 0;
- }
-
- nd_old = nd_new = 0;
-
- offset = 0;
- for (i = 0; i < n; i++) {
- if (!is_slice) {
- op1 = PySequence_GetItem(op, i);
- if (op1 == NULL) {
- return -1;
- }
- }
- start = parse_index_entry(op1, &step_size, &n_steps,
- nd_old < PyArray_NDIM(self) ?
- PyArray_DIMS(self)[nd_old] : 0,
- nd_old, check_index ?
- nd_old < PyArray_NDIM(self) : 0);
- Py_DECREF(op1);
- if (start == -1) {
- break;
- }
- if (n_steps == NEWAXIS_INDEX) {
- out_dimensions[nd_new] = 1;
- out_strides[nd_new] = 0;
- nd_new++;
- }
- else if (n_steps == ELLIPSIS_INDEX) {
- for (j = i + 1, n_ellipsis = 0; j < n; j++) {
- op1 = PySequence_GetItem(op, j);
- if (op1 == Py_None) {
- n_ellipsis++;
- }
- Py_DECREF(op1);
- }
- n_add = PyArray_NDIM(self)-(n-i-n_ellipsis-1+nd_old);
- if (n_add < 0) {
- PyErr_SetString(PyExc_IndexError, "too many indices");
- return -1;
- }
- for (j = 0; j < n_add; j++) {
- out_dimensions[nd_new] = PyArray_DIMS(self)[nd_old];
- out_strides[nd_new] = PyArray_STRIDES(self)[nd_old];
- nd_new++; nd_old++;
- }
- }
- else {
- if (nd_old >= PyArray_NDIM(self)) {
- PyErr_SetString(PyExc_IndexError, "too many indices");
- return -1;
- }
- offset += PyArray_STRIDES(self)[nd_old]*start;
- nd_old++;
- if (n_steps != SINGLE_INDEX) {
- out_dimensions[nd_new] = n_steps;
- out_strides[nd_new] = step_size *
- PyArray_STRIDES(self)[nd_old-1];
- nd_new++;
- }
- }
- }
- if (i < n) {
- return -1;
- }
- n_add = PyArray_NDIM(self)-nd_old;
- for (j = 0; j < n_add; j++) {
- out_dimensions[nd_new] = PyArray_DIMS(self)[nd_old];
- out_strides[nd_new] = PyArray_STRIDES(self)[nd_old];
- nd_new++;
- nd_old++;
- }
- *out_offset = offset;
- return nd_new;
-}
-
-
/*********************** Element-wise Array Iterator ***********************/
/* Aided by Peter J. Verveer's nd_image package and numpy's arraymap ****/
/* and Python's array iterator ***/
/* get the dataptr from its current coordinates for simple iterator */
static char*
-get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates)
+get_ptr_simple(PyArrayIterObject* iter, const npy_intp *coordinates)
{
npy_intp i;
char *ret;
@@ -224,10 +116,12 @@ get_ptr_simple(PyArrayIterObject* iter, npy_intp *coordinates)
* This is common initialization code between PyArrayIterObject and
* PyArrayNeighborhoodIterObject
*
- * Increase ao refcount
+ * Steals a reference to the array object which gets removed at deallocation,
+ * if the iterator is allocated statically and its dealloc not called, it
+ * can be thought of as borrowing the reference.
*/
-static PyObject *
-array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
+NPY_NO_EXPORT void
+PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao)
{
int nd, i;
@@ -239,7 +133,6 @@ array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
else {
it->contiguous = 0;
}
- Py_INCREF(ao);
it->ao = ao;
it->size = PyArray_SIZE(ao);
it->nd_m1 = nd - 1;
@@ -263,7 +156,7 @@ array_iter_base_init(PyArrayIterObject *it, PyArrayObject *ao)
it->translate = &get_ptr_simple;
PyArray_ITER_RESET(it);
- return (PyObject *)it;
+ return;
}
static void
@@ -278,6 +171,10 @@ array_iter_base_dealloc(PyArrayIterObject *it)
NPY_NO_EXPORT PyObject *
PyArray_IterNew(PyObject *obj)
{
+ /*
+ * Note that internally PyArray_RawIterBaseInit may be called directly on a
+ * statically allocated PyArrayIterObject.
+ */
PyArrayIterObject *it;
PyArrayObject *ao;
@@ -294,7 +191,8 @@ PyArray_IterNew(PyObject *obj)
return NULL;
}
- array_iter_base_init(it, ao);
+ Py_INCREF(ao); /* PyArray_RawIterBaseInit steals a reference */
+ PyArray_RawIterBaseInit(it, ao);
return (PyObject *)it;
}
@@ -498,6 +396,10 @@ arrayiter_next(PyArrayIterObject *it)
static void
arrayiter_dealloc(PyArrayIterObject *it)
{
+ /*
+ * Note that it is possible to statically allocate a PyArrayIterObject,
+ * which does not call this function.
+ */
array_iter_base_dealloc(it);
PyArray_free(it);
}
@@ -647,6 +549,7 @@ iter_subscript(PyArrayIterObject *self, PyObject *ind)
char *dptr;
int size;
PyObject *obj = NULL;
+ PyObject *new;
PyArray_CopySwapFunc *copyswap;
if (ind == Py_Ellipsis) {
@@ -748,36 +651,36 @@ iter_subscript(PyArrayIterObject *self, PyObject *ind)
obj = ind;
}
- if (PyArray_Check(obj)) {
- /* Check for Boolean object */
- if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) {
- ret = iter_subscript_Bool(self, (PyArrayObject *)obj);
- Py_DECREF(indtype);
- }
- /* Check for integer array */
- else if (PyArray_ISINTEGER((PyArrayObject *)obj)) {
- PyObject *new;
- new = PyArray_FromAny(obj, indtype, 0, 0,
- NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL);
- if (new == NULL) {
- goto fail;
- }
- Py_DECREF(obj);
- obj = new;
- new = iter_subscript_int(self, (PyArrayObject *)obj);
- Py_DECREF(obj);
- return new;
- }
- else {
- goto fail;
- }
+ /* Any remaining valid input is an array or has been turned into one */
+ if (!PyArray_Check(obj)) {
+ goto fail;
+ }
+
+ /* Check for Boolean array */
+ if (PyArray_TYPE((PyArrayObject *)obj) == NPY_BOOL) {
+ ret = iter_subscript_Bool(self, (PyArrayObject *)obj);
+ Py_DECREF(indtype);
Py_DECREF(obj);
return (PyObject *)ret;
}
- else {
- Py_DECREF(indtype);
+
+ /* Only integer arrays left */
+ if (!PyArray_ISINTEGER((PyArrayObject *)obj)) {
+ goto fail;
}
+ Py_INCREF(indtype);
+ new = PyArray_FromAny(obj, indtype, 0, 0,
+ NPY_ARRAY_FORCECAST | NPY_ARRAY_ALIGNED, NULL);
+ if (new == NULL) {
+ goto fail;
+ }
+ Py_DECREF(indtype);
+ Py_DECREF(obj);
+ ret = (PyArrayObject *)iter_subscript_int(self, (PyArrayObject *)new);
+ Py_DECREF(new);
+ return (PyObject *)ret;
+
fail:
if (!PyErr_Occurred()) {
@@ -937,7 +840,6 @@ iter_ass_subscript(PyArrayIterObject *self, PyObject *ind, PyObject *val)
if (check_and_adjust_index(&start, self->size, -1, NULL) < 0) {
goto finish;
}
- retval = 0;
PyArray_ITER_GOTO1D(self, start);
retval = type->f->setitem(val, self->dataptr, self->ao);
PyArray_ITER_RESET(self);
@@ -1349,232 +1251,169 @@ PyArray_Broadcast(PyArrayMultiIterObject *mit)
return 0;
}
-/*NUMPY_API
- * Get MultiIterator from array of Python objects and any additional
- *
- * PyObject **mps -- array of PyObjects
- * int n - number of PyObjects in the array
- * int nadd - number of additional arrays to include in the iterator.
- *
- * Returns a multi-iterator object.
+static NPY_INLINE PyObject*
+multiiter_wrong_number_of_args(void)
+{
+ return PyErr_Format(PyExc_ValueError,
+ "Need at least 0 and at most %d "
+ "array objects.", NPY_MAXARGS);
+}
+
+/*
+ * Common implementation for all PyArrayMultiIterObject constructors.
*/
-NPY_NO_EXPORT PyObject *
-PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...)
+static PyObject*
+multiiter_new_impl(int n_args, PyObject **args)
{
- va_list va;
PyArrayMultiIterObject *multi;
- PyObject *current;
- PyObject *arr;
-
- int i, ntot, err=0;
+ int i;
- ntot = n + nadd;
- if (ntot < 1 || ntot > NPY_MAXARGS) {
- PyErr_Format(PyExc_ValueError,
- "Need at least 1 and at most %d "
- "array objects.", NPY_MAXARGS);
- return NULL;
- }
multi = PyArray_malloc(sizeof(PyArrayMultiIterObject));
if (multi == NULL) {
return PyErr_NoMemory();
}
PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type);
+ multi->numiter = 0;
- for (i = 0; i < ntot; i++) {
- multi->iters[i] = NULL;
- }
- multi->numiter = ntot;
- multi->index = 0;
+ for (i = 0; i < n_args; ++i) {
+ PyObject *obj = args[i];
+ PyObject *arr;
+ PyArrayIterObject *it;
- va_start(va, nadd);
- for (i = 0; i < ntot; i++) {
- if (i < n) {
- current = mps[i];
- }
- else {
- current = va_arg(va, PyObject *);
- }
- arr = PyArray_FROM_O(current);
- if (arr == NULL) {
- err = 1;
- break;
+ if (PyObject_IsInstance(obj, (PyObject *)&PyArrayMultiIter_Type)) {
+ PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *)obj;
+ int j;
+
+ if (multi->numiter + mit->numiter > NPY_MAXARGS) {
+ multiiter_wrong_number_of_args();
+ goto fail;
+ }
+ for (j = 0; j < mit->numiter; ++j) {
+ arr = (PyObject *)mit->iters[j]->ao;
+ it = (PyArrayIterObject *)PyArray_IterNew(arr);
+ if (it == NULL) {
+ goto fail;
+ }
+ multi->iters[multi->numiter++] = it;
+ }
}
- else {
- multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr);
- if (multi->iters[i] == NULL) {
- err = 1;
- break;
+ else if (multi->numiter < NPY_MAXARGS) {
+ arr = PyArray_FromAny(obj, NULL, 0, 0, 0, NULL);
+ if (arr == NULL) {
+ goto fail;
}
+ it = (PyArrayIterObject *)PyArray_IterNew(arr);
Py_DECREF(arr);
+ if (it == NULL) {
+ goto fail;
+ }
+ multi->iters[multi->numiter++] = it;
+ }
+ else {
+ multiiter_wrong_number_of_args();
+ goto fail;
}
}
- va_end(va);
- if (!err && PyArray_Broadcast(multi) < 0) {
- err = 1;
+ if (multi->numiter < 0) {
+ multiiter_wrong_number_of_args();
+ goto fail;
}
- if (err) {
- Py_DECREF(multi);
- return NULL;
+ if (PyArray_Broadcast(multi) < 0) {
+ goto fail;
}
PyArray_MultiIter_RESET(multi);
+
return (PyObject *)multi;
+
+fail:
+ Py_DECREF(multi);
+
+ return NULL;
}
/*NUMPY_API
- * Get MultiIterator,
+ * Get MultiIterator from array of Python objects and any additional
+ *
+ * PyObject **mps - array of PyObjects
+ * int n - number of PyObjects in the array
+ * int nadd - number of additional arrays to include in the iterator.
+ *
+ * Returns a multi-iterator object.
*/
-NPY_NO_EXPORT PyObject *
-PyArray_MultiIterNew(int n, ...)
+NPY_NO_EXPORT PyObject*
+PyArray_MultiIterFromObjects(PyObject **mps, int n, int nadd, ...)
{
+ PyObject *args_impl[NPY_MAXARGS];
+ int ntot = n + nadd;
+ int i;
va_list va;
- PyArrayMultiIterObject *multi;
- PyObject *current;
- PyObject *arr;
- int i, err = 0;
-
- if (n < 1 || n > NPY_MAXARGS) {
- PyErr_Format(PyExc_ValueError,
- "Need at least 1 and at most %d "
- "array objects.", NPY_MAXARGS);
- return NULL;
+ if ((ntot > NPY_MAXARGS) || (ntot < 0)) {
+ return multiiter_wrong_number_of_args();
}
- /* fprintf(stderr, "multi new...");*/
+ for (i = 0; i < n; ++i) {
+ args_impl[i] = mps[i];
+ }
- multi = PyArray_malloc(sizeof(PyArrayMultiIterObject));
- if (multi == NULL) {
- return PyErr_NoMemory();
+ va_start(va, nadd);
+ for (; i < ntot; ++i) {
+ args_impl[i] = va_arg(va, PyObject *);
}
- PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type);
+ va_end(va);
- for (i = 0; i < n; i++) {
- multi->iters[i] = NULL;
+ return multiiter_new_impl(ntot, args_impl);
+}
+
+/*NUMPY_API
+ * Get MultiIterator,
+ */
+NPY_NO_EXPORT PyObject*
+PyArray_MultiIterNew(int n, ...)
+{
+ PyObject *args_impl[NPY_MAXARGS];
+ int i;
+ va_list va;
+
+ if ((n > NPY_MAXARGS) || (n < 0)) {
+ return multiiter_wrong_number_of_args();
}
- multi->numiter = n;
- multi->index = 0;
va_start(va, n);
- for (i = 0; i < n; i++) {
- current = va_arg(va, PyObject *);
- arr = PyArray_FROM_O(current);
- if (arr == NULL) {
- err = 1;
- break;
- }
- else {
- multi->iters[i] = (PyArrayIterObject *)PyArray_IterNew(arr);
- if (multi->iters[i] == NULL) {
- err = 1;
- break;
- }
- Py_DECREF(arr);
- }
+ for (i = 0; i < n; ++i) {
+ args_impl[i] = va_arg(va, PyObject *);
}
va_end(va);
- if (!err && PyArray_Broadcast(multi) < 0) {
- err = 1;
- }
- if (err) {
- Py_DECREF(multi);
- return NULL;
- }
- PyArray_MultiIter_RESET(multi);
- return (PyObject *)multi;
+ return multiiter_new_impl(n, args_impl);
}
-static PyObject *
-arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args, PyObject *kwds)
+static PyObject*
+arraymultiter_new(PyTypeObject *NPY_UNUSED(subtype), PyObject *args,
+ PyObject *kwds)
{
+ PyObject *ret, *fast_seq;
+ Py_ssize_t n;
- Py_ssize_t n = 0;
- Py_ssize_t i, j, k;
- PyArrayMultiIterObject *multi;
- PyObject *arr;
-
- if (kwds != NULL) {
+ if (kwds != NULL && PyDict_Size(kwds) > 0) {
PyErr_SetString(PyExc_ValueError,
"keyword arguments not accepted.");
return NULL;
}
- for (j = 0; j < PyTuple_Size(args); ++j) {
- PyObject *obj = PyTuple_GET_ITEM(args, j);
-
- if (PyObject_IsInstance(obj, (PyObject *)&PyArrayMultiIter_Type)) {
- /*
- * If obj is a multi-iterator, all its arrays will be added
- * to the new multi-iterator.
- */
- n += ((PyArrayMultiIterObject *)obj)->numiter;
- }
- else {
- /* If not, will try to convert it to a single array */
- ++n;
- }
- }
- if (n < 1 || n > NPY_MAXARGS) {
- if (PyErr_Occurred()) {
- return NULL;
- }
- PyErr_Format(PyExc_ValueError,
- "Need at least 1 and at most %d "
- "array objects.", NPY_MAXARGS);
+ fast_seq = PySequence_Fast(args, ""); // needed for pypy
+ if (fast_seq == NULL) {
return NULL;
}
-
- multi = PyArray_malloc(sizeof(PyArrayMultiIterObject));
- if (multi == NULL) {
- return PyErr_NoMemory();
- }
- PyObject_Init((PyObject *)multi, &PyArrayMultiIter_Type);
-
- multi->numiter = n;
- multi->index = 0;
- i = 0;
- for (j = 0; j < PyTuple_GET_SIZE(args); ++j) {
- PyObject *obj = PyTuple_GET_ITEM(args, j);
- PyArrayIterObject *it;
-
- if (PyObject_IsInstance(obj, (PyObject *)&PyArrayMultiIter_Type)) {
- PyArrayMultiIterObject *mit = (PyArrayMultiIterObject *)obj;
-
- for (k = 0; k < mit->numiter; ++k) {
- arr = (PyObject *)mit->iters[k]->ao;
- assert (arr != NULL);
- it = (PyArrayIterObject *)PyArray_IterNew(arr);
- if (it == NULL) {
- goto fail;
- }
- multi->iters[i++] = it;
- }
- }
- else {
- arr = PyArray_FROM_O(obj);
- if (arr == NULL) {
- goto fail;
- }
- it = (PyArrayIterObject *)PyArray_IterNew(arr);
- if (it == NULL) {
- goto fail;
- }
- multi->iters[i++] = it;
- Py_DECREF(arr);
- }
- }
- assert (i == n);
- if (PyArray_Broadcast(multi) < 0) {
- goto fail;
+ n = PySequence_Fast_GET_SIZE(fast_seq);
+ if (n > NPY_MAXARGS) {
+ Py_DECREF(fast_seq);
+ return multiiter_wrong_number_of_args();
}
- PyArray_MultiIter_RESET(multi);
- return (PyObject *)multi;
-
- fail:
- Py_DECREF(multi);
- return NULL;
+ ret = multiiter_new_impl(n, PySequence_Fast_ITEMS(fast_seq));
+ Py_DECREF(fast_seq);
+ return ret;
}
static PyObject *
@@ -1826,7 +1665,7 @@ static char* _set_constant(PyArrayNeighborhoodIterObject* iter,
/* set the dataptr from its current coordinates */
static char*
-get_ptr_constant(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_constant(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS];
@@ -1881,7 +1720,7 @@ __npy_pos_remainder(npy_intp i, npy_intp n)
/* set the dataptr from its current coordinates */
static char*
-get_ptr_mirror(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_mirror(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
@@ -1915,7 +1754,7 @@ __npy_euclidean_division(npy_intp i, npy_intp n)
_coordinates[c] = lb + __npy_euclidean_division(bd, p->limits_sizes[c]);
static char*
-get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates)
+get_ptr_circular(PyArrayIterObject* _iter, const npy_intp *coordinates)
{
int i;
npy_intp bd, _coordinates[NPY_MAXDIMS], lb;
@@ -1937,7 +1776,7 @@ get_ptr_circular(PyArrayIterObject* _iter, npy_intp *coordinates)
* A Neighborhood Iterator object.
*/
NPY_NO_EXPORT PyObject*
-PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds,
+PyArray_NeighborhoodIterNew(PyArrayIterObject *x, const npy_intp *bounds,
int mode, PyArrayObject* fill)
{
int i;
@@ -1949,7 +1788,8 @@ PyArray_NeighborhoodIterNew(PyArrayIterObject *x, npy_intp *bounds,
}
PyObject_Init((PyObject *)ret, &PyArrayNeighborhoodIter_Type);
- array_iter_base_init((PyArrayIterObject*)ret, x->ao);
+ Py_INCREF(x->ao); /* PyArray_RawIterBaseInit steals a reference */
+ PyArray_RawIterBaseInit((PyArrayIterObject*)ret, x->ao);
Py_INCREF(x);
ret->_internal_iter = x;
diff --git a/numpy/core/src/multiarray/iterators.h b/numpy/core/src/multiarray/iterators.h
index 04f57c885..d942f45b8 100644
--- a/numpy/core/src/multiarray/iterators.h
+++ b/numpy/core/src/multiarray/iterators.h
@@ -1,21 +1,13 @@
#ifndef _NPY_ARRAYITERATORS_H_
#define _NPY_ARRAYITERATORS_H_
-/*
- * Parses an index that has no fancy indexing. Populates
- * out_dimensions, out_strides, and out_offset.
- */
-NPY_NO_EXPORT int
-parse_index(PyArrayObject *self, PyObject *op,
- npy_intp *out_dimensions,
- npy_intp *out_strides,
- npy_intp *out_offset,
- int check_index);
-
NPY_NO_EXPORT PyObject
*iter_subscript(PyArrayIterObject *, PyObject *);
NPY_NO_EXPORT int
iter_ass_subscript(PyArrayIterObject *, PyObject *, PyObject *);
+NPY_NO_EXPORT void
+PyArray_RawIterBaseInit(PyArrayIterObject *it, PyArrayObject *ao);
+
#endif
diff --git a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
index 159bb4103..63b2a8842 100644
--- a/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
+++ b/numpy/core/src/multiarray/lowlevel_strided_loops.c.src
@@ -82,7 +82,7 @@
/**begin repeat
* #elsize = 1, 2, 4, 8, 16#
* #elsize_half = 0, 1, 2, 4, 8#
- * #type = npy_uint8, npy_uint16, npy_uint32, npy_uint64, npy_uint128#
+ * #type = npy_uint8, npy_uint16, npy_uint32, npy_uint64, npy_uint64#
*/
/**begin repeat1
* #oper = strided_to_strided, strided_to_contig,
@@ -119,10 +119,10 @@ static void
npy_intp N, npy_intp NPY_UNUSED(src_itemsize),
NpyAuxData *NPY_UNUSED(data))
{
-#if @is_aligned@ && @elsize@ != 16
+#if @is_aligned@
/* sanity check */
- assert(npy_is_aligned(dst, _ALIGN(@type@)));
- assert(npy_is_aligned(src, _ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@)));
#endif
/*printf("fn @prefix@_@oper@_size@elsize@\n");*/
while (N > 0) {
@@ -201,8 +201,8 @@ static NPY_GCC_OPT_3 void
}
#if @is_aligned@ && @elsize@ != 16
/* sanity check */
- assert(npy_is_aligned(dst, _ALIGN(@type@)));
- assert(npy_is_aligned(src, _ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(dst, _UINT_ALIGN(@type@)));
+ assert(N == 0 || npy_is_aligned(src, _UINT_ALIGN(@type@)));
#endif
#if @elsize@ == 1 && @dst_contig@
memset(dst, *src, N);
@@ -417,32 +417,31 @@ PyArray_GetStridedCopyFn(int aligned, npy_intp src_stride,
#if !NPY_USE_UNALIGNED_ACCESS
}
else {
- /* contiguous dst */
- if (itemsize != 0 && dst_stride == itemsize) {
- /* contiguous src */
- if (itemsize != 0 && src_stride == itemsize) {
- return &_contig_to_contig;
- }
- /* general src */
- else {
- switch (itemsize) {
- case 1:
- return &_aligned_strided_to_contig_size1;
+ if (itemsize != 0) {
+ if (dst_stride == itemsize) {
+ /* contiguous dst */
+ if (src_stride == itemsize) {
+ /* contiguous src, dst */
+ return &_contig_to_contig;
+ }
+ else {
+ /* general src */
+ switch (itemsize) {
+ case 1:
+ return &_aligned_strided_to_contig_size1;
/**begin repeat
* #elsize = 2, 4, 8, 16#
*/
- case @elsize@:
- return &_strided_to_contig_size@elsize@;
+ case @elsize@:
+ return &_strided_to_contig_size@elsize@;
/**end repeat**/
+ }
}
- }
- return &_strided_to_strided;
- }
- /* general dst */
- else {
- /* contiguous src */
- if (itemsize != 0 && src_stride == itemsize) {
+ return &_strided_to_strided;
+ }
+ else if (src_stride == itemsize) {
+ /* contiguous src, general dst */
switch (itemsize) {
case 1:
return &_aligned_contig_to_strided_size1;
@@ -456,18 +455,18 @@ PyArray_GetStridedCopyFn(int aligned, npy_intp src_stride,
return &_strided_to_strided;
}
- /* general src */
- else {
- switch (itemsize) {
- case 1:
- return &_aligned_strided_to_strided_size1;
+ }
+ else {
+ /* general src, dst */
+ switch (itemsize) {
+ case 1:
+ return &_aligned_strided_to_strided_size1;
/**begin repeat
* #elsize = 2, 4, 8, 16#
*/
- case @elsize@:
- return &_strided_to_strided_size@elsize@;
+ case @elsize@:
+ return &_strided_to_strided_size@elsize@;
/**end repeat**/
- }
}
}
}
@@ -592,7 +591,7 @@ NPY_NO_EXPORT PyArray_StridedUnaryOp *
/* contiguous dst */
if (itemsize != 0 && dst_stride == itemsize) {
/* contiguous src */
- if (itemsize != 0 && src_stride == itemsize) {
+ if (src_stride == itemsize) {
switch (itemsize) {
/**begin repeat1
* #elsize = 2, 4, 8, 16#
@@ -808,12 +807,8 @@ static NPY_GCC_OPT_3 void
#if @aligned@
/* sanity check */
-# if !@is_complex1@
- assert(npy_is_aligned(src, _ALIGN(_TYPE1)));
-# endif
-# if !@is_complex2@
- assert(npy_is_aligned(dst, _ALIGN(_TYPE2)));
-# endif
+ assert(N == 0 || npy_is_aligned(src, _ALIGN(_TYPE1)));
+ assert(N == 0 || npy_is_aligned(dst, _ALIGN(_TYPE2)));
#endif
/*printf("@prefix@_cast_@name1@_to_@name2@\n");*/
@@ -1425,7 +1420,7 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
while (itersize--) {
char * self_ptr;
npy_intp indval = *((npy_intp*)ind_ptr);
- assert(npy_is_aligned(ind_ptr, _ALIGN(npy_intp)));
+ assert(npy_is_aligned(ind_ptr, _UINT_ALIGN(npy_intp)));
#if @isget@
if (check_and_adjust_index(&indval, fancy_dim, 0, _save) < 0 ) {
return -1;
@@ -1439,8 +1434,8 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
#if @isget@
#if @elsize@
- assert(npy_is_aligned(result_ptr, _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@)));
*(@copytype@ *)result_ptr = *(@copytype@ *)self_ptr;
#else
copyswap(result_ptr, self_ptr, 0, self);
@@ -1448,8 +1443,8 @@ mapiter_trivial_@name@(PyArrayObject *self, PyArrayObject *ind,
#else /* !@isget@ */
#if @elsize@
- assert(npy_is_aligned(result_ptr, _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(result_ptr, _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr, _UINT_ALIGN(@copytype@)));
*(@copytype@ *)self_ptr = *(@copytype@ *)result_ptr;
#else
copyswap(self_ptr, result_ptr, 0, self);
@@ -1571,7 +1566,7 @@ mapiter_@name@(PyArrayMapIterObject *mit)
for (i=0; i < @numiter@; i++) {
npy_intp indval = *((npy_intp*)outer_ptrs[i]);
assert(npy_is_aligned(outer_ptrs[i],
- _ALIGN(npy_intp)));
+ _UINT_ALIGN(npy_intp)));
#if @isget@ && @one_iter@
if (check_and_adjust_index(&indval, fancy_dims[i],
@@ -1591,16 +1586,20 @@ mapiter_@name@(PyArrayMapIterObject *mit)
#if @isget@
#if @elsize@
- assert(npy_is_aligned(outer_ptrs[i], _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(outer_ptrs[i],
+ _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr,
+ _UINT_ALIGN(@copytype@)));
*(@copytype@ *)(outer_ptrs[i]) = *(@copytype@ *)self_ptr;
#else
copyswap(outer_ptrs[i], self_ptr, 0, array);
#endif
#else /* !@isget@ */
#if @elsize@
- assert(npy_is_aligned(outer_ptrs[i], _ALIGN(@copytype@)));
- assert(npy_is_aligned(self_ptr, _ALIGN(@copytype@)));
+ assert(npy_is_aligned(outer_ptrs[i],
+ _UINT_ALIGN(@copytype@)));
+ assert(npy_is_aligned(self_ptr,
+ _UINT_ALIGN(@copytype@)));
*(@copytype@ *)self_ptr = *(@copytype@ *)(outer_ptrs[i]);
#else
copyswap(self_ptr, outer_ptrs[i], 0, array);
diff --git a/numpy/core/src/multiarray/mapping.c b/numpy/core/src/multiarray/mapping.c
index 038c21c92..247864775 100644
--- a/numpy/core/src/multiarray/mapping.c
+++ b/numpy/core/src/multiarray/mapping.c
@@ -15,6 +15,7 @@
#include "common.h"
#include "ctors.h"
+#include "descriptor.h"
#include "iterators.h"
#include "mapping.h"
#include "lowlevel_strided_loops.h"
@@ -175,7 +176,7 @@ unpack_tuple(PyTupleObject *index, PyObject **result, npy_intp result_n)
/* Unpack a single scalar index, taking a new reference to match unpack_tuple */
static NPY_INLINE npy_intp
-unpack_scalar(PyObject *index, PyObject **result, npy_intp result_n)
+unpack_scalar(PyObject *index, PyObject **result, npy_intp NPY_UNUSED(result_n))
{
Py_INCREF(index);
result[0] = index;
@@ -611,9 +612,9 @@ prepare_index(PyArrayObject *self, PyObject *index,
/* Convert the boolean array into multiple integer ones */
n = _nonzero_indices((PyObject *)arr, nonzero_result);
- Py_DECREF(arr);
if (n < 0) {
+ Py_DECREF(arr);
goto failed_building_indices;
}
@@ -624,6 +625,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
for (i=0; i < n; i++) {
Py_DECREF(nonzero_result[i]);
}
+ Py_DECREF(arr);
goto failed_building_indices;
}
@@ -637,6 +639,7 @@ prepare_index(PyArrayObject *self, PyObject *index,
used_ndim += 1;
curr_idx += 1;
}
+ Py_DECREF(arr);
/* All added indices have 1 dimension */
if (fancy_ndim < 1) {
@@ -710,26 +713,26 @@ prepare_index(PyArrayObject *self, PyObject *index,
* to find the ellipsis value or append an ellipsis if necessary.
*/
if (used_ndim < PyArray_NDIM(self)) {
- if (index_type & HAS_ELLIPSIS) {
- indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim;
- used_ndim = PyArray_NDIM(self);
- new_ndim += indices[ellipsis_pos].value;
- }
- else {
- /*
- * There is no ellipsis yet, but it is not a full index
- * so we append an ellipsis to the end.
- */
- index_type |= HAS_ELLIPSIS;
- indices[curr_idx].object = NULL;
- indices[curr_idx].type = HAS_ELLIPSIS;
- indices[curr_idx].value = PyArray_NDIM(self) - used_ndim;
- ellipsis_pos = curr_idx;
-
- used_ndim = PyArray_NDIM(self);
- new_ndim += indices[curr_idx].value;
- curr_idx += 1;
- }
+ if (index_type & HAS_ELLIPSIS) {
+ indices[ellipsis_pos].value = PyArray_NDIM(self) - used_ndim;
+ used_ndim = PyArray_NDIM(self);
+ new_ndim += indices[ellipsis_pos].value;
+ }
+ else {
+ /*
+ * There is no ellipsis yet, but it is not a full index
+ * so we append an ellipsis to the end.
+ */
+ index_type |= HAS_ELLIPSIS;
+ indices[curr_idx].object = NULL;
+ indices[curr_idx].type = HAS_ELLIPSIS;
+ indices[curr_idx].value = PyArray_NDIM(self) - used_ndim;
+ ellipsis_pos = curr_idx;
+
+ used_ndim = PyArray_NDIM(self);
+ new_ndim += indices[curr_idx].value;
+ curr_idx += 1;
+ }
}
else if (used_ndim > PyArray_NDIM(self)) {
PyErr_SetString(PyExc_IndexError,
@@ -1064,7 +1067,8 @@ array_boolean_subscript(PyArrayObject *self,
/* Get a dtype transfer function */
NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
- if (PyArray_GetDTypeTransferFunction(IsUintAligned(self),
+ if (PyArray_GetDTypeTransferFunction(
+ IsUintAligned(self) && IsAligned(self),
fixed_strides[0], itemsize,
dtype, dtype,
0,
@@ -1128,8 +1132,8 @@ array_boolean_subscript(PyArrayObject *self,
1, &size, PyArray_STRIDES(ret), PyArray_BYTES(ret),
PyArray_FLAGS(self), (PyObject *)self, (PyObject *)tmp);
+ Py_DECREF(tmp);
if (ret == NULL) {
- Py_DECREF(tmp);
return NULL;
}
}
@@ -1253,7 +1257,8 @@ array_assign_boolean_subscript(PyArrayObject *self,
/* Get a dtype transfer function */
NpyIter_GetInnerFixedStrideArray(iter, fixed_strides);
if (PyArray_GetDTypeTransferFunction(
- IsUintAligned(self) && IsUintAligned(v),
+ IsUintAligned(self) && IsAligned(self) &&
+ IsUintAligned(v) && IsAligned(v),
v_stride, fixed_strides[0],
PyArray_DESCR(v), PyArray_DESCR(self),
0,
@@ -1389,54 +1394,14 @@ array_subscript_asarray(PyArrayObject *self, PyObject *op)
}
/*
- * Helper function for _get_field_view which turns a multifield
- * view into a "packed" copy, as done in numpy 1.15 and before.
- * In numpy 1.16 this function should be removed.
- */
-NPY_NO_EXPORT int
-_multifield_view_to_copy(PyArrayObject **view) {
- static PyObject *copyfunc = NULL;
- PyObject *viewcopy;
-
- /* return a repacked copy of the view */
- npy_cache_import("numpy.lib.recfunctions", "repack_fields", &copyfunc);
- if (copyfunc == NULL) {
- goto view_fail;
- }
-
- PyArray_CLEARFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
- viewcopy = PyObject_CallFunction(copyfunc, "O", *view);
- if (viewcopy == NULL) {
- goto view_fail;
- }
- Py_DECREF(*view);
- *view = (PyArrayObject*)viewcopy;
-
- /* warn when writing to the copy */
- PyArray_ENABLEFLAGS(*view, NPY_ARRAY_WARN_ON_WRITE);
- return 0;
-
-view_fail:
- Py_DECREF(*view);
- *view = NULL;
- return 0;
-}
-
-/*
* Attempts to subscript an array using a field name or list of field names.
*
- * If an error occurred, return 0 and set view to NULL. If the subscript is not
- * a string or list of strings, return -1 and set view to NULL. Otherwise
- * return 0 and set view to point to a new view into arr for the given fields.
- *
- * In numpy 1.15 and before, in the case of a list of field names the returned
- * view will actually be a copy by default, with fields packed together.
- * The `force_view` argument causes a view to be returned. This argument can be
- * removed in 1.16 when we plan to return a view always.
+ * ret = 0, view != NULL: view points to the requested fields of arr
+ * ret = 0, view == NULL: an error occurred
+ * ret = -1, view == NULL: unrecognized input, this is not a field index.
*/
NPY_NO_EXPORT int
-_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view,
- int force_view)
+_get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view)
{
*view = NULL;
@@ -1476,111 +1441,44 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view,
}
return 0;
}
+
/* next check for a list of field names */
else if (PySequence_Check(ind) && !PyTuple_Check(ind)) {
- int seqlen, i;
- PyObject *name = NULL, *tup;
- PyObject *fields, *names;
+ npy_intp seqlen, i;
PyArray_Descr *view_dtype;
seqlen = PySequence_Size(ind);
- /* quit if have a 0-d array (seqlen==-1) or a 0-len array */
+ /* quit if have a fake sequence-like, which errors on len()*/
if (seqlen == -1) {
PyErr_Clear();
return -1;
}
+ /* 0-len list is handled elsewhere as an integer index */
if (seqlen == 0) {
return -1;
}
- fields = PyDict_New();
- if (fields == NULL) {
- return 0;
- }
- names = PyTuple_New(seqlen);
- if (names == NULL) {
- Py_DECREF(fields);
- return 0;
- }
-
+ /* check the items are strings */
for (i = 0; i < seqlen; i++) {
- name = PySequence_GetItem(ind, i);
- if (name == NULL) {
- /* only happens for strange sequence objects */
+ npy_bool is_string;
+ PyObject *item = PySequence_GetItem(ind, i);
+ if (item == NULL) {
PyErr_Clear();
- Py_DECREF(fields);
- Py_DECREF(names);
return -1;
}
-
- if (!PyBaseString_Check(name)) {
- Py_DECREF(name);
- Py_DECREF(fields);
- Py_DECREF(names);
+ is_string = PyBaseString_Check(item);
+ Py_DECREF(item);
+ if (!is_string) {
return -1;
}
-
- tup = PyDict_GetItem(PyArray_DESCR(arr)->fields, name);
- if (tup == NULL){
- PyObject *errmsg = PyUString_FromString("no field of name ");
- PyUString_ConcatAndDel(&errmsg, name);
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
- Py_DECREF(fields);
- Py_DECREF(names);
- return 0;
- }
- /* disallow use of titles as index */
- if (PyTuple_Size(tup) == 3) {
- PyObject *title = PyTuple_GET_ITEM(tup, 2);
- int titlecmp = PyObject_RichCompareBool(title, name, Py_EQ);
- if (titlecmp == 1) {
- /* if title == name, we got a title, not a field name */
- PyErr_SetString(PyExc_KeyError,
- "cannot use field titles in multi-field index");
- }
- if (titlecmp != 0 || PyDict_SetItem(fields, title, tup) < 0) {
- Py_DECREF(name);
- Py_DECREF(fields);
- Py_DECREF(names);
- return 0;
- }
- }
- /* disallow duplicate field indices */
- if (PyDict_Contains(fields, name)) {
- PyObject *errmsg = PyUString_FromString(
- "duplicate field of name ");
- PyUString_ConcatAndDel(&errmsg, name);
- PyErr_SetObject(PyExc_ValueError, errmsg);
- Py_DECREF(errmsg);
- Py_DECREF(fields);
- Py_DECREF(names);
- return 0;
- }
- if (PyDict_SetItem(fields, name, tup) < 0) {
- Py_DECREF(name);
- Py_DECREF(fields);
- Py_DECREF(names);
- return 0;
- }
- if (PyTuple_SetItem(names, i, name) < 0) {
- Py_DECREF(fields);
- Py_DECREF(names);
- return 0;
- }
}
- view_dtype = PyArray_DescrNewFromType(NPY_VOID);
+ /* Call into the dtype subscript */
+ view_dtype = arraydescr_field_subset_view(PyArray_DESCR(arr), ind);
if (view_dtype == NULL) {
- Py_DECREF(fields);
- Py_DECREF(names);
return 0;
}
- view_dtype->elsize = PyArray_DESCR(arr)->elsize;
- view_dtype->names = names;
- view_dtype->fields = fields;
- view_dtype->flags = PyArray_DESCR(arr)->flags;
*view = (PyArrayObject*)PyArray_NewFromDescr_int(
Py_TYPE(arr),
@@ -1597,11 +1495,7 @@ _get_field_view(PyArrayObject *arr, PyObject *ind, PyArrayObject **view,
return 0;
}
- /* the code below can be replaced by "return 0" in 1.16 */
- if (force_view) {
- return 0;
- }
- return _multifield_view_to_copy(view);
+ return 0;
}
return -1;
}
@@ -1629,7 +1523,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
/* return fields if op is a string index */
if (PyDataType_HASFIELDS(PyArray_DESCR(self))) {
PyArrayObject *view;
- int ret = _get_field_view(self, op, &view, 0);
+ int ret = _get_field_view(self, op, &view);
if (ret == 0){
if (view == NULL) {
return NULL;
@@ -1805,7 +1699,7 @@ array_subscript(PyArrayObject *self, PyObject *op)
PyArray_SHAPE(tmp_arr),
PyArray_STRIDES(tmp_arr),
PyArray_BYTES(tmp_arr),
- PyArray_FLAGS(self),
+ PyArray_FLAGS(tmp_arr),
(PyObject *)self, (PyObject *)tmp_arr);
Py_DECREF(tmp_arr);
if (result == NULL) {
@@ -1911,7 +1805,7 @@ array_assign_subscript(PyArrayObject *self, PyObject *ind, PyObject *op)
/* field access */
if (PyDataType_HASFIELDS(PyArray_DESCR(self))){
PyArrayObject *view;
- int ret = _get_field_view(self, ind, &view, 1);
+ int ret = _get_field_view(self, ind, &view);
if (ret == 0){
if (view == NULL) {
return -1;
@@ -2622,6 +2516,7 @@ PyArray_MapIterCheckIndices(PyArrayMapIterObject *mit)
indval = *((npy_intp*)data);
if (check_and_adjust_index(&indval,
outer_dim, outer_axis, _save) < 0) {
+ Py_DECREF(intp_type);
return -1;
}
data += stride;
@@ -2722,7 +2617,8 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
PyArrayObject *original_extra_op = extra_op;
PyArrayObject *index_arrays[NPY_MAXDIMS];
- PyArray_Descr *dtypes[NPY_MAXDIMS];
+ PyArray_Descr *intp_descr;
+ PyArray_Descr *dtypes[NPY_MAXDIMS]; /* borrowed references */
npy_uint32 op_flags[NPY_MAXDIMS];
npy_uint32 outer_flags;
@@ -2735,9 +2631,15 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
int nops;
int uses_subspace;
+ intp_descr = PyArray_DescrFromType(NPY_INTP);
+ if (intp_descr == NULL) {
+ return NULL;
+ }
+
/* create new MapIter object */
mit = (PyArrayMapIterObject *)PyArray_malloc(sizeof(PyArrayMapIterObject));
if (mit == NULL) {
+ Py_DECREF(intp_descr);
return NULL;
}
/* set all attributes of mapiter to zero */
@@ -2767,6 +2669,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
mit->nd_fancy = fancy_ndim;
if (mapiter_fill_info(mit, indices, index_num, arr) < 0) {
Py_DECREF(mit);
+ Py_DECREF(intp_descr);
return NULL;
}
@@ -2776,7 +2679,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
for (i=0; i < index_num; i++) {
if (indices[i].type & HAS_FANCY) {
index_arrays[mit->numiter] = (PyArrayObject *)indices[i].object;
- dtypes[mit->numiter] = PyArray_DescrFromType(NPY_INTP);
+ dtypes[mit->numiter] = intp_descr;
op_flags[mit->numiter] = (NPY_ITER_NBO |
NPY_ITER_ALIGNED |
@@ -2799,9 +2702,10 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
PyArray_DescrFromType(NPY_INTP), 0);
if (index_arrays[0] == NULL) {
Py_DECREF(mit);
+ Py_DECREF(intp_descr);
return NULL;
}
- dtypes[0] = PyArray_DescrFromType(NPY_INTP);
+ dtypes[0] = intp_descr;
op_flags[0] = NPY_ITER_NBO | NPY_ITER_ALIGNED | NPY_ITER_READONLY;
mit->fancy_dims[0] = 1;
@@ -3031,7 +2935,6 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
nops += 1;
index_arrays[mit->numiter] = extra_op;
- Py_INCREF(extra_op_dtype);
dtypes[mit->numiter] = extra_op_dtype;
op_flags[mit->numiter] = (extra_op_flags |
NPY_ITER_ALLOCATE |
@@ -3057,9 +2960,6 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
}
/* NpyIter cleanup and information: */
- for (i=0; i < nops; i++) {
- Py_DECREF(dtypes[i]);
- }
if (dummy_array) {
Py_DECREF(index_arrays[0]);
}
@@ -3145,6 +3045,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
/* Can now return early if no subspace is being used */
if (!uses_subspace) {
Py_XDECREF(extra_op);
+ Py_DECREF(intp_descr);
return (PyObject *)mit;
}
@@ -3214,6 +3115,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
}
Py_XDECREF(extra_op);
+ Py_DECREF(intp_descr);
return (PyObject *)mit;
fail:
@@ -3282,6 +3184,7 @@ PyArray_MapIterNew(npy_index_info *indices , int index_num, int index_type,
finish:
Py_XDECREF(extra_op);
+ Py_DECREF(intp_descr);
Py_DECREF(mit);
return NULL;
}
diff --git a/numpy/core/src/multiarray/methods.c b/numpy/core/src/multiarray/methods.c
index cdbd0d6ae..e5845f2f6 100644
--- a/numpy/core/src/multiarray/methods.c
+++ b/numpy/core/src/multiarray/methods.c
@@ -6,8 +6,10 @@
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
+#include "arrayobject.h"
#include "numpy/arrayscalars.h"
+#include "arrayfunction_override.h"
#include "npy_config.h"
#include "npy_pycompat.h"
#include "npy_import.h"
@@ -187,7 +189,7 @@ array_reshape(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
if (n <= 1) {
- if (PyTuple_GET_ITEM(args, 0) == Py_None) {
+ if (n != 0 && PyTuple_GET_ITEM(args, 0) == Py_None) {
return PyArray_View(self, NULL, NULL);
}
if (!PyArg_ParseTuple(args, "O&:reshape", PyArray_IntpConverter,
@@ -363,6 +365,7 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
npy_cache_import("numpy.core._internal", "_getfield_is_safe",
&checkfunc);
if (checkfunc == NULL) {
+ Py_DECREF(typed);
return NULL;
}
@@ -370,6 +373,7 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
safe = PyObject_CallFunction(checkfunc, "OOi", PyArray_DESCR(self),
typed, offset);
if (safe == NULL) {
+ Py_DECREF(typed);
return NULL;
}
Py_DECREF(safe);
@@ -380,14 +384,17 @@ PyArray_GetField(PyArrayObject *self, PyArray_Descr *typed, int offset)
/* check that values are valid */
if (typed_elsize > self_elsize) {
PyErr_SetString(PyExc_ValueError, "new type is larger than original type");
+ Py_DECREF(typed);
return NULL;
}
if (offset < 0) {
PyErr_SetString(PyExc_ValueError, "offset is negative");
+ Py_DECREF(typed);
return NULL;
}
if (offset > self_elsize - typed_elsize) {
PyErr_SetString(PyExc_ValueError, "new type plus offset is larger than original type");
+ Py_DECREF(typed);
return NULL;
}
@@ -432,6 +439,7 @@ PyArray_SetField(PyArrayObject *self, PyArray_Descr *dtype,
int retval = 0;
if (PyArray_FailUnlessWriteable(self, "assignment destination") < 0) {
+ Py_DECREF(dtype);
return -1;
}
@@ -576,15 +584,18 @@ array_tofile(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
+ file = NpyPath_PathlikeToFspath(file);
+ if (file == NULL) {
+ return NULL;
+ }
if (PyBytes_Check(file) || PyUnicode_Check(file)) {
- file = npy_PyFile_OpenFile(file, "wb");
+ Py_SETREF(file, npy_PyFile_OpenFile(file, "wb"));
if (file == NULL) {
return NULL;
}
own = 1;
}
else {
- Py_INCREF(file);
own = 0;
}
@@ -823,8 +834,8 @@ array_astype(PyArrayObject *self, PyObject *args, PyObject *kwds)
PyArrayObject *ret;
/* If the requested dtype is flexible, adapt it */
- PyArray_AdaptFlexibleDType((PyObject *)self, PyArray_DESCR(self),
- &dtype);
+ dtype = PyArray_AdaptFlexibleDType((PyObject *)self,
+ PyArray_DESCR(self), dtype);
if (dtype == NULL) {
return NULL;
}
@@ -988,9 +999,59 @@ array_getarray(PyArrayObject *self, PyObject *args)
}
}
+/*
+ * Check whether any of a set of input and output args have a non-default
+ * __array_ufunc__ method. Return 1 if so, 0 if not, and -1 on error.
+ *
+ * This function primarily exists to help ndarray.__array_ufunc__ determine
+ * whether it can support a ufunc (which is the case only if none of the
+ * operands have an override). Thus, unlike in umath/override.c, the
+ * actual overrides are not needed and one can stop looking once one is found.
+ */
+static int
+any_array_ufunc_overrides(PyObject *args, PyObject *kwds)
+{
+ int i;
+ int nin, nout;
+ PyObject *out_kwd_obj;
+ PyObject *fast;
+ PyObject **in_objs, **out_objs;
-static PyObject *
-array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds)
+ /* check inputs */
+ nin = PyTuple_Size(args);
+ if (nin < 0) {
+ return -1;
+ }
+ fast = PySequence_Fast(args, "Could not convert object to sequence");
+ if (fast == NULL) {
+ return -1;
+ }
+ in_objs = PySequence_Fast_ITEMS(fast);
+ for (i = 0; i < nin; ++i) {
+ if (PyUFunc_HasOverride(in_objs[i])) {
+ Py_DECREF(fast);
+ return 1;
+ }
+ }
+ Py_DECREF(fast);
+ /* check outputs, if any */
+ nout = PyUFuncOverride_GetOutObjects(kwds, &out_kwd_obj, &out_objs);
+ if (nout < 0) {
+ return -1;
+ }
+ for (i = 0; i < nout; i++) {
+ if (PyUFunc_HasOverride(out_objs[i])) {
+ Py_DECREF(out_kwd_obj);
+ return 1;
+ }
+ }
+ Py_DECREF(out_kwd_obj);
+ return 0;
+}
+
+
+NPY_NO_EXPORT PyObject *
+array_ufunc(PyArrayObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
{
PyObject *ufunc, *method_name, *normal_args, *ufunc_method;
PyObject *result = NULL;
@@ -1009,7 +1070,7 @@ array_ufunc(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
/* ndarray cannot handle overrides itself */
- has_override = PyUFunc_HasOverride(normal_args, kwds);
+ has_override = any_array_ufunc_overrides(normal_args, kwds);
if (has_override < 0) {
goto cleanup;
}
@@ -1038,13 +1099,29 @@ cleanup:
return result;
}
-
static PyObject *
-array_function(PyArrayObject *self, PyObject *args, PyObject *kwds)
+array_function(PyArrayObject *NPY_UNUSED(self), PyObject *c_args, PyObject *c_kwds)
{
- NPY_FORWARD_NDARRAY_METHOD("_array_function");
-}
+ PyObject *func, *types, *args, *kwargs, *result;
+ static char *kwlist[] = {"func", "types", "args", "kwargs", NULL};
+
+ if (!PyArg_ParseTupleAndKeywords(
+ c_args, c_kwds, "OOOO:__array_function__", kwlist,
+ &func, &types, &args, &kwargs)) {
+ return NULL;
+ }
+ types = PySequence_Fast(
+ types,
+ "types argument to ndarray.__array_function__ must be iterable");
+ if (types == NULL) {
+ return NULL;
+ }
+
+ result = array_function_method_impl(func, types, args, kwargs);
+ Py_DECREF(types);
+ return result;
+}
static PyObject *
array_copy(PyArrayObject *self, PyObject *args, PyObject *kwds)
@@ -1102,7 +1179,7 @@ array_resize(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
- ret = PyArray_Resize(self, &newshape, refcheck, NPY_CORDER);
+ ret = PyArray_Resize(self, &newshape, refcheck, NPY_ANYORDER);
npy_free_cache_dim_obj(newshape);
if (ret == NULL) {
return NULL;
@@ -1314,6 +1391,7 @@ array_argsort(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
newd = PyArray_DescrNew(saved);
+ Py_DECREF(newd->names);
newd->names = new_name;
((PyArrayObject_fields *)self)->descr = newd;
}
@@ -1368,6 +1446,7 @@ array_argpartition(PyArrayObject *self, PyObject *args, PyObject *kwds)
return NULL;
}
newd = PyArray_DescrNew(saved);
+ Py_DECREF(newd->names);
newd->names = new_name;
((PyArrayObject_fields *)self)->descr = newd;
}
@@ -1471,7 +1550,6 @@ array_deepcopy(PyArrayObject *self, PyObject *args)
copy = PyImport_ImportModule("copy");
if (copy == NULL) {
Py_DECREF(copied_array);
- Py_DECREF(copy);
return NULL;
}
deepcopy = PyObject_GetAttrString(copy, "deepcopy");
@@ -1619,6 +1697,8 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
Notice because Python does not describe a mechanism to write
raw data to the pickle, this performs a copy to a string first
+ This issue is now addressed in protocol 5, where a buffer is serialized
+ instead of a string,
*/
state = PyTuple_New(5);
@@ -1652,6 +1732,153 @@ array_reduce(PyArrayObject *self, PyObject *NPY_UNUSED(args))
}
static PyObject *
+array_reduce_ex_regular(PyArrayObject *self, int NPY_UNUSED(protocol))
+{
+ PyObject *subclass_array_reduce = NULL;
+ PyObject *ret;
+
+ /* We do not call array_reduce directly but instead lookup and call
+ * the __reduce__ method to make sure that it's possible to customize
+ * pickling in sub-classes. */
+ subclass_array_reduce = PyObject_GetAttrString((PyObject *)self,
+ "__reduce__");
+ if (subclass_array_reduce == NULL) {
+ return NULL;
+ }
+ ret = PyObject_CallObject(subclass_array_reduce, NULL);
+ Py_DECREF(subclass_array_reduce);
+ return ret;
+}
+
+static PyObject *
+array_reduce_ex_picklebuffer(PyArrayObject *self, int protocol)
+{
+ PyObject *numeric_mod = NULL, *from_buffer_func = NULL;
+ PyObject *pickle_module = NULL, *picklebuf_class = NULL;
+ PyObject *picklebuf_args = NULL;
+ PyObject *buffer = NULL, *transposed_array = NULL;
+ PyArray_Descr *descr = NULL;
+ char order;
+
+ descr = PyArray_DESCR(self);
+
+ /* if the python version is below 3.8, the pickle module does not provide
+ * built-in support for protocol 5. We try importing the pickle5
+ * backport instead */
+#if PY_VERSION_HEX >= 0x03080000
+ /* we expect protocol 5 to be available in Python 3.8 */
+ pickle_module = PyImport_ImportModule("pickle");
+#elif PY_VERSION_HEX >= 0x03060000
+ pickle_module = PyImport_ImportModule("pickle5");
+ if (pickle_module == NULL) {
+ /* for protocol 5, raise a clear ImportError if pickle5 is not found
+ */
+ PyErr_SetString(PyExc_ImportError, "Using pickle protocol 5 "
+ "requires the pickle5 module for Python >=3.6 and <3.8");
+ return NULL;
+ }
+#else
+ PyErr_SetString(PyExc_ValueError, "pickle protocol 5 is not available "
+ "for Python < 3.6");
+ return NULL;
+#endif
+ if (pickle_module == NULL){
+ return NULL;
+ }
+ picklebuf_class = PyObject_GetAttrString(pickle_module, "PickleBuffer");
+ Py_DECREF(pickle_module);
+ if (picklebuf_class == NULL) {
+ return NULL;
+ }
+
+ /* Construct a PickleBuffer of the array */
+
+ if (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*) self) &&
+ PyArray_IS_F_CONTIGUOUS((PyArrayObject*) self)) {
+ /* if the array if Fortran-contiguous and not C-contiguous,
+ * the PickleBuffer instance will hold a view on the transpose
+ * of the initial array, that is C-contiguous. */
+ order = 'F';
+ transposed_array = PyArray_Transpose((PyArrayObject*)self, NULL);
+ picklebuf_args = Py_BuildValue("(N)", transposed_array);
+ }
+ else {
+ order = 'C';
+ picklebuf_args = Py_BuildValue("(O)", self);
+ }
+ if (picklebuf_args == NULL) {
+ Py_DECREF(picklebuf_class);
+ return NULL;
+ }
+
+ buffer = PyObject_CallObject(picklebuf_class, picklebuf_args);
+ Py_DECREF(picklebuf_class);
+ Py_DECREF(picklebuf_args);
+ if (buffer == NULL) {
+ /* Some arrays may refuse to export a buffer, in which case
+ * just fall back on regular __reduce_ex__ implementation
+ * (gh-12745).
+ */
+ PyErr_Clear();
+ return array_reduce_ex_regular(self, protocol);
+ }
+
+ /* Get the _frombuffer() function for reconstruction */
+
+ numeric_mod = PyImport_ImportModule("numpy.core.numeric");
+ if (numeric_mod == NULL) {
+ Py_DECREF(buffer);
+ return NULL;
+ }
+ from_buffer_func = PyObject_GetAttrString(numeric_mod,
+ "_frombuffer");
+ Py_DECREF(numeric_mod);
+ if (from_buffer_func == NULL) {
+ Py_DECREF(buffer);
+ return NULL;
+ }
+
+ return Py_BuildValue("N(NONN)",
+ from_buffer_func, buffer, (PyObject *)descr,
+ PyObject_GetAttrString((PyObject *)self, "shape"),
+ PyUnicode_FromStringAndSize(&order, 1));
+}
+
+static PyObject *
+array_reduce_ex(PyArrayObject *self, PyObject *args)
+{
+ int protocol;
+ PyArray_Descr *descr = NULL;
+
+ if (!PyArg_ParseTuple(args, "i", &protocol)) {
+ return NULL;
+ }
+
+ descr = PyArray_DESCR(self);
+ if ((protocol < 5) ||
+ (!PyArray_IS_C_CONTIGUOUS((PyArrayObject*)self) &&
+ !PyArray_IS_F_CONTIGUOUS((PyArrayObject*)self)) ||
+ PyDataType_FLAGCHK(descr, NPY_ITEM_HASOBJECT) ||
+ (PyType_IsSubtype(((PyObject*)self)->ob_type, &PyArray_Type) &&
+ ((PyObject*)self)->ob_type != &PyArray_Type) ||
+ descr->elsize == 0) {
+ /* The PickleBuffer class from version 5 of the pickle protocol
+ * can only be used for arrays backed by a contiguous data buffer.
+ * For all other cases we fallback to the generic array_reduce
+ * method that involves using a temporary bytes allocation. */
+ return array_reduce_ex_regular(self, protocol);
+ }
+ else if (protocol == 5) {
+ return array_reduce_ex_picklebuffer(self, protocol);
+ }
+ else {
+ PyErr_Format(PyExc_ValueError,
+ "__reduce_ex__ called with protocol > 5");
+ return NULL;
+ }
+}
+
+static PyObject *
array_setstate(PyArrayObject *self, PyObject *args)
{
PyObject *shape;
@@ -1785,12 +2012,14 @@ array_setstate(PyArrayObject *self, PyObject *args)
fa->nd = nd;
if (nd > 0) {
- fa->dimensions = npy_alloc_cache_dim(3*nd);
+ fa->dimensions = npy_alloc_cache_dim(2 * nd);
if (fa->dimensions == NULL) {
return PyErr_NoMemory();
}
fa->strides = PyArray_DIMS(self) + nd;
- memcpy(PyArray_DIMS(self), dimensions, sizeof(npy_intp)*nd);
+ if (nd) {
+ memcpy(PyArray_DIMS(self), dimensions, sizeof(npy_intp)*nd);
+ }
_array_fill_strides(PyArray_STRIDES(self), dimensions, nd,
PyArray_DESCR(self)->elsize,
(is_f_order ? NPY_ARRAY_F_CONTIGUOUS :
@@ -1810,10 +2039,12 @@ array_setstate(PyArrayObject *self, PyObject *args)
if (!IsAligned(self) || swap || (len <= 1000)) {
#endif
npy_intp num = PyArray_NBYTES(self);
+ if (num == 0) {
+ Py_DECREF(rawdata);
+ Py_RETURN_NONE;
+ }
fa->data = PyDataMem_NEW(num);
if (PyArray_DATA(self) == NULL) {
- fa->nd = 0;
- npy_free_cache_dim_array(self);
Py_DECREF(rawdata);
return PyErr_NoMemory();
}
@@ -1824,7 +2055,9 @@ array_setstate(PyArrayObject *self, PyObject *args)
PyArray_DESCR(self)->elsize,
datastr, PyArray_DESCR(self)->elsize,
numels, 1, self);
- if (!PyArray_ISEXTENDED(self)) {
+ if (!(PyArray_ISEXTENDED(self) ||
+ PyArray_DESCR(self)->metadata ||
+ PyArray_DESCR(self)->c_metadata)) {
fa->descr = PyArray_DescrFromType(
PyArray_DESCR(self)->type_num);
}
@@ -1853,11 +2086,13 @@ array_setstate(PyArrayObject *self, PyObject *args)
}
}
else {
- fa->data = PyDataMem_NEW(PyArray_NBYTES(self));
+ npy_intp num = PyArray_NBYTES(self);
+ int elsize = PyArray_DESCR(self)->elsize;
+ if (num == 0 || elsize == 0) {
+ Py_RETURN_NONE;
+ }
+ fa->data = PyDataMem_NEW(num);
if (PyArray_DATA(self) == NULL) {
- fa->nd = 0;
- fa->data = PyDataMem_NEW(PyArray_DESCR(self)->elsize);
- npy_free_cache_dim_array(self);
return PyErr_NoMemory();
}
if (PyDataType_FLAGCHK(PyArray_DESCR(self), NPY_NEEDS_INIT)) {
@@ -1879,37 +2114,22 @@ array_setstate(PyArrayObject *self, PyObject *args)
NPY_NO_EXPORT int
PyArray_Dump(PyObject *self, PyObject *file, int protocol)
{
- PyObject *cpick = NULL;
+ static PyObject *method = NULL;
PyObject *ret;
- if (protocol < 0) {
- protocol = 2;
- }
-
-#if defined(NPY_PY3K)
- cpick = PyImport_ImportModule("pickle");
-#else
- cpick = PyImport_ImportModule("cPickle");
-#endif
- if (cpick == NULL) {
+ npy_cache_import("numpy.core._methods", "_dump", &method);
+ if (method == NULL) {
return -1;
}
- if (PyBytes_Check(file) || PyUnicode_Check(file)) {
- file = npy_PyFile_OpenFile(file, "wb");
- if (file == NULL) {
- Py_DECREF(cpick);
- return -1;
- }
+ if (protocol < 0) {
+ ret = PyObject_CallFunction(method, "OO", self, file);
}
else {
- Py_INCREF(file);
+ ret = PyObject_CallFunction(method, "OOi", self, file, protocol);
}
- ret = PyObject_CallMethod(cpick, "dump", "OOi", self, file, protocol);
- Py_XDECREF(ret);
- Py_DECREF(file);
- Py_DECREF(cpick);
- if (PyErr_Occurred()) {
+ if (ret == NULL) {
return -1;
}
+ Py_DECREF(ret);
return 0;
}
@@ -1917,49 +2137,31 @@ PyArray_Dump(PyObject *self, PyObject *file, int protocol)
NPY_NO_EXPORT PyObject *
PyArray_Dumps(PyObject *self, int protocol)
{
- PyObject *cpick = NULL;
- PyObject *ret;
+ static PyObject *method = NULL;
+ npy_cache_import("numpy.core._methods", "_dumps", &method);
+ if (method == NULL) {
+ return NULL;
+ }
if (protocol < 0) {
- protocol = 2;
+ return PyObject_CallFunction(method, "O", self);
}
-#if defined(NPY_PY3K)
- cpick = PyImport_ImportModule("pickle");
-#else
- cpick = PyImport_ImportModule("cPickle");
-#endif
- if (cpick == NULL) {
- return NULL;
+ else {
+ return PyObject_CallFunction(method, "Oi", self, protocol);
}
- ret = PyObject_CallMethod(cpick, "dumps", "Oi", self, protocol);
- Py_DECREF(cpick);
- return ret;
}
static PyObject *
-array_dump(PyArrayObject *self, PyObject *args)
+array_dump(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
- PyObject *file = NULL;
- int ret;
-
- if (!PyArg_ParseTuple(args, "O:dump", &file)) {
- return NULL;
- }
- ret = PyArray_Dump((PyObject *)self, file, 2);
- if (ret < 0) {
- return NULL;
- }
- Py_RETURN_NONE;
+ NPY_FORWARD_NDARRAY_METHOD("_dump");
}
static PyObject *
-array_dumps(PyArrayObject *self, PyObject *args)
+array_dumps(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
- if (!PyArg_ParseTuple(args, "")) {
- return NULL;
- }
- return PyArray_Dumps((PyObject *)self, 2);
+ NPY_FORWARD_NDARRAY_METHOD("_dumps");
}
@@ -2182,28 +2384,13 @@ array_trace(PyArrayObject *self, PyObject *args, PyObject *kwds)
static PyObject *
array_clip(PyArrayObject *self, PyObject *args, PyObject *kwds)
{
- PyObject *min = NULL, *max = NULL;
- PyArrayObject *out = NULL;
- static char *kwlist[] = {"min", "max", "out", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "|OOO&:clip", kwlist,
- &min,
- &max,
- PyArray_OutputConverter, &out)) {
- return NULL;
- }
- if (max == NULL && min == NULL) {
- PyErr_SetString(PyExc_ValueError, "One of max or min must be given.");
- return NULL;
- }
- return PyArray_Return((PyArrayObject *)PyArray_Clip(self, min, max, out));
+ NPY_FORWARD_NDARRAY_METHOD("_clip");
}
static PyObject *
array_conjugate(PyArrayObject *self, PyObject *args)
{
-
PyArrayObject *out = NULL;
if (!PyArg_ParseTuple(args, "|O&:conjugate",
PyArray_OutputConverter,
@@ -2329,7 +2516,24 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds)
if (write_flag != Py_None) {
if (PyObject_IsTrue(write_flag)) {
if (_IsWriteable(self)) {
+ /*
+ * _IsWritable (and PyArray_UpdateFlags) allows flipping this,
+ * although the C-Api user who created the array may have
+ * chosen to make it non-writable for a good reason, so
+ * deprecate.
+ */
+ if ((PyArray_BASE(self) == NULL) &&
+ !PyArray_CHKFLAGS(self, NPY_ARRAY_OWNDATA) &&
+ !PyArray_CHKFLAGS(self, NPY_ARRAY_WRITEABLE)) {
+ /* 2017-05-03, NumPy 1.17.0 */
+ if (DEPRECATE("making a non-writeable array writeable "
+ "is deprecated for arrays without a base "
+ "which do not own their data.") < 0) {
+ return NULL;
+ }
+ }
PyArray_ENABLEFLAGS(self, NPY_ARRAY_WRITEABLE);
+ PyArray_CLEARFLAGS(self, NPY_ARRAY_WARN_ON_WRITE);
}
else {
fa->flags = flagback;
@@ -2342,9 +2546,9 @@ array_setflags(PyArrayObject *self, PyObject *args, PyObject *kwds)
}
else {
PyArray_CLEARFLAGS(self, NPY_ARRAY_WRITEABLE);
+ PyArray_CLEARFLAGS(self, NPY_ARRAY_WARN_ON_WRITE);
}
}
-
Py_RETURN_NONE;
}
@@ -2524,15 +2728,18 @@ NPY_NO_EXPORT PyMethodDef array_methods[] = {
{"__reduce__",
(PyCFunction) array_reduce,
METH_VARARGS, NULL},
+ {"__reduce_ex__",
+ (PyCFunction) array_reduce_ex,
+ METH_VARARGS, NULL},
{"__setstate__",
(PyCFunction) array_setstate,
METH_VARARGS, NULL},
{"dumps",
(PyCFunction) array_dumps,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"dump",
(PyCFunction) array_dump,
- METH_VARARGS, NULL},
+ METH_VARARGS | METH_KEYWORDS, NULL},
{"__complex__",
(PyCFunction) array_complex,
diff --git a/numpy/core/src/multiarray/methods.h b/numpy/core/src/multiarray/methods.h
index 7bf87f42d..7a9a24a00 100644
--- a/numpy/core/src/multiarray/methods.h
+++ b/numpy/core/src/multiarray/methods.h
@@ -1,9 +1,36 @@
#ifndef _NPY_ARRAY_METHODS_H_
#define _NPY_ARRAY_METHODS_H_
+#include "npy_import.h"
+
extern NPY_NO_EXPORT PyMethodDef array_methods[];
NPY_NO_EXPORT const char *
npy_casting_to_string(NPY_CASTING casting);
+/*
+ * Pathlib support, takes a borrowed reference and returns a new one.
+ * The new object may be the same as the old.
+ */
+static inline PyObject *
+NpyPath_PathlikeToFspath(PyObject *file)
+{
+ static PyObject *os_PathLike = NULL;
+ static PyObject *os_fspath = NULL;
+ npy_cache_import("numpy.compat", "os_PathLike", &os_PathLike);
+ if (os_PathLike == NULL) {
+ return NULL;
+ }
+ npy_cache_import("numpy.compat", "os_fspath", &os_fspath);
+ if (os_fspath == NULL) {
+ return NULL;
+ }
+
+ if (!PyObject_IsInstance(file, os_PathLike)) {
+ Py_INCREF(file);
+ return file;
+ }
+ return PyObject_CallFunctionObjArgs(os_fspath, file, NULL);
+}
+
#endif
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index 8f782cff6..441567049 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -34,6 +34,7 @@
NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
/* Internal APIs */
+#include "arrayfunction_override.h"
#include "arraytypes.h"
#include "arrayobject.h"
#include "hashdescr.h"
@@ -72,10 +73,10 @@ NPY_NO_EXPORT int NPY_NUMUSERTYPES = 0;
*****************************************************************************
*/
#include "funcs.inc"
-#include "loops.h"
#include "umathmodule.h"
NPY_NO_EXPORT int initscalarmath(PyObject *);
+NPY_NO_EXPORT int set_matmul_flags(PyObject *d); /* in ufunc_object.c */
/*
* global variable to determine if legacy printing is enabled, accessible from
@@ -129,7 +130,7 @@ PyArray_GetPriority(PyObject *obj, double default_)
* Multiply a List of ints
*/
NPY_NO_EXPORT int
-PyArray_MultiplyIntList(int *l1, int n)
+PyArray_MultiplyIntList(int const *l1, int n)
{
int s = 1;
@@ -143,7 +144,7 @@ PyArray_MultiplyIntList(int *l1, int n)
* Multiply a List
*/
NPY_NO_EXPORT npy_intp
-PyArray_MultiplyList(npy_intp *l1, int n)
+PyArray_MultiplyList(npy_intp const *l1, int n)
{
npy_intp s = 1;
@@ -179,7 +180,7 @@ PyArray_OverflowMultiplyList(npy_intp *l1, int n)
* Produce a pointer into array
*/
NPY_NO_EXPORT void *
-PyArray_GetPtr(PyArrayObject *obj, npy_intp* ind)
+PyArray_GetPtr(PyArrayObject *obj, npy_intp const* ind)
{
int n = PyArray_NDIM(obj);
npy_intp *strides = PyArray_STRIDES(obj);
@@ -195,7 +196,7 @@ PyArray_GetPtr(PyArrayObject *obj, npy_intp* ind)
* Compare Lists
*/
NPY_NO_EXPORT int
-PyArray_CompareLists(npy_intp *l1, npy_intp *l2, int n)
+PyArray_CompareLists(npy_intp const *l1, npy_intp const *l2, int n)
{
int i;
@@ -272,7 +273,9 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd,
}
*((char ****)ptr) = ptr3;
}
- memcpy(dims, PyArray_DIMS(ap), nd*sizeof(npy_intp));
+ if (nd) {
+ memcpy(dims, PyArray_DIMS(ap), nd*sizeof(npy_intp));
+ }
*op = (PyObject *)ap;
return 0;
}
@@ -283,45 +286,26 @@ PyArray_AsCArray(PyObject **op, void *ptr, npy_intp *dims, int nd,
* Convert to a 1D C-array
*/
NPY_NO_EXPORT int
-PyArray_As1D(PyObject **op, char **ptr, int *d1, int typecode)
+PyArray_As1D(PyObject **NPY_UNUSED(op), char **NPY_UNUSED(ptr),
+ int *NPY_UNUSED(d1), int NPY_UNUSED(typecode))
{
- npy_intp newd1;
- PyArray_Descr *descr;
- static const char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
-
/* 2008-07-14, 1.5 */
- if (DEPRECATE(msg) < 0) {
- return -1;
- }
- descr = PyArray_DescrFromType(typecode);
- if (PyArray_AsCArray(op, (void *)ptr, &newd1, 1, descr) == -1) {
- return -1;
- }
- *d1 = (int) newd1;
- return 0;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_As1D: use PyArray_AsCArray.");
+ return -1;
}
/*NUMPY_API
* Convert to a 2D C-array
*/
NPY_NO_EXPORT int
-PyArray_As2D(PyObject **op, char ***ptr, int *d1, int *d2, int typecode)
+PyArray_As2D(PyObject **NPY_UNUSED(op), char ***NPY_UNUSED(ptr),
+ int *NPY_UNUSED(d1), int *NPY_UNUSED(d2), int NPY_UNUSED(typecode))
{
- npy_intp newdims[2];
- PyArray_Descr *descr;
- static const char msg[] = "PyArray_As1D: use PyArray_AsCArray.";
-
/* 2008-07-14, 1.5 */
- if (DEPRECATE(msg) < 0) {
- return -1;
- }
- descr = PyArray_DescrFromType(typecode);
- if (PyArray_AsCArray(op, (void *)ptr, newdims, 2, descr) == -1) {
- return -1;
- }
- *d1 = (int ) newdims[0];
- *d2 = (int ) newdims[1];
- return 0;
+ PyErr_SetString(PyExc_NotImplementedError,
+ "PyArray_As2D: use PyArray_AsCArray.");
+ return -1;
}
/* End Deprecated */
@@ -408,9 +392,12 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
npy_intp *arr_shape;
if (PyArray_NDIM(arrays[iarrays]) != ndim) {
- PyErr_SetString(PyExc_ValueError,
- "all the input arrays must have same "
- "number of dimensions");
+ PyErr_Format(PyExc_ValueError,
+ "all the input arrays must have same number of "
+ "dimensions, but the array at index %d has %d "
+ "dimension(s) and the array at index %d has %d "
+ "dimension(s)",
+ 0, ndim, iarrays, PyArray_NDIM(arrays[iarrays]));
return NULL;
}
arr_shape = PyArray_SHAPE(arrays[iarrays]);
@@ -422,10 +409,12 @@ PyArray_ConcatenateArrays(int narrays, PyArrayObject **arrays, int axis,
}
/* Validate that the rest of the dimensions match */
else if (shape[idim] != arr_shape[idim]) {
- PyErr_SetString(PyExc_ValueError,
- "all the input array dimensions "
- "except for the concatenation axis "
- "must match exactly");
+ PyErr_Format(PyExc_ValueError,
+ "all the input array dimensions for the "
+ "concatenation axis must match exactly, but "
+ "along dimension %d, the array at index %d has "
+ "size %d and the array at index %d has size %d",
+ idim, 0, shape[idim], iarrays, arr_shape[idim]);
return NULL;
}
}
@@ -833,7 +822,10 @@ PyArray_InnerProduct(PyObject *op1, PyObject *op2)
typenum = PyArray_ObjectType(op2, typenum);
typec = PyArray_DescrFromType(typenum);
if (typec == NULL) {
- PyErr_SetString(PyExc_TypeError, "Cannot find a common data type.");
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "Cannot find a common data type.");
+ }
goto fail;
}
@@ -919,7 +911,10 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
typenum = PyArray_ObjectType(op2, typenum);
typec = PyArray_DescrFromType(typenum);
if (typec == NULL) {
- PyErr_SetString(PyExc_TypeError, "Cannot find a common data type.");
+ if (!PyErr_Occurred()) {
+ PyErr_SetString(PyExc_TypeError,
+ "Cannot find a common data type.");
+ }
return NULL;
}
@@ -976,7 +971,7 @@ PyArray_MatrixProduct2(PyObject *op1, PyObject *op2, PyArrayObject* out)
for (i = 0; i < PyArray_NDIM(ap2) - 2; i++) {
dimensions[j++] = PyArray_DIMS(ap2)[i];
}
- if(PyArray_NDIM(ap2) > 1) {
+ if (PyArray_NDIM(ap2) > 1) {
dimensions[j++] = PyArray_DIMS(ap2)[PyArray_NDIM(ap2)-1];
}
@@ -1312,7 +1307,7 @@ PyArray_Correlate2(PyObject *op1, PyObject *op2, int mode)
*/
if (inverted) {
st = _pyarray_revert(ret);
- if(st) {
+ if (st) {
goto clean_ret;
}
}
@@ -1359,7 +1354,7 @@ PyArray_Correlate(PyObject *op1, PyObject *op2, int mode)
}
ret = _pyarray_correlate(ap1, ap2, typenum, mode, &unused);
- if(ret == NULL) {
+ if (ret == NULL) {
goto fail;
}
Py_DECREF(ap1);
@@ -1567,7 +1562,8 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
PyArrayObject *oparr = NULL, *ret = NULL;
npy_bool subok = NPY_FALSE;
npy_bool copy = NPY_TRUE;
- int ndmin = 0, nd;
+ int nd;
+ npy_intp ndmin = 0;
PyArray_Descr *type = NULL;
PyArray_Descr *oldtype = NULL;
NPY_ORDER order = NPY_KEEPORDER;
@@ -1638,17 +1634,15 @@ _array_fromobject(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws)
}
}
- /* copy=False with default dtype, order and ndim */
- if (STRIDING_OK(oparr, order)) {
- ret = oparr;
- Py_INCREF(ret);
- goto finish;
- }
+ /* copy=False with default dtype, order (any is OK) and ndim */
+ ret = oparr;
+ Py_INCREF(ret);
+ goto finish;
}
}
full_path:
- if(!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|O&O&O&O&i:array", kwd,
&op,
PyArray_DescrConverter2, &type,
PyArray_BoolConverter, &copy,
@@ -1746,7 +1740,7 @@ static PyObject *
array_copyto(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"dst","src","casting","where",NULL};
+ static char *kwlist[] = {"dst", "src", "casting", "where", NULL};
PyObject *wheremask_in = NULL;
PyArrayObject *dst = NULL, *src = NULL, *wheremask = NULL;
NPY_CASTING casting = NPY_SAME_KIND_CASTING;
@@ -1791,7 +1785,7 @@ static PyObject *
array_empty(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape","dtype","order",NULL};
+ static char *kwlist[] = {"shape", "dtype", "order", NULL};
PyArray_Descr *typecode = NULL;
PyArray_Dims shape = {NULL, 0};
NPY_ORDER order = NPY_CORDER;
@@ -1834,23 +1828,28 @@ static PyObject *
array_empty_like(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"prototype","dtype","order","subok",NULL};
+ static char *kwlist[] = {"prototype", "dtype", "order", "subok", "shape", NULL};
PyArrayObject *prototype = NULL;
PyArray_Descr *dtype = NULL;
NPY_ORDER order = NPY_KEEPORDER;
PyArrayObject *ret = NULL;
int subok = 1;
+ PyArray_Dims shape = {NULL, 0};
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&i:empty_like", kwlist,
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O&|O&O&iO&:empty_like", kwlist,
&PyArray_Converter, &prototype,
&PyArray_DescrConverter2, &dtype,
&PyArray_OrderConverter, &order,
- &subok)) {
+ &subok,
+ &PyArray_IntpConverter, &shape)) {
goto fail;
}
/* steals the reference to dtype if it's not NULL */
- ret = (PyArrayObject *)PyArray_NewLikeArray(prototype,
- order, dtype, subok);
+ ret = (PyArrayObject *)PyArray_NewLikeArrayWithShape(prototype, order, dtype,
+ shape.len, shape.ptr, subok);
+ if (!ret) {
+ goto fail;
+ }
Py_DECREF(prototype);
return (PyObject *)ret;
@@ -1869,7 +1868,7 @@ static PyObject *
array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"dtype","obj", NULL};
+ static char *kwlist[] = {"dtype", "obj", NULL};
PyArray_Descr *typecode;
PyObject *obj = NULL, *tmpobj = NULL;
int alloc = 0;
@@ -1945,7 +1944,7 @@ array_scalar(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
static PyObject *
array_zeros(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kwds)
{
- static char *kwlist[] = {"shape","dtype","order",NULL};
+ static char *kwlist[] = {"shape", "dtype", "order", NULL};
PyArray_Descr *typecode = NULL;
PyArray_Dims shape = {NULL, 0};
NPY_ORDER order = NPY_CORDER;
@@ -2043,54 +2042,81 @@ array_fromstring(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds
static PyObject *
array_fromfile(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *keywds)
{
- PyObject *file = NULL, *ret;
+ PyObject *file = NULL, *ret = NULL;
+ PyObject *err_type = NULL, *err_value = NULL, *err_traceback = NULL;
char *sep = "";
Py_ssize_t nin = -1;
- static char *kwlist[] = {"file", "dtype", "count", "sep", NULL};
+ static char *kwlist[] = {"file", "dtype", "count", "sep", "offset", NULL};
PyArray_Descr *type = NULL;
int own;
- npy_off_t orig_pos = 0;
+ npy_off_t orig_pos = 0, offset = 0;
FILE *fp;
if (!PyArg_ParseTupleAndKeywords(args, keywds,
- "O|O&" NPY_SSIZE_T_PYFMT "s:fromfile", kwlist,
- &file, PyArray_DescrConverter, &type, &nin, &sep)) {
+ "O|O&" NPY_SSIZE_T_PYFMT "s" NPY_OFF_T_PYFMT ":fromfile", kwlist,
+ &file, PyArray_DescrConverter, &type, &nin, &sep, &offset)) {
+ Py_XDECREF(type);
+ return NULL;
+ }
+
+ file = NpyPath_PathlikeToFspath(file);
+ if (file == NULL) {
+ return NULL;
+ }
+
+ if (offset != 0 && strcmp(sep, "") != 0) {
+ PyErr_SetString(PyExc_TypeError, "'offset' argument only permitted for binary files");
Py_XDECREF(type);
+ Py_DECREF(file);
return NULL;
}
if (PyString_Check(file) || PyUnicode_Check(file)) {
- file = npy_PyFile_OpenFile(file, "rb");
+ Py_SETREF(file, npy_PyFile_OpenFile(file, "rb"));
if (file == NULL) {
+ Py_XDECREF(type);
return NULL;
}
own = 1;
}
else {
- Py_INCREF(file);
own = 0;
}
fp = npy_PyFile_Dup2(file, "rb", &orig_pos);
if (fp == NULL) {
Py_DECREF(file);
+ Py_XDECREF(type);
return NULL;
}
+ if (npy_fseek(fp, offset, SEEK_CUR) != 0) {
+ PyErr_SetFromErrno(PyExc_IOError);
+ goto cleanup;
+ }
if (type == NULL) {
type = PyArray_DescrFromType(NPY_DEFAULT_TYPE);
}
ret = PyArray_FromFile(fp, type, (npy_intp) nin, sep);
+ /* If an exception is thrown in the call to PyArray_FromFile
+ * we need to clear it, and restore it later to ensure that
+ * we can cleanup the duplicated file descriptor properly.
+ */
+cleanup:
+ PyErr_Fetch(&err_type, &err_value, &err_traceback);
if (npy_PyFile_DupClose2(file, fp, orig_pos) < 0) {
+ npy_PyErr_ChainExceptions(err_type, err_value, err_traceback);
goto fail;
}
if (own && npy_PyFile_CloseFile(file) < 0) {
+ npy_PyErr_ChainExceptions(err_type, err_value, err_traceback);
goto fail;
}
+ PyErr_Restore(err_type, err_value, err_traceback);
Py_DECREF(file);
return ret;
fail:
Py_DECREF(file);
- Py_DECREF(ret);
+ Py_XDECREF(ret);
return NULL;
}
@@ -2303,154 +2329,6 @@ fail:
return NULL;
}
-
-
-/*
- * matmul
- *
- * Implements the protocol used by the '@' operator defined in PEP 364.
- * Not in the NUMPY API at this time, maybe later.
- *
- *
- * in1: Left hand side operand
- * in2: Right hand side operand
- * out: Either NULL, or an array into which the output should be placed.
- *
- * Returns NULL on error.
- */
-static PyObject *
-array_matmul(PyObject *NPY_UNUSED(m), PyObject *args, PyObject* kwds)
-{
- PyObject *in1, *in2, *out = NULL;
- char* kwlist[] = {"a", "b", "out", NULL };
- PyArrayObject *ap1, *ap2, *ret = NULL;
- NPY_ORDER order = NPY_KEEPORDER;
- NPY_CASTING casting = NPY_SAFE_CASTING;
- PyArray_Descr *dtype;
- int nd1, nd2, typenum;
- char *subscripts;
- PyArrayObject *ops[2];
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O:matmul", kwlist,
- &in1, &in2, &out)) {
- return NULL;
- }
-
- if (out != NULL) {
- if (out == Py_None) {
- out = NULL;
- }
- else if (!PyArray_Check(out)) {
- PyErr_SetString(PyExc_TypeError, "'out' must be an array");
- return NULL;
- }
- }
-
- dtype = PyArray_DescrFromObject(in1, NULL);
- dtype = PyArray_DescrFromObject(in2, dtype);
- if (dtype == NULL) {
- PyErr_SetString(PyExc_ValueError, "Cannot find a common data type.");
- return NULL;
- }
- typenum = dtype->type_num;
-
- if (typenum == NPY_OBJECT) {
- /* matmul is not currently implemented for object arrays */
- PyErr_SetString(PyExc_TypeError,
- "Object arrays are not currently supported");
- Py_DECREF(dtype);
- return NULL;
- }
-
- ap1 = (PyArrayObject *)PyArray_FromAny(in1, dtype, 0, 0,
- NPY_ARRAY_ALIGNED, NULL);
- if (ap1 == NULL) {
- return NULL;
- }
-
- Py_INCREF(dtype);
- ap2 = (PyArrayObject *)PyArray_FromAny(in2, dtype, 0, 0,
- NPY_ARRAY_ALIGNED, NULL);
- if (ap2 == NULL) {
- Py_DECREF(ap1);
- return NULL;
- }
-
- if (PyArray_NDIM(ap1) == 0 || PyArray_NDIM(ap2) == 0) {
- /* Scalars are rejected */
- PyErr_SetString(PyExc_ValueError,
- "Scalar operands are not allowed, use '*' instead");
- return NULL;
- }
-
- nd1 = PyArray_NDIM(ap1);
- nd2 = PyArray_NDIM(ap2);
-
-#if defined(HAVE_CBLAS)
- if (nd1 <= 2 && nd2 <= 2 &&
- (NPY_DOUBLE == typenum || NPY_CDOUBLE == typenum ||
- NPY_FLOAT == typenum || NPY_CFLOAT == typenum)) {
- return cblas_matrixproduct(typenum, ap1, ap2, (PyArrayObject *)out);
- }
-#endif
-
- /*
- * Use einsum for the stacked cases. This is a quick implementation
- * to avoid setting up the proper iterators. Einsum broadcasts, so
- * we need to check dimensions before the call.
- */
- if (nd1 == 1 && nd2 == 1) {
- /* vector vector */
- if (PyArray_DIM(ap1, 0) != PyArray_DIM(ap2, 0)) {
- dot_alignment_error(ap1, 0, ap2, 0);
- goto fail;
- }
- subscripts = "i, i";
- }
- else if (nd1 == 1) {
- /* vector matrix */
- if (PyArray_DIM(ap1, 0) != PyArray_DIM(ap2, nd2 - 2)) {
- dot_alignment_error(ap1, 0, ap2, nd2 - 2);
- goto fail;
- }
- subscripts = "i, ...ij";
- }
- else if (nd2 == 1) {
- /* matrix vector */
- if (PyArray_DIM(ap1, nd1 - 1) != PyArray_DIM(ap2, 0)) {
- dot_alignment_error(ap1, nd1 - 1, ap2, 0);
- goto fail;
- }
- subscripts = "...i, i";
- }
- else {
- /* matrix * matrix */
- if (PyArray_DIM(ap1, nd1 - 1) != PyArray_DIM(ap2, nd2 - 2)) {
- dot_alignment_error(ap1, nd1 - 1, ap2, nd2 - 2);
- goto fail;
- }
- subscripts = "...ij, ...jk";
- }
- ops[0] = ap1;
- ops[1] = ap2;
- ret = PyArray_EinsteinSum(subscripts, 2, ops, NULL, order, casting,
- (PyArrayObject *)out);
- Py_DECREF(ap1);
- Py_DECREF(ap2);
-
- /* If no output was supplied, possibly convert to a scalar */
- if (ret != NULL && out == NULL) {
- return PyArray_Return((PyArrayObject *)ret);
- }
- return (PyObject *)ret;
-
-fail:
- Py_XDECREF(ap1);
- Py_XDECREF(ap2);
- return NULL;
-}
-
-
static int
einsum_sub_op_from_str(PyObject *args, PyObject **str_obj, char **subscripts,
PyArrayObject **op)
@@ -2622,7 +2500,7 @@ einsum_sub_op_from_lists(PyObject *args,
"operand and a subscripts list to einsum");
return -1;
}
- else if(nop >= NPY_MAXARGS) {
+ else if (nop >= NPY_MAXARGS) {
PyErr_SetString(PyExc_ValueError, "too many operands");
return -1;
}
@@ -2857,7 +2735,7 @@ array_arange(PyObject *NPY_UNUSED(ignored), PyObject *args, PyObject *kws) {
static char *kwd[]= {"start", "stop", "step", "dtype", NULL};
PyArray_Descr *typecode = NULL;
- if(!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd,
+ if (!PyArg_ParseTupleAndKeywords(args, kws, "O|OOO&:arange", kwd,
&o_start,
&o_stop,
&o_step,
@@ -2895,7 +2773,7 @@ array__get_ndarray_c_version(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObje
{
static char *kwlist[] = {NULL};
- if(!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "", kwlist )) {
return NULL;
}
return PyInt_FromLong( (long) PyArray_GetNDArrayCVersion() );
@@ -2968,7 +2846,7 @@ array_set_string_function(PyObject *NPY_UNUSED(self), PyObject *args,
int repr = 1;
static char *kwlist[] = {"f", "repr", NULL};
- if(!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "|Oi:set_string_function", kwlist, &op, &repr)) {
return NULL;
}
/* reset the array_repr function to built-in */
@@ -2990,7 +2868,7 @@ array_set_ops_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args),
{
PyObject *oldops = NULL;
- if ((oldops = PyArray_GetNumericOps()) == NULL) {
+ if ((oldops = _PyArray_GetNumericOps()) == NULL) {
return NULL;
}
/*
@@ -3000,8 +2878,10 @@ array_set_ops_function(PyObject *NPY_UNUSED(self), PyObject *NPY_UNUSED(args),
*/
if (kwds && PyArray_SetNumericOps(kwds) == -1) {
Py_DECREF(oldops);
- PyErr_SetString(PyExc_ValueError,
+ if (PyErr_Occurred() == NULL) {
+ PyErr_SetString(PyExc_ValueError,
"one or more objects not callable");
+ }
return NULL;
}
return oldops;
@@ -3276,7 +3156,7 @@ array_promote_types(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyArray_Descr *d1 = NULL;
PyArray_Descr *d2 = NULL;
PyObject *ret = NULL;
- if(!PyArg_ParseTuple(args, "O&O&:promote_types",
+ if (!PyArg_ParseTuple(args, "O&O&:promote_types",
PyArray_DescrConverter2, &d1, PyArray_DescrConverter2, &d2)) {
goto finish;
}
@@ -3302,7 +3182,7 @@ array_min_scalar_type(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyArrayObject *array;
PyObject *ret = NULL;
- if(!PyArg_ParseTuple(args, "O:min_scalar_type", &array_in)) {
+ if (!PyArg_ParseTuple(args, "O:min_scalar_type", &array_in)) {
return NULL;
}
@@ -3379,12 +3259,13 @@ array_datetime_data(PyObject *NPY_UNUSED(dummy), PyObject *args)
PyArray_Descr *dtype;
PyArray_DatetimeMetaData *meta;
- if(!PyArg_ParseTuple(args, "O&:datetime_data",
+ if (!PyArg_ParseTuple(args, "O&:datetime_data",
PyArray_DescrConverter, &dtype)) {
return NULL;
}
meta = get_datetime_metadata_from_dtype(dtype);
+ Py_DECREF(dtype);
if (meta == NULL) {
return NULL;
}
@@ -3398,7 +3279,7 @@ new_buffer(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
int size;
- if(!PyArg_ParseTuple(args, "i:buffer", &size)) {
+ if (!PyArg_ParseTuple(args, "i:buffer", &size)) {
return NULL;
}
return PyBuffer_New(size);
@@ -3749,6 +3630,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
if (nargs == -1 || nargs > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
"len(args) must be < %d", NPY_MAXARGS - 1);
+ Py_DECREF(type);
goto err;
}
@@ -3756,6 +3638,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
for (i = 1; i < nargs; i++) {
PyObject* item = PySequence_GetItem(args, i-1);
if (item == NULL) {
+ Py_DECREF(type);
goto err;
}
broadcast_args[i] = item;
@@ -3764,6 +3647,7 @@ _vec_string_with_args(PyArrayObject* char_array, PyArray_Descr* type,
in_iter = (PyArrayMultiIterObject*)PyArray_MultiIterFromObjects
(broadcast_args, nargs, 0);
if (in_iter == NULL) {
+ Py_DECREF(type);
goto err;
}
n = in_iter->numiter;
@@ -3844,6 +3728,7 @@ _vec_string_no_args(PyArrayObject* char_array,
in_iter = (PyArrayIterObject*)PyArray_IterNew((PyObject*)char_array);
if (in_iter == NULL) {
+ Py_DECREF(type);
goto err;
}
@@ -3897,10 +3782,10 @@ _vec_string_no_args(PyArrayObject* char_array,
}
static PyObject *
-_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
+_vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUSED(kwds))
{
PyArrayObject* char_array = NULL;
- PyArray_Descr *type = NULL;
+ PyArray_Descr *type;
PyObject* method_name;
PyObject* args_seq = NULL;
@@ -3923,9 +3808,11 @@ _vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
else {
PyErr_SetString(PyExc_TypeError,
"string operation on non-string array");
+ Py_DECREF(type);
goto err;
}
if (method == NULL) {
+ Py_DECREF(type);
goto err;
}
@@ -3937,6 +3824,7 @@ _vec_string(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *kwds)
result = _vec_string_with_args(char_array, type, method, args_seq);
}
else {
+ Py_DECREF(type);
PyErr_SetString(PyExc_TypeError,
"'args' must be a sequence of arguments");
goto err;
@@ -4134,7 +4022,7 @@ array_shares_memory_impl(PyObject *args, PyObject *kwds, Py_ssize_t default_max_
}
else if (result == MEM_OVERLAP_TOO_HARD) {
if (raise_exceptions) {
- npy_cache_import("numpy.core._internal", "TooHardError",
+ npy_cache_import("numpy.core._exceptions", "TooHardError",
&too_hard_cls);
if (too_hard_cls) {
PyErr_SetString(too_hard_cls, "Exceeded max_work");
@@ -4193,6 +4081,9 @@ normalize_axis_index(PyObject *NPY_UNUSED(self), PyObject *args, PyObject *kwds)
}
static struct PyMethodDef array_module_methods[] = {
+ {"_get_implementing_args",
+ (PyCFunction)array__get_implementing_args,
+ METH_VARARGS, NULL},
{"_get_ndarray_c_version",
(PyCFunction)array__get_ndarray_c_version,
METH_VARARGS|METH_KEYWORDS, NULL},
@@ -4265,9 +4156,6 @@ static struct PyMethodDef array_module_methods[] = {
{"vdot",
(PyCFunction)array_vdot,
METH_VARARGS | METH_KEYWORDS, NULL},
- {"matmul",
- (PyCFunction)array_matmul,
- METH_VARARGS | METH_KEYWORDS, NULL},
{"c_einsum",
(PyCFunction)array_einsum,
METH_VARARGS|METH_KEYWORDS, NULL},
@@ -4358,6 +4246,9 @@ static struct PyMethodDef array_module_methods[] = {
METH_VARARGS | METH_KEYWORDS, NULL},
{"_monotonicity", (PyCFunction)arr__monotonicity,
METH_VARARGS | METH_KEYWORDS, NULL},
+ {"implement_array_function",
+ (PyCFunction)array_implement_array_function,
+ METH_VARARGS, NULL},
{"interp", (PyCFunction)arr_interp,
METH_VARARGS | METH_KEYWORDS, NULL},
{"interp_complex", (PyCFunction)arr_interp_complex,
@@ -4608,8 +4499,8 @@ NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_prepare = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_wrap = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_array_finalize = NULL;
-NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_buffer = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_ufunc = NULL;
+NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_implementation = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_order = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_copy = NULL;
NPY_VISIBILITY_HIDDEN PyObject * npy_ma_str_dtype = NULL;
@@ -4624,8 +4515,8 @@ intern_strings(void)
npy_ma_str_array_prepare = PyUString_InternFromString("__array_prepare__");
npy_ma_str_array_wrap = PyUString_InternFromString("__array_wrap__");
npy_ma_str_array_finalize = PyUString_InternFromString("__array_finalize__");
- npy_ma_str_buffer = PyUString_InternFromString("__buffer__");
npy_ma_str_ufunc = PyUString_InternFromString("__array_ufunc__");
+ npy_ma_str_implementation = PyUString_InternFromString("_implementation");
npy_ma_str_order = PyUString_InternFromString("order");
npy_ma_str_copy = PyUString_InternFromString("copy");
npy_ma_str_dtype = PyUString_InternFromString("dtype");
@@ -4635,12 +4526,11 @@ intern_strings(void)
return npy_ma_str_array && npy_ma_str_array_prepare &&
npy_ma_str_array_wrap && npy_ma_str_array_finalize &&
- npy_ma_str_buffer && npy_ma_str_ufunc &&
+ npy_ma_str_ufunc && npy_ma_str_implementation &&
npy_ma_str_order && npy_ma_str_copy && npy_ma_str_dtype &&
npy_ma_str_ndmin && npy_ma_str_axis1 && npy_ma_str_axis2;
}
-
#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
@@ -4705,15 +4595,23 @@ PyMODINIT_FUNC init_multiarray_umath(void) {
*/
PyArray_Type.tp_hash = PyObject_HashNotImplemented;
+ if (PyType_Ready(&PyUFunc_Type) < 0) {
+ goto err;
+ }
+
/* Load the ufunc operators into the array module's namespace */
if (InitOperators(d) < 0) {
goto err;
}
+ if (set_matmul_flags(d) < 0) {
+ goto err;
+ }
initialize_casting_tables();
initialize_numeric_types();
- if(initscalarmath(m) < 0)
+ if (initscalarmath(m) < 0) {
goto err;
+ }
if (PyType_Ready(&PyArray_Type) < 0) {
goto err;
@@ -4834,11 +4732,9 @@ PyMODINIT_FUNC init_multiarray_umath(void) {
set_flaginfo(d);
/* Create the typeinfo types */
- typeinfo_init_structsequences();
- PyDict_SetItemString(d,
- "typeinfo", (PyObject *)&PyArray_typeinfoType);
- PyDict_SetItemString(d,
- "typeinforanged", (PyObject *)&PyArray_typeinforangedType);
+ if (typeinfo_init_structsequences(d) < 0) {
+ goto err;
+ }
if (!intern_strings()) {
goto err;
diff --git a/numpy/core/src/multiarray/multiarraymodule.h b/numpy/core/src/multiarray/multiarraymodule.h
index 3de68c549..dd437e091 100644
--- a/numpy/core/src/multiarray/multiarraymodule.h
+++ b/numpy/core/src/multiarray/multiarraymodule.h
@@ -5,8 +5,8 @@ NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_prepare;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_wrap;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_array_finalize;
-NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_buffer;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_ufunc;
+NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_implementation;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_order;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_copy;
NPY_VISIBILITY_HIDDEN extern PyObject * npy_ma_str_dtype;
diff --git a/numpy/core/src/multiarray/nditer_api.c b/numpy/core/src/multiarray/nditer_api.c
index 7a33ac05e..db0bfcece 100644
--- a/numpy/core/src/multiarray/nditer_api.c
+++ b/numpy/core/src/multiarray/nditer_api.c
@@ -406,7 +406,7 @@ NpyIter_ResetToIterIndexRange(NpyIter *iter,
* Returns NPY_SUCCEED on success, NPY_FAIL on failure.
*/
NPY_NO_EXPORT int
-NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp *multi_index)
+NpyIter_GotoMultiIndex(NpyIter *iter, npy_intp const *multi_index)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -1628,15 +1628,12 @@ npyiter_coalesce_axes(NpyIter *iter)
npy_intp istrides, nstrides = NAD_NSTRIDES();
NpyIter_AxisData *axisdata = NIT_AXISDATA(iter);
npy_intp sizeof_axisdata = NIT_AXISDATA_SIZEOF(itflags, ndim, nop);
- NpyIter_AxisData *ad_compress;
+ NpyIter_AxisData *ad_compress = axisdata;
npy_intp new_ndim = 1;
/* The HASMULTIINDEX or IDENTPERM flags do not apply after coalescing */
NIT_ITFLAGS(iter) &= ~(NPY_ITFLAG_IDENTPERM|NPY_ITFLAG_HASMULTIINDEX);
- axisdata = NIT_AXISDATA(iter);
- ad_compress = axisdata;
-
for (idim = 0; idim < ndim-1; ++idim) {
int can_coalesce = 1;
npy_intp shape0 = NAD_SHAPE(ad_compress);
diff --git a/numpy/core/src/multiarray/nditer_constr.c b/numpy/core/src/multiarray/nditer_constr.c
index dbb24f26b..d40836dc2 100644
--- a/numpy/core/src/multiarray/nditer_constr.c
+++ b/numpy/core/src/multiarray/nditer_constr.c
@@ -24,7 +24,7 @@ static int
npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags);
static int
npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
- npy_intp *itershape);
+ const npy_intp *itershape);
static int
npyiter_calculate_ndim(int nop, PyArrayObject **op_in,
int oa_ndim);
@@ -55,7 +55,7 @@ npyiter_check_casting(int nop, PyArrayObject **op,
static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
- npy_uint32 *op_flags, int **op_axes,
+ const npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape);
static void
npyiter_replace_axisdata(NpyIter *iter, int iop,
@@ -74,23 +74,23 @@ static void
npyiter_find_best_axis_ordering(NpyIter *iter);
static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
+ const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs);
static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
- PyArray_Descr *op_dtype, int *op_axes);
+ PyArray_Descr *op_dtype, const int *op_axes);
static int
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
+ const npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes);
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags,
+ const npyiter_opitflags *op_itflags,
double *subtype_priority, PyTypeObject **subtype);
static int
npyiter_allocate_transfer_functions(NpyIter *iter);
@@ -787,7 +787,7 @@ npyiter_check_global_flags(npy_uint32 flags, npy_uint32* itflags)
static int
npyiter_check_op_axes(int nop, int oa_ndim, int **op_axes,
- npy_intp *itershape)
+ const npy_intp *itershape)
{
char axes_dupcheck[NPY_MAXDIMS];
int iop, idim;
@@ -1101,8 +1101,8 @@ npyiter_prepare_one_operand(PyArrayObject **op,
/* We just have a borrowed reference to op_request_dtype */
Py_INCREF(op_request_dtype);
/* If the requested dtype is flexible, adapt it */
- PyArray_AdaptFlexibleDType((PyObject *)(*op), PyArray_DESCR(*op),
- &op_request_dtype);
+ op_request_dtype = PyArray_AdaptFlexibleDType((PyObject *)(*op), PyArray_DESCR(*op),
+ op_request_dtype);
if (op_request_dtype == NULL) {
return 0;
}
@@ -1132,7 +1132,7 @@ npyiter_prepare_one_operand(PyArrayObject **op,
/* Check if the operand is aligned */
if (op_flags & NPY_ITER_ALIGNED) {
/* Check alignment */
- if (!IsUintAligned(*op)) {
+ if (!IsAligned(*op)) {
NPY_IT_DBG_PRINT("Iterator: Setting NPY_OP_ITFLAG_CAST "
"because of NPY_ITER_ALIGNED\n");
*op_itflags |= NPY_OP_ITFLAG_CAST;
@@ -1248,9 +1248,9 @@ npyiter_prepare_operands(int nop, PyArrayObject **op_in,
return 1;
fail_nop:
- iop = nop;
+ iop = nop - 1;
fail_iop:
- for (i = 0; i < iop; ++i) {
+ for (i = 0; i < iop+1; ++i) {
Py_XDECREF(op[i]);
Py_XDECREF(op_dtype[i]);
}
@@ -1423,7 +1423,7 @@ check_mask_for_writemasked_reduction(NpyIter *iter, int iop)
static int
npyiter_fill_axisdata(NpyIter *iter, npy_uint32 flags, npyiter_opitflags *op_itflags,
char **op_dataptr,
- npy_uint32 *op_flags, int **op_axes,
+ const npy_uint32 *op_flags, int **op_axes,
npy_intp *itershape)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -2120,8 +2120,8 @@ npyiter_apply_forced_iteration_order(NpyIter *iter, NPY_ORDER order)
/* Check that all the array inputs are fortran order */
for (iop = 0; iop < nop; ++iop, ++op) {
if (*op && !PyArray_CHKFLAGS(*op, NPY_ARRAY_F_CONTIGUOUS)) {
- forder = 0;
- break;
+ forder = 0;
+ break;
}
}
@@ -2409,7 +2409,7 @@ npyiter_find_best_axis_ordering(NpyIter *iter)
*/
static PyArray_Descr *
npyiter_get_common_dtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
+ const npyiter_opitflags *op_itflags, PyArray_Descr **op_dtype,
PyArray_Descr **op_request_dtypes,
int only_inputs)
{
@@ -2477,7 +2477,7 @@ static PyArrayObject *
npyiter_new_temp_array(NpyIter *iter, PyTypeObject *subtype,
npy_uint32 flags, npyiter_opitflags *op_itflags,
int op_ndim, npy_intp *shape,
- PyArray_Descr *op_dtype, int *op_axes)
+ PyArray_Descr *op_dtype, const int *op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
int idim, ndim = NIT_NDIM(iter);
@@ -2706,7 +2706,7 @@ static int
npyiter_allocate_arrays(NpyIter *iter,
npy_uint32 flags,
PyArray_Descr **op_dtype, PyTypeObject *subtype,
- npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
+ const npy_uint32 *op_flags, npyiter_opitflags *op_itflags,
int **op_axes)
{
npy_uint32 itflags = NIT_ITFLAGS(iter);
@@ -2851,8 +2851,14 @@ npyiter_allocate_arrays(NpyIter *iter,
npyiter_replace_axisdata(iter, iop, op[iop], ondim,
PyArray_DATA(op[iop]), op_axes ? op_axes[iop] : NULL);
- /* New arrays are aligned and need no cast */
- op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ /*
+ * New arrays are guaranteed true-aligned, but copy/cast code
+ * needs uint-alignment in addition.
+ */
+ if (IsUintAligned(out)) {
+ op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ }
+ /* New arrays need no cast */
op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST;
}
/*
@@ -2888,11 +2894,17 @@ npyiter_allocate_arrays(NpyIter *iter,
PyArray_DATA(op[iop]), NULL);
/*
- * New arrays are aligned need no cast, and in the case
+ * New arrays are guaranteed true-aligned, but copy/cast code
+ * needs uint-alignment in addition.
+ */
+ if (IsUintAligned(temp)) {
+ op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ }
+ /*
+ * New arrays need no cast, and in the case
* of scalars, always have stride 0 so never need buffering
*/
- op_itflags[iop] |= (NPY_OP_ITFLAG_ALIGNED |
- NPY_OP_ITFLAG_BUFNEVER);
+ op_itflags[iop] |= NPY_OP_ITFLAG_BUFNEVER;
op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST;
if (itflags & NPY_ITFLAG_BUFFER) {
NBF_STRIDES(bufferdata)[iop] = 0;
@@ -2953,8 +2965,14 @@ npyiter_allocate_arrays(NpyIter *iter,
npyiter_replace_axisdata(iter, iop, op[iop], ondim,
PyArray_DATA(op[iop]), op_axes ? op_axes[iop] : NULL);
- /* The temporary copy is aligned and needs no cast */
- op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ /*
+ * New arrays are guaranteed true-aligned, but copy/cast code
+ * needs uint-alignment in addition.
+ */
+ if (IsUintAligned(temp)) {
+ op_itflags[iop] |= NPY_OP_ITFLAG_ALIGNED;
+ }
+ /* The temporary copy needs no cast */
op_itflags[iop] &= ~NPY_OP_ITFLAG_CAST;
}
else {
@@ -3091,7 +3109,7 @@ npyiter_allocate_arrays(NpyIter *iter,
*/
static void
npyiter_get_priority_subtype(int nop, PyArrayObject **op,
- npyiter_opitflags *op_itflags,
+ const npyiter_opitflags *op_itflags,
double *subtype_priority,
PyTypeObject **subtype)
{
@@ -3157,6 +3175,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
&stransfer,
&transferdata,
&needs_api) != NPY_SUCCEED) {
+ iop -= 1; /* This one cannot be cleaned up yet. */
goto fail;
}
readtransferfn[iop] = stransfer;
@@ -3250,7 +3269,7 @@ npyiter_allocate_transfer_functions(NpyIter *iter)
return 1;
fail:
- for (i = 0; i < iop; ++i) {
+ for (i = 0; i < iop+1; ++i) {
if (readtransferdata[iop] != NULL) {
NPY_AUXDATA_FREE(readtransferdata[iop]);
readtransferdata[iop] = NULL;
diff --git a/numpy/core/src/multiarray/nditer_pywrap.c b/numpy/core/src/multiarray/nditer_pywrap.c
index 5a9f3c5fa..4b9d41aa4 100644
--- a/numpy/core/src/multiarray/nditer_pywrap.c
+++ b/numpy/core/src/multiarray/nditer_pywrap.c
@@ -82,7 +82,8 @@ static int npyiter_cache_values(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_new(PyTypeObject *subtype, PyObject *args, PyObject *kwds)
+npyiter_new(PyTypeObject *subtype, PyObject *NPY_UNUSED(args),
+ PyObject *NPY_UNUSED(kwds))
{
NewNpyArrayIterObject *self;
@@ -535,7 +536,7 @@ try_single_dtype:
}
static int
-npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
+npyiter_convert_op_axes(PyObject *op_axes_in, int nop,
int **op_axes, int *oa_ndim)
{
PyObject *a;
@@ -572,6 +573,7 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
if (*oa_ndim > NPY_MAXDIMS) {
PyErr_SetString(PyExc_ValueError,
"Too many dimensions in op_axes");
+ Py_DECREF(a);
return 0;
}
}
@@ -602,8 +604,8 @@ npyiter_convert_op_axes(PyObject *op_axes_in, npy_intp nop,
}
Py_DECREF(v);
}
- Py_DECREF(a);
}
+ Py_DECREF(a);
}
if (*oa_ndim == -1) {
@@ -2355,6 +2357,8 @@ npyiter_close(NewNpyArrayIterObject *self)
}
ret = NpyIter_Deallocate(iter);
self->iter = NULL;
+ Py_XDECREF(self->nested_child);
+ self->nested_child = NULL;
if (ret < 0) {
return NULL;
}
@@ -2362,7 +2366,7 @@ npyiter_close(NewNpyArrayIterObject *self)
}
static PyObject *
-npyiter_exit(NewNpyArrayIterObject *self, PyObject *args)
+npyiter_exit(NewNpyArrayIterObject *self, PyObject *NPY_UNUSED(args))
{
/* even if called via exception handling, writeback any data */
return npyiter_close(self);
diff --git a/numpy/core/src/multiarray/number.c b/numpy/core/src/multiarray/number.c
index dabbae064..dabc866ff 100644
--- a/numpy/core/src/multiarray/number.c
+++ b/numpy/core/src/multiarray/number.c
@@ -71,12 +71,8 @@ array_inplace_power(PyArrayObject *a1, PyObject *o2, PyObject *NPY_UNUSED(modulo
n_ops.op = temp; \
}
-
-/*NUMPY_API
- *Set internal structure with number functions that all arrays will use
- */
NPY_NO_EXPORT int
-PyArray_SetNumericOps(PyObject *dict)
+_PyArray_SetNumericOps(PyObject *dict)
{
PyObject *temp = NULL;
SET(add);
@@ -116,19 +112,33 @@ PyArray_SetNumericOps(PyObject *dict)
SET(minimum);
SET(rint);
SET(conjugate);
+ SET(matmul);
+ SET(clip);
return 0;
}
+/*NUMPY_API
+ *Set internal structure with number functions that all arrays will use
+ */
+NPY_NO_EXPORT int
+PyArray_SetNumericOps(PyObject *dict)
+{
+ /* 2018-09-09, 1.16 */
+ if (DEPRECATE("PyArray_SetNumericOps is deprecated. Use "
+ "PyUFunc_ReplaceLoopBySignature to replace ufunc inner loop functions "
+ "instead.") < 0) {
+ return -1;
+ }
+ return _PyArray_SetNumericOps(dict);
+}
+
/* Note - macro contains goto */
#define GET(op) if (n_ops.op && \
(PyDict_SetItemString(dict, #op, n_ops.op)==-1)) \
goto fail;
-/*NUMPY_API
- Get dictionary showing number functions that all arrays will use
-*/
NPY_NO_EXPORT PyObject *
-PyArray_GetNumericOps(void)
+_PyArray_GetNumericOps(void)
{
PyObject *dict;
if ((dict = PyDict_New())==NULL)
@@ -169,6 +179,8 @@ PyArray_GetNumericOps(void)
GET(minimum);
GET(rint);
GET(conjugate);
+ GET(matmul);
+ GET(clip);
return dict;
fail:
@@ -176,6 +188,19 @@ PyArray_GetNumericOps(void)
return NULL;
}
+/*NUMPY_API
+ Get dictionary showing number functions that all arrays will use
+*/
+NPY_NO_EXPORT PyObject *
+PyArray_GetNumericOps(void)
+{
+ /* 2018-09-09, 1.16 */
+ if (DEPRECATE("PyArray_GetNumericOps is deprecated.") < 0) {
+ return NULL;
+ }
+ return _PyArray_GetNumericOps();
+}
+
static PyObject *
_get_keywords(int rtype, PyArrayObject *out)
{
@@ -361,18 +386,13 @@ array_divmod(PyArrayObject *m1, PyObject *m2)
static PyObject *
array_matrix_multiply(PyArrayObject *m1, PyObject *m2)
{
- static PyObject *matmul = NULL;
-
- npy_cache_import("numpy.core.multiarray", "matmul", &matmul);
- if (matmul == NULL) {
- return NULL;
- }
BINOP_GIVE_UP_IF_NEEDED(m1, m2, nb_matrix_multiply, array_matrix_multiply);
- return PyArray_GenericBinaryFunction(m1, m2, matmul);
+ return PyArray_GenericBinaryFunction(m1, m2, n_ops.matmul);
}
static PyObject *
-array_inplace_matrix_multiply(PyArrayObject *m1, PyObject *m2)
+array_inplace_matrix_multiply(
+ PyArrayObject *NPY_UNUSED(m1), PyObject *NPY_UNUSED(m2))
{
PyErr_SetString(PyExc_TypeError,
"In-place matrix multiplication is not (yet) supported. "
@@ -578,19 +598,20 @@ array_positive(PyArrayObject *m1)
*/
PyObject *exc, *val, *tb;
PyErr_Fetch(&exc, &val, &tb);
- if (has_non_default_array_ufunc((PyObject *)m1)) {
+ if (PyUFunc_HasOverride((PyObject *)m1)) {
PyErr_Restore(exc, val, tb);
return NULL;
}
+ Py_XDECREF(exc);
+ Py_XDECREF(val);
+ Py_XDECREF(tb);
+
/* 2018-06-28, 1.16.0 */
if (DEPRECATE("Applying '+' to a non-numerical array is "
"ill-defined. Returning a copy, but in the future "
"this will error.") < 0) {
return NULL;
}
- Py_XDECREF(exc);
- Py_XDECREF(val);
- Py_XDECREF(tb);
value = PyArray_Return((PyArrayObject *)PyArray_Copy(m1));
}
return value;
diff --git a/numpy/core/src/multiarray/number.h b/numpy/core/src/multiarray/number.h
index 99a2a722b..643241b3d 100644
--- a/numpy/core/src/multiarray/number.h
+++ b/numpy/core/src/multiarray/number.h
@@ -39,6 +39,8 @@ typedef struct {
PyObject *minimum;
PyObject *rint;
PyObject *conjugate;
+ PyObject *matmul;
+ PyObject *clip;
} NumericOps;
extern NPY_NO_EXPORT NumericOps n_ops;
@@ -48,10 +50,10 @@ NPY_NO_EXPORT PyObject *
array_int(PyArrayObject *v);
NPY_NO_EXPORT int
-PyArray_SetNumericOps(PyObject *dict);
+_PyArray_SetNumericOps(PyObject *dict);
NPY_NO_EXPORT PyObject *
-PyArray_GetNumericOps(void);
+_PyArray_GetNumericOps(void);
NPY_NO_EXPORT PyObject *
PyArray_GenericBinaryFunction(PyArrayObject *m1, PyObject *m2, PyObject *op);
diff --git a/numpy/core/src/multiarray/refcount.c b/numpy/core/src/multiarray/refcount.c
index 4b018b056..6033929d9 100644
--- a/numpy/core/src/multiarray/refcount.c
+++ b/numpy/core/src/multiarray/refcount.c
@@ -11,6 +11,7 @@
#define _MULTIARRAYMODULE
#include "numpy/arrayobject.h"
#include "numpy/arrayscalars.h"
+#include "iterators.h"
#include "npy_config.h"
@@ -19,8 +20,12 @@
static void
_fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype);
-/* Incref all objects found at this record */
+
/*NUMPY_API
+ * XINCREF all objects in a single array item. This is complicated for
+ * structured datatypes where the position of objects needs to be extracted.
+ * The function is executed recursively for each nested field or subarray dtype
+ * such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
*/
NPY_NO_EXPORT void
PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
@@ -51,11 +56,37 @@ PyArray_Item_INCREF(char *data, PyArray_Descr *descr)
PyArray_Item_INCREF(data + offset, new);
}
}
+ else if (PyDataType_HASSUBARRAY(descr)) {
+ int size, i, inner_elsize;
+
+ inner_elsize = descr->subarray->base->elsize;
+ if (inner_elsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ /* Subarrays are always contiguous in memory */
+ size = descr->elsize / inner_elsize;
+
+ for (i = 0; i < size; i++){
+ /* Recursively increment the reference count of subarray elements */
+ PyArray_Item_INCREF(data + i * inner_elsize,
+ descr->subarray->base);
+ }
+ }
+ else {
+ /* This path should not be reachable. */
+ assert(0);
+ }
return;
}
-/* XDECREF all objects found at this record */
+
/*NUMPY_API
+ *
+ * XDECREF all objects in a single array item. This is complicated for
+ * structured datatypes where the position of objects needs to be extracted.
+ * The function is executed recursively for each nested field or subarray dtype
+ * such as `np.dtype([("field1", "O"), ("field2", "f,O", (3,2))])`
*/
NPY_NO_EXPORT void
PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
@@ -87,6 +118,27 @@ PyArray_Item_XDECREF(char *data, PyArray_Descr *descr)
PyArray_Item_XDECREF(data + offset, new);
}
}
+ else if (PyDataType_HASSUBARRAY(descr)) {
+ int size, i, inner_elsize;
+
+ inner_elsize = descr->subarray->base->elsize;
+ if (inner_elsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ /* Subarrays are always contiguous in memory */
+ size = descr->elsize / inner_elsize;
+
+ for (i = 0; i < size; i++){
+ /* Recursively decrement the reference count of subarray elements */
+ PyArray_Item_XDECREF(data + i * inner_elsize,
+ descr->subarray->base);
+ }
+ }
+ else {
+ /* This path should not be reachable. */
+ assert(0);
+ }
return;
}
@@ -159,21 +211,22 @@ PyArray_XDECREF(PyArrayObject *mp)
npy_intp i, n;
PyObject **data;
PyObject *temp;
- PyArrayIterObject *it;
+ /*
+ * Allocating the iterator on the stack lets this function avoid
+ * modifying the array's reference count, which matters when it is
+ * called during dealloc. (A static allocation is not required as such.)
+ */
+ PyArrayIterObject it;
if (!PyDataType_REFCHK(PyArray_DESCR(mp))) {
return 0;
}
if (PyArray_DESCR(mp)->type_num != NPY_OBJECT) {
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp);
- if (it == NULL) {
- return -1;
+ PyArray_RawIterBaseInit(&it, mp);
+ while(it.index < it.size) {
+ PyArray_Item_XDECREF(it.dataptr, PyArray_DESCR(mp));
+ PyArray_ITER_NEXT(&it);
}
- while(it->index < it->size) {
- PyArray_Item_XDECREF(it->dataptr, PyArray_DESCR(mp));
- PyArray_ITER_NEXT(it);
- }
- Py_DECREF(it);
return 0;
}
@@ -191,16 +244,12 @@ PyArray_XDECREF(PyArrayObject *mp)
}
}
else { /* handles misaligned data too */
- it = (PyArrayIterObject *)PyArray_IterNew((PyObject *)mp);
- if (it == NULL) {
- return -1;
- }
- while(it->index < it->size) {
- NPY_COPY_PYOBJECT_PTR(&temp, it->dataptr);
+ PyArray_RawIterBaseInit(&it, mp);
+ while(it.index < it.size) {
+ NPY_COPY_PYOBJECT_PTR(&temp, it.dataptr);
Py_XDECREF(temp);
- PyArray_ITER_NEXT(it);
+ PyArray_ITER_NEXT(&it);
}
- Py_DECREF(it);
}
return 0;
}
@@ -258,6 +307,10 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
Py_XDECREF(arr);
}
}
+ if (dtype->type_num == NPY_OBJECT) {
+ Py_XINCREF(obj);
+ NPY_COPY_PYOBJECT_PTR(optr, &obj);
+ }
else if (PyDataType_HASFIELDS(dtype)) {
PyObject *key, *value, *title = NULL;
PyArray_Descr *new;
@@ -274,15 +327,26 @@ _fillobject(char *optr, PyObject *obj, PyArray_Descr *dtype)
_fillobject(optr + offset, obj, new);
}
}
- else {
- npy_intp i;
- npy_intp nsize = dtype->elsize / sizeof(obj);
+ else if (PyDataType_HASSUBARRAY(dtype)) {
+ int size, i, inner_elsize;
- for (i = 0; i < nsize; i++) {
- Py_XINCREF(obj);
- NPY_COPY_PYOBJECT_PTR(optr, &obj);
- optr += sizeof(obj);
+ inner_elsize = dtype->subarray->base->elsize;
+ if (inner_elsize == 0) {
+ /* There cannot be any elements, so return */
+ return;
+ }
+ /* Subarrays are always contiguous in memory */
+ size = dtype->elsize / inner_elsize;
+
+ /* Call _fillobject on each item recursively. */
+ for (i = 0; i < size; i++){
+ _fillobject(optr, obj, dtype->subarray->base);
+ optr += inner_elsize;
}
- return;
}
+ else {
+ /* This path should not be reachable. */
+ assert(0);
+ }
+ return;
}
diff --git a/numpy/core/src/multiarray/scalarapi.c b/numpy/core/src/multiarray/scalarapi.c
index 5ef6c0bbf..b669a3e76 100644
--- a/numpy/core/src/multiarray/scalarapi.c
+++ b/numpy/core/src/multiarray/scalarapi.c
@@ -471,12 +471,18 @@ PyArray_DescrFromTypeObject(PyObject *type)
/* Do special thing for VOID sub-types */
if (PyType_IsSubtype((PyTypeObject *)type, &PyVoidArrType_Type)) {
new = PyArray_DescrNewFromType(NPY_VOID);
- conv = _arraydescr_fromobj(type);
- if (conv) {
+ if (new == NULL) {
+ return NULL;
+ }
+ if (_arraydescr_from_dtype_attr(type, &conv)) {
+ if (conv == NULL) {
+ Py_DECREF(new);
+ return NULL;
+ }
new->fields = conv->fields;
- Py_INCREF(new->fields);
+ Py_XINCREF(new->fields);
new->names = conv->names;
- Py_INCREF(new->names);
+ Py_XINCREF(new->names);
new->elsize = conv->elsize;
new->subarray = conv->subarray;
conv->subarray = NULL;
@@ -802,6 +808,9 @@ PyArray_Scalar(void *data, PyArray_Descr *descr, PyObject *base)
return obj;
}
}
+ if (itemsize == 0) {
+ return obj;
+ }
destptr = PyDataMem_NEW(itemsize);
if (destptr == NULL) {
Py_DECREF(obj);
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 6dd8b1a29..9adca6773 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -139,6 +139,7 @@ gentype_alloc(PyTypeObject *type, Py_ssize_t nitems)
static void
gentype_dealloc(PyObject *v)
{
+ _dealloc_cached_buffer_info(v);
Py_TYPE(v)->tp_free(v);
}
@@ -1103,8 +1104,7 @@ static PyNumberMethods gentype_as_number = {
(binaryfunc)gentype_add, /*nb_add*/
(binaryfunc)gentype_subtract, /*nb_subtract*/
(binaryfunc)gentype_multiply, /*nb_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(binaryfunc)gentype_divide, /*nb_divide*/
#endif
(binaryfunc)gentype_remainder, /*nb_remainder*/
@@ -1120,8 +1120,7 @@ static PyNumberMethods gentype_as_number = {
(binaryfunc)gentype_and, /*nb_and*/
(binaryfunc)gentype_xor, /*nb_xor*/
(binaryfunc)gentype_or, /*nb_or*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*nb_coerce*/
#endif
(unaryfunc)gentype_int, /*nb_int*/
@@ -1131,16 +1130,14 @@ static PyNumberMethods gentype_as_number = {
(unaryfunc)gentype_long, /*nb_long*/
#endif
(unaryfunc)gentype_float, /*nb_float*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(unaryfunc)gentype_oct, /*nb_oct*/
(unaryfunc)gentype_hex, /*nb_hex*/
#endif
0, /*inplace_add*/
0, /*inplace_subtract*/
0, /*inplace_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*inplace_divide*/
#endif
0, /*inplace_remainder*/
@@ -1155,6 +1152,10 @@ static PyNumberMethods gentype_as_number = {
0, /*nb_inplace_floor_divide*/
0, /*nb_inplace_true_divide*/
(unaryfunc)NULL, /*nb_index*/
+#if PY_VERSION_HEX >= 0x03050000
+ 0, /*nb_matrix_multiply*/
+ 0, /*nb_inplace_matrix_multiply*/
+#endif
};
@@ -1858,6 +1859,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
* sticks around after the release.
*/
PyBuffer_Release(&view);
+ _dealloc_cached_buffer_info(self);
}
else {
Py_DECREF(ret);
@@ -1991,6 +1993,92 @@ static PyObject *
}
/**end repeat**/
+/**begin repeat
+ * #name = half, float, double, longdouble#
+ * #Name = Half, Float, Double, LongDouble#
+ * #is_half = 1,0,0,0#
+ * #c = f, f, , l#
+ * #convert = PyLong_FromDouble, PyLong_FromDouble, PyLong_FromDouble,
+ * npy_longdouble_to_PyLong#
+ * #
+ */
+/* Heavily copied from the builtin float.as_integer_ratio */
+static PyObject *
+@name@_as_integer_ratio(PyObject *self)
+{
+#if @is_half@
+ npy_double val = npy_half_to_double(PyArrayScalar_VAL(self, @Name@));
+ npy_double frac;
+#else
+ npy_@name@ val = PyArrayScalar_VAL(self, @Name@);
+ npy_@name@ frac;
+#endif
+ int exponent;
+ int i;
+
+ PyObject *py_exponent = NULL;
+ PyObject *numerator = NULL;
+ PyObject *denominator = NULL;
+ PyObject *result_pair = NULL;
+ PyNumberMethods *long_methods = PyLong_Type.tp_as_number;
+
+ if (npy_isnan(val)) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot convert NaN to integer ratio");
+ return NULL;
+ }
+ if (!npy_isfinite(val)) {
+ PyErr_SetString(PyExc_OverflowError,
+ "cannot convert Infinity to integer ratio");
+ return NULL;
+ }
+
+ frac = npy_frexp@c@(val, &exponent); /* val == frac * 2**exponent exactly */
+
+ /* This relies on the floating point type being base 2 to converge */
+ for (i = 0; frac != npy_floor@c@(frac); i++) {
+ frac *= 2.0;
+ exponent--;
+ }
+
+ /* self == frac * 2**exponent exactly and frac is integral. */
+ numerator = @convert@(frac);
+ if (numerator == NULL)
+ goto error;
+ denominator = PyLong_FromLong(1);
+ if (denominator == NULL)
+ goto error;
+ py_exponent = PyLong_FromLong(exponent < 0 ? -exponent : exponent);
+ if (py_exponent == NULL)
+ goto error;
+
+ /* fold in 2**exponent */
+ if (exponent > 0) {
+ PyObject *temp = long_methods->nb_lshift(numerator, py_exponent);
+ if (temp == NULL)
+ goto error;
+ Py_DECREF(numerator);
+ numerator = temp;
+ }
+ else {
+ PyObject *temp = long_methods->nb_lshift(denominator, py_exponent);
+ if (temp == NULL)
+ goto error;
+ Py_DECREF(denominator);
+ denominator = temp;
+ }
+
+ result_pair = PyTuple_Pack(2, numerator, denominator);
+
+error:
+ Py_XDECREF(py_exponent);
+ Py_XDECREF(denominator);
+ Py_XDECREF(numerator);
+ return result_pair;
+}
+/**end repeat**/
+
+
/*
* need to fill in doc-strings for these methods on import -- copy from
* array docstrings
@@ -2254,6 +2342,17 @@ static PyMethodDef @name@type_methods[] = {
};
/**end repeat**/
+/**begin repeat
+ * #name = half,float,double,longdouble#
+ */
+static PyMethodDef @name@type_methods[] = {
+ {"as_integer_ratio",
+ (PyCFunction)@name@_as_integer_ratio,
+ METH_NOARGS, NULL},
+ {NULL, NULL, 0, NULL}
+};
+/**end repeat**/
+
/************* As_mapping functions for void array scalar ************/
static Py_ssize_t
@@ -2597,6 +2696,8 @@ NPY_NO_EXPORT PyTypeObject PyGenericArrType_Type = {
static void
void_dealloc(PyVoidScalarObject *v)
{
+ _dealloc_cached_buffer_info((PyObject *)v);
+
if (v->flags & NPY_ARRAY_OWNDATA) {
npy_free_cache(v->obval, Py_SIZE(v));
}
@@ -3923,7 +4024,6 @@ initialize_casting_tables(void)
}
_npy_can_cast_safely_table[NPY_STRING][NPY_UNICODE] = 1;
- _npy_can_cast_safely_table[NPY_BOOL][NPY_TIMEDELTA] = 1;
#ifndef NPY_SIZEOF_BYTE
#define NPY_SIZEOF_BYTE 1
@@ -4307,6 +4407,15 @@ initialize_numeric_types(void)
/**end repeat**/
+ /**begin repeat
+ * #name = half, float, double, longdouble#
+ * #Name = Half, Float, Double, LongDouble#
+ */
+
+ Py@Name@ArrType_Type.tp_methods = @name@type_methods;
+
+ /**end repeat**/
+
#if (NPY_SIZEOF_INT != NPY_SIZEOF_LONG) || defined(NPY_PY3K)
/* We won't be inheriting from Python Int type. */
PyIntArrType_Type.tp_hash = int_arrtype_hash;
@@ -4383,6 +4492,36 @@ initialize_numeric_types(void)
PyArrayIter_Type.tp_iter = PyObject_SelfIter;
PyArrayMapIter_Type.tp_iter = PyObject_SelfIter;
+
+ /*
+ * Give types different names when they are the same size (gh-9799).
+ * `np.intX` always refers to the first int of that size in the sequence
+ * `['LONG', 'LONGLONG', 'INT', 'SHORT', 'BYTE']`.
+ */
+#if (NPY_SIZEOF_BYTE == NPY_SIZEOF_SHORT)
+ PyByteArrType_Type.tp_name = "numpy.byte";
+ PyUByteArrType_Type.tp_name = "numpy.ubyte";
+#endif
+#if (NPY_SIZEOF_SHORT == NPY_SIZEOF_INT)
+ PyShortArrType_Type.tp_name = "numpy.short";
+ PyUShortArrType_Type.tp_name = "numpy.ushort";
+#endif
+#if (NPY_SIZEOF_INT == NPY_SIZEOF_LONG)
+ PyIntArrType_Type.tp_name = "numpy.intc";
+ PyUIntArrType_Type.tp_name = "numpy.uintc";
+#endif
+#if (NPY_SIZEOF_LONGLONG == NPY_SIZEOF_LONG)
+ PyLongLongArrType_Type.tp_name = "numpy.longlong";
+ PyULongLongArrType_Type.tp_name = "numpy.ulonglong";
+#endif
+
+ /*
+ Do the same for longdouble
+ */
+#if (NPY_SIZEOF_LONGDOUBLE == NPY_SIZEOF_DOUBLE)
+ PyLongDoubleArrType_Type.tp_name = "numpy.longdouble";
+ PyCLongDoubleArrType_Type.tp_name = "numpy.clongdouble";
+#endif
}
typedef struct {
diff --git a/numpy/core/src/multiarray/shape.c b/numpy/core/src/multiarray/shape.c
index 3ac71e285..4e31f003b 100644
--- a/numpy/core/src/multiarray/shape.c
+++ b/numpy/core/src/multiarray/shape.c
@@ -26,7 +26,7 @@ static int
_fix_unknown_dimension(PyArray_Dims *newshape, PyArrayObject *arr);
static int
-_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
+_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
npy_intp *newstrides, int is_f_order);
static void
@@ -40,11 +40,11 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype);
*/
NPY_NO_EXPORT PyObject *
PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
- NPY_ORDER order)
+ NPY_ORDER NPY_UNUSED(order))
{
npy_intp oldnbytes, newnbytes;
npy_intp oldsize, newsize;
- int new_nd=newshape->len, k, n, elsize;
+ int new_nd=newshape->len, k, elsize;
int refcnt;
npy_intp* new_dimensions=newshape->ptr;
npy_intp new_strides[NPY_MAXDIMS];
@@ -89,11 +89,19 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
return NULL;
}
+ if (PyArray_BASE(self) != NULL
+ || (((PyArrayObject_fields *)self)->weakreflist != NULL)) {
+ PyErr_SetString(PyExc_ValueError,
+ "cannot resize an array that "
+ "references or is referenced\n"
+ "by another array in this way. Use the np.resize function.");
+ return NULL;
+ }
if (refcheck) {
#ifdef PYPY_VERSION
PyErr_SetString(PyExc_ValueError,
"cannot resize an array with refcheck=True on PyPy.\n"
- "Use the resize function or refcheck=False");
+ "Use the np.resize function or refcheck=False");
return NULL;
#else
refcnt = PyArray_REFCOUNT(self);
@@ -102,13 +110,12 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
else {
refcnt = 1;
}
- if ((refcnt > 2)
- || (PyArray_BASE(self) != NULL)
- || (((PyArrayObject_fields *)self)->weakreflist != NULL)) {
+ if (refcnt > 2) {
PyErr_SetString(PyExc_ValueError,
"cannot resize an array that "
"references or is referenced\n"
- "by another array in this way. Use the resize function");
+ "by another array in this way.\n"
+ "Use the np.resize function or refcheck=False");
return NULL;
}
@@ -129,8 +136,8 @@ PyArray_Resize(PyArrayObject *self, PyArray_Dims *newshape, int refcheck,
PyObject *zero = PyInt_FromLong(0);
char *optr;
optr = PyArray_BYTES(self) + oldnbytes;
- n = newsize - oldsize;
- for (k = 0; k < n; k++) {
+ npy_intp n_new = newsize - oldsize;
+ for (npy_intp i = 0; i < n_new; i++) {
_putzero((char *)optr, zero, PyArray_DESCR(self));
optr += elsize;
}
@@ -354,7 +361,7 @@ _putzero(char *optr, PyObject *zero, PyArray_Descr *dtype)
* stride of the next-fastest index.
*/
static int
-_attempt_nocopy_reshape(PyArrayObject *self, int newnd, npy_intp* newdims,
+_attempt_nocopy_reshape(PyArrayObject *self, int newnd, const npy_intp *newdims,
npy_intp *newstrides, int is_f_order)
{
int oldnd;
@@ -759,7 +766,7 @@ static int _npy_stride_sort_item_comparator(const void *a, const void *b)
* [(2, 12), (0, 4), (1, -2)].
*/
NPY_NO_EXPORT void
-PyArray_CreateSortedStridePerm(int ndim, npy_intp *strides,
+PyArray_CreateSortedStridePerm(int ndim, npy_intp const *strides,
npy_stride_sort_item *out_strideperm)
{
int i;
@@ -1041,7 +1048,7 @@ build_shape_string(npy_intp n, npy_intp *vals)
* from a reduction result once its computation is complete.
*/
NPY_NO_EXPORT void
-PyArray_RemoveAxesInPlace(PyArrayObject *arr, npy_bool *flags)
+PyArray_RemoveAxesInPlace(PyArrayObject *arr, const npy_bool *flags)
{
PyArrayObject_fields *fa = (PyArrayObject_fields *)arr;
npy_intp *shape = fa->dimensions, *strides = fa->strides;
diff --git a/numpy/core/src/multiarray/temp_elide.c b/numpy/core/src/multiarray/temp_elide.c
index 3d2f976f2..09b948218 100644
--- a/numpy/core/src/multiarray/temp_elide.c
+++ b/numpy/core/src/multiarray/temp_elide.c
@@ -166,7 +166,7 @@ check_callers(int * cannot)
return 0;
}
/* get multiarray base address */
- if (dladdr(&PyArray_SetNumericOps, &info)) {
+ if (dladdr(&PyArray_INCREF, &info)) {
pos_ma_start = info.dli_fbase;
pos_ma_end = info.dli_fbase;
}
diff --git a/numpy/core/src/multiarray/typeinfo.c b/numpy/core/src/multiarray/typeinfo.c
index f0af76809..14c4f27cb 100644
--- a/numpy/core/src/multiarray/typeinfo.c
+++ b/numpy/core/src/multiarray/typeinfo.c
@@ -3,8 +3,7 @@
* Unfortunately, we need two different types to cover the cases where min/max
* do and do not appear in the tuple.
*/
-#define PY_SSIZE_T_CLEAN
-#include <Python.h>
+#include "typeinfo.h"
/* In python 2, this is not exported from Python.h */
#include <structseq.h>
@@ -14,8 +13,8 @@
#include "npy_pycompat.h"
-PyTypeObject PyArray_typeinfoType;
-PyTypeObject PyArray_typeinforangedType;
+static PyTypeObject PyArray_typeinfoType;
+static PyTypeObject PyArray_typeinforangedType;
static PyStructSequence_Field typeinfo_fields[] = {
{"char", "The character used to represent the type"},
@@ -51,7 +50,7 @@ static PyStructSequence_Desc typeinforanged_desc = {
7, /* n_in_sequence */
};
-PyObject *
+NPY_NO_EXPORT PyObject *
PyArray_typeinfo(
char typechar, int typenum, int nbits, int align,
PyTypeObject *type_obj)
@@ -77,7 +76,7 @@ PyArray_typeinfo(
return entry;
}
-PyObject *
+NPY_NO_EXPORT PyObject *
PyArray_typeinforanged(
char typechar, int typenum, int nbits, int align,
PyObject *max, PyObject *min, PyTypeObject *type_obj)
@@ -105,10 +104,38 @@ PyArray_typeinforanged(
return entry;
}
-void typeinfo_init_structsequences(void)
+/* Python version only needed for backport to 2.7 */
+#if (PY_VERSION_HEX < 0x03040000) \
+ || (defined(PYPY_VERSION_NUM) && (PYPY_VERSION_NUM < 0x07020000))
+
+ static int
+ PyStructSequence_InitType2(PyTypeObject *type, PyStructSequence_Desc *desc) {
+ PyStructSequence_InitType(type, desc);
+ if (PyErr_Occurred()) {
+ return -1;
+ }
+ return 0;
+ }
+#endif
+
+NPY_NO_EXPORT int
+typeinfo_init_structsequences(PyObject *multiarray_dict)
{
- PyStructSequence_InitType(
- &PyArray_typeinfoType, &typeinfo_desc);
- PyStructSequence_InitType(
- &PyArray_typeinforangedType, &typeinforanged_desc);
+ if (PyStructSequence_InitType2(
+ &PyArray_typeinfoType, &typeinfo_desc) < 0) {
+ return -1;
+ }
+ if (PyStructSequence_InitType2(
+ &PyArray_typeinforangedType, &typeinforanged_desc) < 0) {
+ return -1;
+ }
+ if (PyDict_SetItemString(multiarray_dict,
+ "typeinfo", (PyObject *)&PyArray_typeinfoType) < 0) {
+ return -1;
+ }
+ if (PyDict_SetItemString(multiarray_dict,
+ "typeinforanged", (PyObject *)&PyArray_typeinforangedType) < 0) {
+ return -1;
+ }
+ return 0;
}
diff --git a/numpy/core/src/multiarray/typeinfo.h b/numpy/core/src/multiarray/typeinfo.h
index 5899c2093..28afa4120 100644
--- a/numpy/core/src/multiarray/typeinfo.h
+++ b/numpy/core/src/multiarray/typeinfo.h
@@ -1,17 +1,19 @@
#ifndef _NPY_PRIVATE_TYPEINFO_H_
#define _NPY_PRIVATE_TYPEINFO_H_
-void typeinfo_init_structsequences(void);
+#define PY_SSIZE_T_CLEAN
+#include <Python.h>
+#include "npy_config.h"
-extern PyTypeObject PyArray_typeinfoType;
-extern PyTypeObject PyArray_typeinforangedType;
+NPY_VISIBILITY_HIDDEN int
+typeinfo_init_structsequences(PyObject *multiarray_dict);
-PyObject *
+NPY_VISIBILITY_HIDDEN PyObject *
PyArray_typeinfo(
char typechar, int typenum, int nbits, int align,
PyTypeObject *type_obj);
-PyObject *
+NPY_VISIBILITY_HIDDEN PyObject *
PyArray_typeinforanged(
char typechar, int typenum, int nbits, int align,
PyObject *max, PyObject *min, PyTypeObject *type_obj);
diff --git a/numpy/core/src/multiarray/usertypes.c b/numpy/core/src/multiarray/usertypes.c
index 8e8090002..2e8fb514f 100644
--- a/numpy/core/src/multiarray/usertypes.c
+++ b/numpy/core/src/multiarray/usertypes.c
@@ -40,19 +40,27 @@ maintainer email: oliphant.travis@ieee.org
NPY_NO_EXPORT PyArray_Descr **userdescrs=NULL;
-static int *
-_append_new(int *types, int insert)
+static int
+_append_new(int **p_types, int insert)
{
int n = 0;
int *newtypes;
+ int *types = *p_types;
while (types[n] != NPY_NOTYPE) {
n++;
}
newtypes = (int *)realloc(types, (n + 2)*sizeof(int));
+ if (newtypes == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
newtypes[n] = insert;
newtypes[n + 1] = NPY_NOTYPE;
- return newtypes;
+
+ /* Replace the passed-in pointer */
+ *p_types = newtypes;
+ return 0;
}
static npy_bool
@@ -247,10 +255,13 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
*/
if (descr->f->cancastto == NULL) {
descr->f->cancastto = (int *)malloc(1*sizeof(int));
+ if (descr->f->cancastto == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
descr->f->cancastto[0] = NPY_NOTYPE;
}
- descr->f->cancastto = _append_new(descr->f->cancastto,
- totype);
+ return _append_new(&descr->f->cancastto, totype);
}
else {
/* register with cancastscalarkindto */
@@ -258,6 +269,10 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
int i;
descr->f->cancastscalarkindto =
(int **)malloc(NPY_NSCALARKINDS* sizeof(int*));
+ if (descr->f->cancastscalarkindto == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
for (i = 0; i < NPY_NSCALARKINDS; i++) {
descr->f->cancastscalarkindto[i] = NULL;
}
@@ -265,11 +280,13 @@ PyArray_RegisterCanCast(PyArray_Descr *descr, int totype,
if (descr->f->cancastscalarkindto[scalar] == NULL) {
descr->f->cancastscalarkindto[scalar] =
(int *)malloc(1*sizeof(int));
+ if (descr->f->cancastscalarkindto[scalar] == NULL) {
+ PyErr_NoMemory();
+ return -1;
+ }
descr->f->cancastscalarkindto[scalar][0] =
NPY_NOTYPE;
}
- descr->f->cancastscalarkindto[scalar] =
- _append_new(descr->f->cancastscalarkindto[scalar], totype);
+ return _append_new(&descr->f->cancastscalarkindto[scalar], totype);
}
- return 0;
}
diff --git a/numpy/core/src/npymath/halffloat.c b/numpy/core/src/npymath/halffloat.c
index c2bd28d60..84af86009 100644
--- a/numpy/core/src/npymath/halffloat.c
+++ b/numpy/core/src/npymath/halffloat.c
@@ -301,15 +301,23 @@ npy_uint16 npy_floatbits_to_halfbits(npy_uint32 f)
npy_set_floatstatus_underflow();
}
#endif
+ /*
+ * Usually the significand is shifted by 13. For subnormals an
+ * additional shift needs to occur. This shift is one for the largest
+ * exponent giving a subnormal `f_exp = 0x38000000 >> 23 = 112`, which
+ * offsets the new first bit. At most the shift can be 1+10 bits.
+ */
f_sig >>= (113 - f_exp);
/* Handle rounding by adding 1 to the bit beyond half precision */
#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
* If the last bit in the half significand is 0 (already even), and
* the remaining bit pattern is 1000...0, then we do not add one
- * to the bit after the half significand. In all other cases, we do.
+ * to the bit after the half significand. However, the (113 - f_exp)
+ * shift can lose up to 11 bits, so the || checks them in the original.
+ * In all other cases, we can just add one.
*/
- if ((f_sig&0x00003fffu) != 0x00001000u) {
+ if (((f_sig&0x00003fffu) != 0x00001000u) || (f&0x000007ffu)) {
f_sig += 0x00001000u;
}
#else
@@ -416,7 +424,16 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
npy_set_floatstatus_underflow();
}
#endif
- d_sig >>= (1009 - d_exp);
+ /*
+ * Unlike floats, doubles have enough room to shift left to align
+ * the subnormal significand leading to no loss of the last bits.
+ * The smallest possible exponent giving a subnormal is:
+ * `d_exp = 0x3e60000000000000 >> 52 = 998`. All larger subnormals are
+ * shifted with respect to it. This adds a shift of 10+1 bits the final
+ * right shift when comparing it to the one in the normal branch.
+ */
+ assert(d_exp - 998 >= 0);
+ d_sig <<= (d_exp - 998);
/* Handle rounding by adding 1 to the bit beyond half precision */
#if NPY_HALF_ROUND_TIES_TO_EVEN
/*
@@ -424,13 +441,13 @@ npy_uint16 npy_doublebits_to_halfbits(npy_uint64 d)
* the remaining bit pattern is 1000...0, then we do not add one
* to the bit after the half significand. In all other cases, we do.
*/
- if ((d_sig&0x000007ffffffffffULL) != 0x0000020000000000ULL) {
- d_sig += 0x0000020000000000ULL;
+ if ((d_sig&0x003fffffffffffffULL) != 0x0010000000000000ULL) {
+ d_sig += 0x0010000000000000ULL;
}
#else
- d_sig += 0x0000020000000000ULL;
+ d_sig += 0x0010000000000000ULL;
#endif
- h_sig = (npy_uint16) (d_sig >> 42);
+ h_sig = (npy_uint16) (d_sig >> 53);
/*
* If the rounding causes a bit to spill into h_exp, it will
* increment h_exp from zero to one and h_sig will be zero.
diff --git a/numpy/core/src/npymath/ieee754.c.src b/numpy/core/src/npymath/ieee754.c.src
index 8b5eef87a..3f66b24a4 100644
--- a/numpy/core/src/npymath/ieee754.c.src
+++ b/numpy/core/src/npymath/ieee754.c.src
@@ -568,13 +568,21 @@ int npy_get_floatstatus() {
/*
* Functions to set the floating point status word.
- * keep in sync with NO_FLOATING_POINT_SUPPORT in ufuncobject.h
*/
#if (defined(__unix__) || defined(unix)) && !defined(USG)
#include <sys/param.h>
#endif
+
+/*
+ * Define floating point status functions. We must define
+ * npy_get_floatstatus_barrier, npy_clear_floatstatus_barrier,
+ * npy_set_floatstatus_{divbyzero, overflow, underflow, invalid}
+ * for all supported platforms.
+ */
+
+
/* Solaris --------------------------------------------------------*/
/* --------ignoring SunOS ieee_flags approach, someone else can
** deal with that! */
@@ -626,117 +634,95 @@ void npy_set_floatstatus_invalid(void)
fpsetsticky(FP_X_INV);
}
+#elif defined(_AIX)
+#include <float.h>
+#include <fpxcp.h>
-#elif defined(__GLIBC__) || defined(__APPLE__) || \
- defined(__CYGWIN__) || defined(__MINGW32__) || \
- (defined(__FreeBSD__) && (__FreeBSD_version >= 502114))
-# include <fenv.h>
-
-int npy_get_floatstatus_barrier(char* param)
+int npy_get_floatstatus_barrier(char *param)
{
- int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
- FE_UNDERFLOW | FE_INVALID);
+ int fpstatus = fp_read_flag();
/*
* By using a volatile, the compiler cannot reorder this call
*/
if (param != NULL) {
volatile char NPY_UNUSED(c) = *(char*)param;
}
-
- return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
- ((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
- ((FE_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
- ((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
+ return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
+ ((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
+ ((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
+ ((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
int npy_clear_floatstatus_barrier(char * param)
{
- /* testing float status is 50-100 times faster than clearing on x86 */
int fpstatus = npy_get_floatstatus_barrier(param);
- if (fpstatus != 0) {
- feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
- FE_UNDERFLOW | FE_INVALID);
- }
+ fp_swap_flag(0);
return fpstatus;
}
-
void npy_set_floatstatus_divbyzero(void)
{
- feraiseexcept(FE_DIVBYZERO);
+ fp_raise_xcp(FP_DIV_BY_ZERO);
}
void npy_set_floatstatus_overflow(void)
{
- feraiseexcept(FE_OVERFLOW);
+ fp_raise_xcp(FP_OVERFLOW);
}
void npy_set_floatstatus_underflow(void)
{
- feraiseexcept(FE_UNDERFLOW);
+ fp_raise_xcp(FP_UNDERFLOW);
}
void npy_set_floatstatus_invalid(void)
{
- feraiseexcept(FE_INVALID);
-}
-
-#elif defined(_AIX)
-#include <float.h>
-#include <fpxcp.h>
-
-int npy_get_floatstatus_barrier(char *param)
-{
- int fpstatus = fp_read_flag();
- /*
- * By using a volatile, the compiler cannot reorder this call
- */
- if (param != NULL) {
- volatile char NPY_UNUSED(c) = *(char*)param;
- }
- return ((FP_DIV_BY_ZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
- ((FP_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
- ((FP_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
- ((FP_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
+ fp_raise_xcp(FP_INVALID);
}
-int npy_clear_floatstatus_barrier(char * param)
-{
- int fpstatus = npy_get_floatstatus_barrier(param);
- fp_swap_flag(0);
+#elif defined(_MSC_VER) || (defined(__osf__) && defined(__alpha)) || \
+ defined (__UCLIBC__) || (defined(__arc__) && defined(__GLIBC__))
- return fpstatus;
-}
+/*
+ * By using a volatile floating point value,
+ * the compiler is forced to actually do the requested
+ * operations because of potential concurrency.
+ *
+ * We shouldn't write multiple values to a single
+ * global here, because that would cause
+ * a race condition.
+ */
+static volatile double _npy_floatstatus_x,
+ _npy_floatstatus_zero = 0.0, _npy_floatstatus_big = 1e300,
+ _npy_floatstatus_small = 1e-300, _npy_floatstatus_inf;
void npy_set_floatstatus_divbyzero(void)
{
- fp_raise_xcp(FP_DIV_BY_ZERO);
+ _npy_floatstatus_x = 1.0 / _npy_floatstatus_zero;
}
void npy_set_floatstatus_overflow(void)
{
- fp_raise_xcp(FP_OVERFLOW);
+ _npy_floatstatus_x = _npy_floatstatus_big * 1e300;
}
void npy_set_floatstatus_underflow(void)
{
- fp_raise_xcp(FP_UNDERFLOW);
+ _npy_floatstatus_x = _npy_floatstatus_small * 1e-300;
}
void npy_set_floatstatus_invalid(void)
{
- fp_raise_xcp(FP_INVALID);
+ _npy_floatstatus_inf = NPY_INFINITY;
+ _npy_floatstatus_x = _npy_floatstatus_inf - NPY_INFINITY;
}
-#else
-
/* MS Windows -----------------------------------------------------*/
#if defined(_MSC_VER)
#include <float.h>
-
int npy_get_floatstatus_barrier(char *param)
{
/*
@@ -796,53 +782,61 @@ int npy_clear_floatstatus_barrier(char *param)
return fpstatus;
}
+#endif
+/* End of defined(_MSC_VER) || (defined(__osf__) && defined(__alpha)) */
+
#else
+/* General GCC code, should work on most platforms */
+# include <fenv.h>
-int npy_get_floatstatus_barrier(char *NPY_UNUSED(param))
+int npy_get_floatstatus_barrier(char* param)
{
- return 0;
+ int fpstatus = fetestexcept(FE_DIVBYZERO | FE_OVERFLOW |
+ FE_UNDERFLOW | FE_INVALID);
+ /*
+ * By using a volatile, the compiler cannot reorder this call
+ */
+ if (param != NULL) {
+ volatile char NPY_UNUSED(c) = *(char*)param;
+ }
+
+ return ((FE_DIVBYZERO & fpstatus) ? NPY_FPE_DIVIDEBYZERO : 0) |
+ ((FE_OVERFLOW & fpstatus) ? NPY_FPE_OVERFLOW : 0) |
+ ((FE_UNDERFLOW & fpstatus) ? NPY_FPE_UNDERFLOW : 0) |
+ ((FE_INVALID & fpstatus) ? NPY_FPE_INVALID : 0);
}
-int npy_clear_floatstatus_barrier(char *param)
+int npy_clear_floatstatus_barrier(char * param)
{
+ /* testing float status is 50-100 times faster than clearing on x86 */
int fpstatus = npy_get_floatstatus_barrier(param);
- return 0;
-}
+ if (fpstatus != 0) {
+ feclearexcept(FE_DIVBYZERO | FE_OVERFLOW |
+ FE_UNDERFLOW | FE_INVALID);
+ }
-#endif
+ return fpstatus;
+}
-/*
- * By using a volatile floating point value,
- * the compiler is forced to actually do the requested
- * operations because of potential concurrency.
- *
- * We shouldn't write multiple values to a single
- * global here, because that would cause
- * a race condition.
- */
-static volatile double _npy_floatstatus_x,
- _npy_floatstatus_zero = 0.0, _npy_floatstatus_big = 1e300,
- _npy_floatstatus_small = 1e-300, _npy_floatstatus_inf;
void npy_set_floatstatus_divbyzero(void)
{
- _npy_floatstatus_x = 1.0 / _npy_floatstatus_zero;
+ feraiseexcept(FE_DIVBYZERO);
}
void npy_set_floatstatus_overflow(void)
{
- _npy_floatstatus_x = _npy_floatstatus_big * 1e300;
+ feraiseexcept(FE_OVERFLOW);
}
void npy_set_floatstatus_underflow(void)
{
- _npy_floatstatus_x = _npy_floatstatus_small * 1e-300;
+ feraiseexcept(FE_UNDERFLOW);
}
void npy_set_floatstatus_invalid(void)
{
- _npy_floatstatus_inf = NPY_INFINITY;
- _npy_floatstatus_x = _npy_floatstatus_inf - NPY_INFINITY;
+ feraiseexcept(FE_INVALID);
}
#endif
diff --git a/numpy/core/src/npymath/npy_math_complex.c.src b/numpy/core/src/npymath/npy_math_complex.c.src
index cf427dad8..8c432e483 100644
--- a/numpy/core/src/npymath/npy_math_complex.c.src
+++ b/numpy/core/src/npymath/npy_math_complex.c.src
@@ -40,13 +40,14 @@
* flag in an efficient way. The flag is IEEE specific. See
* https://github.com/freebsd/freebsd/blob/4c6378299/lib/msun/src/catrig.c#L42
*/
+#if !defined(HAVE_CACOSF) || !defined(HAVE_CACOSL) || !defined(HAVE_CASINHF) || !defined(HAVE_CASINHL)
#define raise_inexact() do { \
volatile npy_float NPY_UNUSED(junk) = 1 + tiny; \
} while (0)
static const volatile npy_float tiny = 3.9443045e-31f;
-
+#endif
/**begin repeat
* #type = npy_float, npy_double, npy_longdouble#
@@ -64,9 +65,6 @@ static const volatile npy_float tiny = 3.9443045e-31f;
* Constants
*=========================================================*/
static const @ctype@ c_1@c@ = {1.0@C@, 0.0};
-static const @ctype@ c_half@c@ = {0.5@C@, 0.0};
-static const @ctype@ c_i@c@ = {0.0, 1.0@C@};
-static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@};
/*==========================================================
* Helper functions
@@ -76,22 +74,6 @@ static const @ctype@ c_ihalf@c@ = {0.0, 0.5@C@};
*=========================================================*/
static NPY_INLINE
@ctype@
-cadd@c@(@ctype@ a, @ctype@ b)
-{
- return npy_cpack@c@(npy_creal@c@(a) + npy_creal@c@(b),
- npy_cimag@c@(a) + npy_cimag@c@(b));
-}
-
-static NPY_INLINE
-@ctype@
-csub@c@(@ctype@ a, @ctype@ b)
-{
- return npy_cpack@c@(npy_creal@c@(a) - npy_creal@c@(b),
- npy_cimag@c@(a) - npy_cimag@c@(b));
-}
-
-static NPY_INLINE
-@ctype@
cmul@c@(@ctype@ a, @ctype@ b)
{
@type@ ar, ai, br, bi;
@@ -132,20 +114,6 @@ cdiv@c@(@ctype@ a, @ctype@ b)
}
}
-static NPY_INLINE
-@ctype@
-cneg@c@(@ctype@ a)
-{
- return npy_cpack@c@(-npy_creal@c@(a), -npy_cimag@c@(a));
-}
-
-static NPY_INLINE
-@ctype@
-cmuli@c@(@ctype@ a)
-{
- return npy_cpack@c@(-npy_cimag@c@(a), npy_creal@c@(a));
-}
-
/*==========================================================
* Custom implementation of missing complex C99 functions
*=========================================================*/
@@ -1246,7 +1214,7 @@ _clog_for_large_values@c@(@type@ x, @type@ y,
* Divide x and y by E, and then add 1 to the logarithm. This depends
* on E being larger than sqrt(2).
* Dividing by E causes an insignificant loss of accuracy; however
- * this method is still poor since it is uneccessarily slow.
+ * this method is still poor since it is unnecessarily slow.
*/
if (ax > @TMAX@ / 2) {
*rr = npy_log@c@(npy_hypot@c@(x / NPY_E@c@, y / NPY_E@c@)) + 1;
@@ -1430,19 +1398,14 @@ npy_casinh@c@(@ctype@ z)
#if @precision@ == 1
/* this is sqrt(6*EPS) */
const npy_float SQRT_6_EPSILON = 8.4572793338e-4f;
- /* chosen such that pio2_hi + pio2_lo == pio2_hi but causes FE_INEXACT. */
- const volatile npy_float pio2_lo = 7.5497899549e-9f;
#endif
#if @precision@ == 2
const npy_double SQRT_6_EPSILON = 3.65002414998885671e-08;
- const volatile npy_double pio2_lo = 6.1232339957367659e-17;
#endif
#if @precision@ == 3
const npy_longdouble SQRT_6_EPSILON = 8.0654900873493277169e-10l;
- const volatile npy_longdouble pio2_lo = 2.710505431213761085e-20l;
#endif
const @type@ RECIP_EPSILON = 1.0@c@ / @TEPS@;
- const @type@ pio2_hi = NPY_PI_2@c@;
@type@ x, y, ax, ay, wx, wy, rx, ry, B, sqrt_A2my2, new_y;
npy_int B_is_usable;
diff --git a/numpy/core/src/npymath/npy_math_internal.h.src b/numpy/core/src/npymath/npy_math_internal.h.src
index f2e5229b0..18b6d1434 100644
--- a/numpy/core/src/npymath/npy_math_internal.h.src
+++ b/numpy/core/src/npymath/npy_math_internal.h.src
@@ -654,7 +654,7 @@ npy_divmod@c@(@type@ a, @type@ b, @type@ *modulus)
}
else {
/* if mod is zero ensure correct sign */
- mod = (b > 0) ? 0.0@c@ : -0.0@c@;
+ mod = npy_copysign@c@(0, b);
}
/* snap quotient to nearest integral value */
@@ -665,7 +665,7 @@ npy_divmod@c@(@type@ a, @type@ b, @type@ *modulus)
}
else {
/* if div is zero ensure correct sign */
- floordiv = (a / b > 0) ? 0.0@c@ : -0.0@c@;
+ floordiv = npy_copysign@c@(0, a/b);
}
*modulus = mod;
@@ -716,3 +716,44 @@ npy_@func@@c@(@type@ a, @type@ b)
return npy_@func@u@c@(a < 0 ? -a : a, b < 0 ? -b : b);
}
/**end repeat**/
+
+/* Unlike LCM and GCD, we need byte and short variants for the shift operators,
+ * since the result is dependent on the width of the type
+ */
+/**begin repeat
+ *
+ * #type = byte, short, int, long, longlong#
+ * #c = hh,h,,l,ll#
+ */
+/**begin repeat1
+ *
+ * #u = u,#
+ * #is_signed = 0,1#
+ */
+NPY_INPLACE npy_@u@@type@
+npy_lshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b)
+{
+ if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) {
+ return a << b;
+ }
+ else {
+ return 0;
+ }
+}
+NPY_INPLACE npy_@u@@type@
+npy_rshift@u@@c@(npy_@u@@type@ a, npy_@u@@type@ b)
+{
+ if (NPY_LIKELY((size_t)b < sizeof(a) * CHAR_BIT)) {
+ return a >> b;
+ }
+#if @is_signed@
+ else if (a < 0) {
+ return (npy_@u@@type@)-1; /* preserve the sign bit */
+ }
+#endif
+ else {
+ return 0;
+ }
+}
+/**end repeat1**/
+/**end repeat**/
diff --git a/numpy/core/src/npysort/npysort_common.h b/numpy/core/src/npysort/npysort_common.h
index a22045b41..5fd03b96f 100644
--- a/numpy/core/src/npysort/npysort_common.h
+++ b/numpy/core/src/npysort/npysort_common.h
@@ -273,10 +273,10 @@ STRING_SWAP(char *s1, char *s2, size_t len)
NPY_INLINE static int
-STRING_LT(char *s1, char *s2, size_t len)
+STRING_LT(const char *s1, const char *s2, size_t len)
{
- const unsigned char *c1 = (unsigned char *)s1;
- const unsigned char *c2 = (unsigned char *)s2;
+ const unsigned char *c1 = (const unsigned char *)s1;
+ const unsigned char *c2 = (const unsigned char *)s2;
size_t i;
int ret = 0;
@@ -311,7 +311,7 @@ UNICODE_SWAP(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
NPY_INLINE static int
-UNICODE_LT(npy_ucs4 *s1, npy_ucs4 *s2, size_t len)
+UNICODE_LT(const npy_ucs4 *s1, const npy_ucs4 *s2, size_t len)
{
size_t i;
int ret = 0;
diff --git a/numpy/core/src/npysort/radixsort.c.src b/numpy/core/src/npysort/radixsort.c.src
new file mode 100644
index 000000000..72887d7e4
--- /dev/null
+++ b/numpy/core/src/npysort/radixsort.c.src
@@ -0,0 +1,231 @@
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "npy_sort.h"
+#include "npysort_common.h"
+#include <stdlib.h>
+
+/*
+ *****************************************************************************
+ ** INTEGER SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG#
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong#
+ * #type = npy_ubyte, npy_ubyte, npy_ubyte, npy_ushort, npy_ushort, npy_uint,
+ * npy_uint, npy_ulong, npy_ulong, npy_ulonglong, npy_ulonglong#
+ * #sign = 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
+ * #floating = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0#
+ */
+
+// Reference: https://github.com/eloj/radix-sorting#-key-derivation
+#if @sign@
+ // Floating-point is currently disabled.
+ // Floating-point tests succeed for double and float on macOS but not on Windows/Linux.
+ // Basic sorting tests succeed but others relying on sort fail.
+ // Possibly related to floating-point normalisation or multiple NaN reprs? Not sure.
+ #if @floating@
+ // For floats, we invert the key if the sign bit is set, else we invert the sign bit.
+ #define KEY_OF(x) ((x) ^ (-((x) >> (sizeof(@type@) * 8 - 1)) | ((@type@)1 << (sizeof(@type@) * 8 - 1))))
+ #else
+ // For signed ints, we flip the sign bit so the negatives are below the positives.
+ #define KEY_OF(x) ((x) ^ ((@type@)1 << (sizeof(@type@) * 8 - 1)))
+ #endif
+#else
+ // For unsigned ints, the key is as-is
+ #define KEY_OF(x) (x)
+#endif
+
+static inline npy_ubyte
+nth_byte_@suff@(@type@ key, npy_intp l) {
+ return (key >> (l << 3)) & 0xFF;
+}
+
+@type@*
+radixsort0_@suff@(@type@ *arr, @type@ *aux, npy_intp num)
+{
+ npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
+ npy_intp i;
+ size_t l;
+ @type@ key0 = KEY_OF(arr[0]);
+ size_t ncols = 0;
+ npy_ubyte cols[sizeof(@type@)];
+
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[i]);
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ cnt[l][nth_byte_@suff@(k, l)]++;
+ }
+ }
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ if (cnt[l][nth_byte_@suff@(key0, l)] != num) {
+ cols[ncols++] = l;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ npy_intp a = 0;
+ for (i = 0; i < 256; i++) {
+ npy_intp b = cnt[cols[l]][i];
+ cnt[cols[l]][i] = a;
+ a += b;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ @type@* temp;
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[i]);
+ npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++;
+ aux[dst] = arr[i];
+ }
+
+ temp = aux;
+ aux = arr;
+ arr = temp;
+ }
+
+ return arr;
+}
+
+int
+radixsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
+{
+ void *sorted;
+ @type@ *aux;
+ @type@ *arr = start;
+ @type@ k1, k2;
+ npy_bool all_sorted = 1;
+
+ if (num < 2) {
+ return 0;
+ }
+
+ k1 = KEY_OF(arr[0]);
+ for (npy_intp i = 1; i < num; i++) {
+ k2 = KEY_OF(arr[i]);
+ if (k1 > k2) {
+ all_sorted = 0;
+ break;
+ }
+ k1 = k2;
+ }
+
+ if (all_sorted) {
+ return 0;
+ }
+
+ aux = malloc(num * sizeof(@type@));
+ if (aux == NULL) {
+ return -NPY_ENOMEM;
+ }
+
+ sorted = radixsort0_@suff@(start, aux, num);
+ if (sorted != start) {
+ memcpy(start, sorted, num * sizeof(@type@));
+ }
+
+ free(aux);
+ return 0;
+}
+
+npy_intp*
+aradixsort0_@suff@(@type@ *arr, npy_intp *aux, npy_intp *tosort, npy_intp num)
+{
+ npy_intp cnt[sizeof(@type@)][1 << 8] = { { 0 } };
+ npy_intp i;
+ size_t l;
+ @type@ key0 = KEY_OF(arr[0]);
+ size_t ncols = 0;
+ npy_ubyte cols[sizeof(@type@)];
+
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[i]);
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ cnt[l][nth_byte_@suff@(k, l)]++;
+ }
+ }
+
+ for (l = 0; l < sizeof(@type@); l++) {
+ if (cnt[l][nth_byte_@suff@(key0, l)] != num) {
+ cols[ncols++] = l;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ npy_intp a = 0;
+ for (i = 0; i < 256; i++) {
+ npy_intp b = cnt[cols[l]][i];
+ cnt[cols[l]][i] = a;
+ a += b;
+ }
+ }
+
+ for (l = 0; l < ncols; l++) {
+ npy_intp* temp;
+ for (i = 0; i < num; i++) {
+ @type@ k = KEY_OF(arr[tosort[i]]);
+ npy_intp dst = cnt[cols[l]][nth_byte_@suff@(k, cols[l])]++;
+ aux[dst] = tosort[i];
+ }
+
+ temp = aux;
+ aux = tosort;
+ tosort = temp;
+ }
+
+ return tosort;
+}
+
+int
+aradixsort_@suff@(void *start, npy_intp* tosort, npy_intp num, void *NPY_UNUSED(varr))
+{
+ npy_intp *sorted;
+ npy_intp *aux;
+ @type@ *arr = start;
+ @type@ k1, k2;
+ npy_bool all_sorted = 1;
+
+ if (num < 2) {
+ return 0;
+ }
+
+ k1 = KEY_OF(arr[tosort[0]]);
+ for (npy_intp i = 1; i < num; i++) {
+ k2 = KEY_OF(arr[tosort[i]]);
+ if (k1 > k2) {
+ all_sorted = 0;
+ break;
+ }
+ k1 = k2;
+ }
+
+ if (all_sorted) {
+ return 0;
+ }
+
+ aux = malloc(num * sizeof(npy_intp));
+ if (aux == NULL) {
+ return -NPY_ENOMEM;
+ }
+
+ sorted = aradixsort0_@suff@(start, aux, tosort, num);
+ if (sorted != tosort) {
+ memcpy(tosort, sorted, num * sizeof(npy_intp));
+ }
+
+ free(aux);
+ return 0;
+}
+
+#undef KEY_OF
+
+/**end repeat**/
diff --git a/numpy/core/src/npysort/selection.c.src b/numpy/core/src/npysort/selection.c.src
index 1e0934558..be645450f 100644
--- a/numpy/core/src/npysort/selection.c.src
+++ b/numpy/core/src/npysort/selection.c.src
@@ -40,7 +40,7 @@ static NPY_INLINE void store_pivot(npy_intp pivot, npy_intp kth,
}
/*
- * If pivot is the requested kth store it, overwritting other pivots if
+ * If pivot is the requested kth store it, overwriting other pivots if
* required. This must be done so iterative partition can work without
* manually shifting lower data offset by kth each time
*/
diff --git a/numpy/core/src/npysort/timsort.c.src b/numpy/core/src/npysort/timsort.c.src
new file mode 100644
index 000000000..26313ca5b
--- /dev/null
+++ b/numpy/core/src/npysort/timsort.c.src
@@ -0,0 +1,2574 @@
+/* -*- c -*- */
+
+/*
+ * The purpose of this module is to add faster sort functions
+ * that are type-specific. This is done by altering the
+ * function table for the builtin descriptors.
+ *
+ * These sorting functions are copied almost directly from numarray
+ * with a few modifications (complex comparisons compare the imaginary
+ * part if the real parts are equal, for example), and the names
+ * are changed.
+ *
+ * The original sorting code is due to Charles R. Harris who wrote
+ * it for numarray.
+ */
+
+/*
+ * Quick sort is usually the fastest, but the worst case scenario can
+ * be slower than the merge and heap sorts. The merge sort requires
+ * extra memory and so for large arrays may not be useful.
+ *
+ * The merge sort is *stable*, meaning that equal elements keep
+ * their original relative order, so it can be used to
+ * implement lexicographic sorting on multiple keys.
+ *
+ * The heap sort is included for completeness.
+ */
+
+
+/* For details of Timsort, refer to
+ * https://github.com/python/cpython/blob/3.7/Objects/listsort.txt
+ */
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "npy_sort.h"
+#include "npysort_common.h"
+#include <stdlib.h>
+
+/* enough for 32 * 1.618 ** 128 elements */
+#define TIMSORT_STACK_SIZE 128
+
+
+
+/*
+ * Compute the minimum run length for timsort, following CPython's
+ * listsort.txt: numbers below the 64-element threshold are returned
+ * as-is, otherwise the result is roughly between 32 and 64, chosen so
+ * that num / minrun is close to a power of two.
+ */
+npy_intp compute_min_run(npy_intp num)
+{
+    npy_intp r = 0;
+
+    /* r records whether any set bit was shifted out (rounds the result up). */
+    while (64 < num) {
+        r |= num & 1;
+        num >>= 1;
+    }
+
+    return num + r;
+}
+
+/* A sorted run awaiting merging; element of the timsort merge stack. */
+typedef struct {
+    npy_intp s; /* start pointer */
+    npy_intp l; /* length */
+} run;
+
+
+/* buffer for argsort. Declared here to avoid multiple declarations. */
+/* Grow-only heap scratch space of npy_intp indices; pw stays NULL until
+ * the first resize_buffer_intp() call. */
+typedef struct {
+    npy_intp *pw;
+    npy_intp size;
+} buffer_intp;
+
+
+/* buffer method */
+/*
+ * Grow `buffer` so it can hold at least `new_size` npy_intp elements.
+ * Never shrinks an existing allocation. Returns 0 on success,
+ * -NPY_ENOMEM on allocation failure.
+ *
+ * NOTE(review): on realloc failure the previous allocation is leaked,
+ * because buffer->pw is overwritten with NULL before the check; also
+ * buffer->size is updated even when the allocation failed — confirm
+ * this is acceptable on the (rare) out-of-memory path.
+ */
+static NPY_INLINE int
+resize_buffer_intp(buffer_intp *buffer, npy_intp new_size)
+{
+    if (new_size <= buffer->size) {
+        return 0;
+    }
+
+    if (NPY_UNLIKELY(buffer->pw == NULL)) {
+        buffer->pw = malloc(new_size * sizeof(npy_intp));
+    } else {
+        buffer->pw = realloc(buffer->pw, new_size * sizeof(npy_intp));
+    }
+
+    buffer->size = new_size;
+
+    if (NPY_UNLIKELY(buffer->pw == NULL)) {
+        return -NPY_ENOMEM;
+    } else {
+        return 0;
+    }
+}
+
+/*
+ *****************************************************************************
+ ** NUMERIC SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #TYPE = BOOL, BYTE, UBYTE, SHORT, USHORT, INT, UINT, LONG, ULONG,
+ * LONGLONG, ULONGLONG, HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE, DATETIME, TIMEDELTA#
+ * #suff = bool, byte, ubyte, short, ushort, int, uint, long, ulong,
+ * longlong, ulonglong, half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble, datetime, timedelta#
+ * #type = npy_bool, npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int,
+ * npy_uint, npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_ushort, npy_float, npy_double, npy_longdouble, npy_cfloat,
+ * npy_cdouble, npy_clongdouble, npy_datetime, npy_timedelta#
+ */
+
+
+/* Grow-only scratch buffer of @type@ elements used when merging runs. */
+typedef struct {
+    @type@ * pw;
+    npy_intp size;
+} buffer_@suff@;
+
+
+/*
+ * Grow `buffer` so it can hold at least `new_size` @type@ elements.
+ * Never shrinks. Returns 0 on success, -NPY_ENOMEM on failure.
+ *
+ * NOTE(review): on realloc failure the previous allocation is leaked
+ * (pw is overwritten with NULL), and size is updated even on failure.
+ */
+static NPY_INLINE int
+resize_buffer_@suff@(buffer_@suff@ *buffer, npy_intp new_size)
+{
+    if (new_size <= buffer->size) {
+        return 0;
+    }
+
+    if (NPY_UNLIKELY(buffer->pw == NULL)) {
+        buffer->pw = malloc(new_size * sizeof(@type@));
+    } else {
+        buffer->pw = realloc(buffer->pw, new_size * sizeof(@type@));
+    }
+
+    buffer->size = new_size;
+
+    if (NPY_UNLIKELY(buffer->pw == NULL)) {
+        return -NPY_ENOMEM;
+    } else {
+        return 0;
+    }
+}
+
+
+/*
+ * Identify the run starting at index l and make it ascending.
+ * A (not strictly) ascending prefix is taken as-is; a strictly
+ * descending prefix is reversed in place (strictness preserves
+ * stability). If the natural run is shorter than minrun, it is
+ * extended to min(minrun, num - l) elements with insertion sort.
+ * Returns the length of the resulting sorted run.
+ */
+static npy_intp
+count_run_@suff@(@type@ *arr, npy_intp l, npy_intp num, npy_intp minrun)
+{
+    npy_intp sz;
+    @type@ vc, *pl, *pi, *pj, *pr;
+
+    if (NPY_UNLIKELY(num - l == 1)) {
+        return 1;
+    }
+
+    pl = arr + l;
+
+    /* (not strictly) ascending sequence */
+    if (!@TYPE@_LT(*(pl + 1), *pl)) {
+        for (pi = pl + 1; pi < arr + num - 1 && !@TYPE@_LT(*(pi + 1), *pi); ++pi) {
+        }
+    } else { /* (strictly) descending sequence */
+        for (pi = pl + 1; pi < arr + num - 1 && @TYPE@_LT(*(pi + 1), *pi); ++pi) {
+        }
+
+        /* reverse the descending run in place */
+        for (pj = pl, pr = pi; pj < pr; ++pj, --pr) {
+            @TYPE@_SWAP(*pj, *pr);
+        }
+    }
+
+    ++pi;
+    sz = pi - pl;
+
+    if (sz < minrun) {
+        if (l + minrun < num) {
+            sz = minrun;
+        } else {
+            sz = num - l;
+        }
+
+        pr = pl + sz;
+
+        /* insertion sort */
+        for (; pi < pr; ++pi) {
+            vc = *pi;
+            pj = pi;
+
+            while (pl < pj && @TYPE@_LT(vc, *(pj - 1))) {
+                *pj = *(pj - 1);
+                --pj;
+            }
+
+            *pj = vc;
+        }
+    }
+
+    return sz;
+}
+
+
+/* when the left part of the array (p1) is smaller, copy p1 to buffer
+ * and merge from left to right
+ */
+static void
+merge_left_@suff@(@type@ *p1, npy_intp l1, @type@ *p2, npy_intp l2,
+                  @type@ *p3)
+{
+    @type@ *end = p2 + l2;
+    memcpy(p3, p1, sizeof(@type@) * l1);
+    /* first element must be in p2 otherwise skipped in the caller */
+    *p1++ = *p2++;
+
+    while (p1 < p2 && p2 < end) {
+        /* stability: take from the right run only when strictly smaller */
+        if (@TYPE@_LT(*p2, *p3)) {
+            *p1++ = *p2++;
+        } else {
+            *p1++ = *p3++;
+        }
+    }
+
+    /* flush whatever is left of the buffered left run */
+    if (p1 != p2) {
+        memcpy(p1, p3, sizeof(@type@) * (p2 - p1));
+    }
+}
+
+
+/* when the right part of the array (p2) is smaller, copy p2 to buffer
+ * and merge from right to left
+ */
+static void
+merge_right_@suff@(@type@ *p1, npy_intp l1, @type@ *p2, npy_intp l2,
+                   @type@ *p3)
+{
+    npy_intp ofs;
+    @type@ *start = p1 - 1;
+    memcpy(p3, p2, sizeof(@type@) * l2);
+    p1 += l1 - 1;
+    p2 += l2 - 1;
+    p3 += l2 - 1;
+    /* first element must be in p1 otherwise skipped in the caller */
+    *p2-- = *p1--;
+
+    while (p1 < p2 && start < p1) {
+        /* stability: on ties the buffered right-run element is placed */
+        if (@TYPE@_LT(*p3, *p1)) {
+            *p2-- = *p1--;
+        } else {
+            *p2-- = *p3--;
+        }
+    }
+
+    /* flush whatever is left of the buffered right run */
+    if (p1 != p2) {
+        ofs = p2 - start;
+        memcpy(start + 1, p3 - ofs + 1, sizeof(@type@) * ofs);
+    }
+}
+
+
+/* Note: the naming convention of gallop functions are different from that of
+ * CPython. For example, here gallop_right means gallop from left toward right,
+ * whereas in CPython gallop_right means gallop
+ * and find the right most element among equal elements
+ */
+/*
+ * Exponential (galloping) search from the left: return the smallest
+ * index ofs in [0, size] such that key < arr[ofs], i.e. the insertion
+ * point that keeps elements equal to key on its left.
+ */
+static npy_intp
+gallop_right_@suff@(const @type@ *arr, const npy_intp size, const @type@ key)
+{
+    npy_intp last_ofs, ofs, m;
+
+    if (@TYPE@_LT(key, arr[0])) {
+        return 0;
+    }
+
+    last_ofs = 0;
+    ofs = 1;
+
+    for (;;) {
+        /* ofs < 0 guards against signed overflow of the doubling below */
+        if (size <= ofs || ofs < 0) {
+            ofs = size; /* arr[ofs] is never accessed */
+            break;
+        }
+
+        if (@TYPE@_LT(key, arr[ofs])) {
+            break;
+        } else {
+            last_ofs = ofs;
+            /* ofs = 1, 3, 7, 15... */
+            ofs = (ofs << 1) + 1;
+        }
+    }
+
+    /* now that arr[last_ofs] <= key < arr[ofs] */
+    /* binary search within the bracketed range */
+    while (last_ofs + 1 < ofs) {
+        m = last_ofs + ((ofs - last_ofs) >> 1);
+
+        if (@TYPE@_LT(key, arr[m])) {
+            ofs = m;
+        } else {
+            last_ofs = m;
+        }
+    }
+
+    /* now that arr[ofs-1] <= key < arr[ofs] */
+    return ofs;
+}
+
+
+/*
+ * Exponential (galloping) search from the right end: return the
+ * smallest index r in [0, size] such that arr[r-1] < key <= arr[r],
+ * i.e. the insertion point that keeps elements equal to key on its
+ * right.
+ */
+static npy_intp
+gallop_left_@suff@(const @type@ *arr, const npy_intp size, const @type@ key)
+{
+    npy_intp last_ofs, ofs, l, m, r;
+
+    if (@TYPE@_LT(arr[size - 1], key)) {
+        return size;
+    }
+
+    last_ofs = 0;
+    ofs = 1;
+
+    for (;;) {
+        /* ofs < 0 guards against signed overflow of the doubling below */
+        if (size <= ofs || ofs < 0) {
+            ofs = size;
+            break;
+        }
+
+        if (@TYPE@_LT(arr[size - ofs - 1], key)) {
+            break;
+        } else {
+            last_ofs = ofs;
+            ofs = (ofs << 1) + 1;
+        }
+    }
+
+    /* now that arr[size-ofs-1] < key <= arr[size-last_ofs-1] */
+    l = size - ofs - 1;
+    r = size - last_ofs - 1;
+
+    /* binary search within the bracketed range */
+    while (l + 1 < r) {
+        m = l + ((r - l) >> 1);
+
+        if (@TYPE@_LT(arr[m], key)) {
+            l = m;
+        } else {
+            r = m;
+        }
+    }
+
+    /* now that arr[r-1] < key <= arr[r] */
+    return r;
+}
+
+
+/*
+ * Merge the adjacent runs stack[at] and stack[at + 1]. Galloping first
+ * trims the prefix of the left run and the suffix of the right run
+ * that are already in their final positions; the smaller remaining
+ * side is copied into `buffer` and merged back in place. Returns 0 on
+ * success, -NPY_ENOMEM if the buffer cannot be grown.
+ */
+static int
+merge_at_@suff@(@type@ *arr, const run *stack, const npy_intp at,
+                buffer_@suff@ *buffer)
+{
+    int ret;
+    npy_intp s1, l1, s2, l2, k;
+    @type@ *p1, *p2;
+    s1 = stack[at].s;
+    l1 = stack[at].l;
+    s2 = stack[at + 1].s;
+    l2 = stack[at + 1].l;
+    /* arr[s2] belongs to arr[s1+k].
+     * if you try to comment this out for debugging purposes, remember
+     * that in the merging process the first element is skipped
+     */
+    k = gallop_right_@suff@(arr + s1, l1, arr[s2]);
+
+    if (l1 == k) {
+        /* already sorted */
+        return 0;
+    }
+
+    p1 = arr + s1 + k;
+    l1 -= k;
+    p2 = arr + s2;
+    /* arr[s2-1] belongs to arr[s2+l2] */
+    l2 = gallop_left_@suff@(arr + s2, l2, arr[s2 - 1]);
+
+    if (l2 < l1) {
+        ret = resize_buffer_@suff@(buffer, l2);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        merge_right_@suff@(p1, l1, p2, l2, buffer->pw);
+    } else {
+        ret = resize_buffer_@suff@(buffer, l1);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        merge_left_@suff@(p1, l1, p2, l2, buffer->pw);
+    }
+
+    return 0;
+}
+
+
+/*
+ * Restore the run-stack invariants after a new run has been pushed:
+ * with the topmost run lengths named A (top-3), B (top-2), C (top-1),
+ * merge until A > B + C and B > C hold. The additional check one level
+ * deeper (stack[top-4]) mirrors CPython's corrected invariant from
+ * listsort.txt. The smaller of the two candidate merges is performed.
+ * Returns 0, or a negative error (-NPY_ENOMEM) propagated from
+ * merge_at.
+ */
+static int
+try_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
+                    buffer_@suff@ *buffer)
+{
+    int ret;
+    npy_intp A, B, C, top;
+    top = *stack_ptr;
+
+    while (1 < top) {
+        B = stack[top - 2].l;
+        C = stack[top - 1].l;
+
+        /* invariant A > B + C violated (here or one level deeper) */
+        if ((2 < top && stack[top - 3].l <= B + C) ||
+            (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) {
+            A = stack[top - 3].l;
+
+            if (A <= C) {
+                /* merge A and B; C stays on top of the stack */
+                ret = merge_at_@suff@(arr, stack, top - 3, buffer);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 3].l += B;
+                stack[top - 2] = stack[top - 1];
+                --top;
+            } else {
+                /* merge B and C */
+                ret = merge_at_@suff@(arr, stack, top - 2, buffer);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 2].l += C;
+                --top;
+            }
+        } else if (1 < top && B <= C) {
+            /* invariant B > C violated: merge B and C */
+            ret = merge_at_@suff@(arr, stack, top - 2, buffer);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += C;
+            --top;
+        } else {
+            break;
+        }
+    }
+
+    *stack_ptr = top;
+    return 0;
+}
+
+/*
+ * Final collapse at the end of the sort: merge all runs remaining on
+ * the stack into one, always preferring the cheaper of the two
+ * candidate merges. Returns 0, or a negative error (-NPY_ENOMEM)
+ * propagated from merge_at.
+ */
+static int
+force_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
+                      buffer_@suff@ *buffer)
+{
+    int ret;
+    npy_intp top = *stack_ptr;
+
+    while (2 < top) {
+        if (stack[top - 3].l <= stack[top - 1].l) {
+            /* merge the two leftmost of the top three runs */
+            ret = merge_at_@suff@(arr, stack, top - 3, buffer);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 3].l += stack[top - 2].l;
+            stack[top - 2] = stack[top - 1];
+            --top;
+        } else {
+            /* merge the two topmost runs */
+            ret = merge_at_@suff@(arr, stack, top - 2, buffer);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += stack[top - 1].l;
+            --top;
+        }
+    }
+
+    if (1 < top) {
+        ret = merge_at_@suff@(arr, stack, top - 2, buffer);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+    }
+
+    return 0;
+}
+
+
+/*
+ * Timsort a contiguous array of `num` @type@ elements in place.
+ * varr is unused for numeric types. Returns 0 on success,
+ * -NPY_ENOMEM if the merge buffer cannot be grown.
+ */
+int
+timsort_@suff@(void *start, npy_intp num, void *NPY_UNUSED(varr))
+{
+    int ret;
+    npy_intp l, n, stack_ptr, minrun;
+    buffer_@suff@ buffer;
+    /* pending-run stack; TIMSORT_STACK_SIZE bounds the run count */
+    run stack[TIMSORT_STACK_SIZE];
+    buffer.pw = NULL;
+    buffer.size = 0;
+    stack_ptr = 0;
+    minrun = compute_min_run(num);
+
+    for (l = 0; l < num;) {
+        /* next sorted run starting at l (extended to minrun if needed) */
+        n = count_run_@suff@(start, l, num, minrun);
+        stack[stack_ptr].s = l;
+        stack[stack_ptr].l = n;
+        ++stack_ptr;
+        ret = try_collapse_@suff@(start, stack, &stack_ptr, &buffer);
+
+        if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+        l += n;
+    }
+
+    ret = force_collapse_@suff@(start, stack, &stack_ptr, &buffer);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    ret = 0;
+cleanup:
+
+    if (buffer.pw != NULL) {
+        free(buffer.pw);
+    }
+
+    return ret;
+}
+
+
+/* argsort */
+
+
+static npy_intp
+acount_run_@suff@(@type@ *arr, npy_intp *tosort, npy_intp l, npy_intp num,
+ npy_intp minrun)
+{
+ npy_intp sz;
+ @type@ vc;
+ npy_intp vi;
+ npy_intp *pl, *pi, *pj, *pr;
+
+ if (NPY_UNLIKELY(num - l == 1)) {
+ return 1;
+ }
+
+ pl = tosort + l;
+
+ /* (not strictly) ascending sequence */
+ if (!@TYPE@_LT(arr[*(pl + 1)], arr[*pl])) {
+ for (pi = pl + 1; pi < tosort + num - 1
+ && !@TYPE@_LT(arr[*(pi + 1)], arr[*pi]); ++pi) {
+ }
+ } else { /* (strictly) descending sequence */
+ for (pi = pl + 1; pi < tosort + num - 1
+ && @TYPE@_LT(arr[*(pi + 1)], arr[*pi]); ++pi) {
+ }
+
+ for (pj = pl, pr = pi; pj < pr; ++pj, --pr) {
+ INTP_SWAP(*pj, *pr);
+ }
+ }
+
+ ++pi;
+ sz = pi - pl;
+
+ if (sz < minrun) {
+ if (l + minrun < num) {
+ sz = minrun;
+ } else {
+ sz = num - l;
+ }
+
+ pr = pl + sz;
+
+ /* insertion sort */
+ for (; pi < pr; ++pi) {
+ vi = *pi;
+ vc = arr[*pi];
+ pj = pi;
+
+ while (pl < pj && @TYPE@_LT(vc, arr[*(pj - 1)])) {
+ *pj = *(pj - 1);
+ --pj;
+ }
+
+ *pj = vi;
+ }
+ }
+
+ return sz;
+}
+
+
+static npy_intp
+agallop_right_@suff@(const @type@ *arr, const npy_intp *tosort,
+ const npy_intp size, const @type@ key)
+{
+ npy_intp last_ofs, ofs, m;
+
+ if (@TYPE@_LT(key, arr[tosort[0]])) {
+ return 0;
+ }
+
+ last_ofs = 0;
+ ofs = 1;
+
+ for (;;) {
+ if (size <= ofs || ofs < 0) {
+ ofs = size; /* arr[ofs] is never accessed */
+ break;
+ }
+
+ if (@TYPE@_LT(key, arr[tosort[ofs]])) {
+ break;
+ } else {
+ last_ofs = ofs;
+ /* ofs = 1, 3, 7, 15... */
+ ofs = (ofs << 1) + 1;
+ }
+ }
+
+ /* now that arr[tosort[last_ofs]] <= key < arr[tosort[ofs]] */
+ while (last_ofs + 1 < ofs) {
+ m = last_ofs + ((ofs - last_ofs) >> 1);
+
+ if (@TYPE@_LT(key, arr[tosort[m]])) {
+ ofs = m;
+ } else {
+ last_ofs = m;
+ }
+ }
+
+ /* now that arr[tosort[ofs-1]] <= key < arr[tosort[ofs]] */
+ return ofs;
+}
+
+
+
+static npy_intp
+agallop_left_@suff@(const @type@ *arr, const npy_intp *tosort,
+ const npy_intp size, const @type@ key)
+{
+ npy_intp last_ofs, ofs, l, m, r;
+
+ if (@TYPE@_LT(arr[tosort[size - 1]], key)) {
+ return size;
+ }
+
+ last_ofs = 0;
+ ofs = 1;
+
+ for (;;) {
+ if (size <= ofs || ofs < 0) {
+ ofs = size;
+ break;
+ }
+
+ if (@TYPE@_LT(arr[tosort[size - ofs - 1]], key)) {
+ break;
+ } else {
+ last_ofs = ofs;
+ ofs = (ofs << 1) + 1;
+ }
+ }
+
+ /* now that arr[tosort[size-ofs-1]] < key <= arr[tosort[size-last_ofs-1]] */
+ l = size - ofs - 1;
+ r = size - last_ofs - 1;
+
+ while (l + 1 < r) {
+ m = l + ((r - l) >> 1);
+
+ if (@TYPE@_LT(arr[tosort[m]], key)) {
+ l = m;
+ } else {
+ r = m;
+ }
+ }
+
+ /* now that arr[tosort[r-1]] < key <= arr[tosort[r]] */
+ return r;
+}
+
+
+static void
+amerge_left_@suff@(@type@ *arr, npy_intp *p1, npy_intp l1, npy_intp *p2,
+ npy_intp l2,
+ npy_intp *p3)
+{
+ npy_intp *end = p2 + l2;
+ memcpy(p3, p1, sizeof(npy_intp) * l1);
+ /* first element must be in p2 otherwise skipped in the caller */
+ *p1++ = *p2++;
+
+ while (p1 < p2 && p2 < end) {
+ if (@TYPE@_LT(arr[*p2], arr[*p3])) {
+ *p1++ = *p2++;
+ } else {
+ *p1++ = *p3++;
+ }
+ }
+
+ if (p1 != p2) {
+ memcpy(p1, p3, sizeof(npy_intp) * (p2 - p1));
+ }
+}
+
+
+static void
+amerge_right_@suff@(@type@ *arr, npy_intp* p1, npy_intp l1, npy_intp *p2,
+ npy_intp l2,
+ npy_intp *p3)
+{
+ npy_intp ofs;
+ npy_intp *start = p1 - 1;
+ memcpy(p3, p2, sizeof(npy_intp) * l2);
+ p1 += l1 - 1;
+ p2 += l2 - 1;
+ p3 += l2 - 1;
+ /* first element must be in p1 otherwise skipped in the caller */
+ *p2-- = *p1--;
+
+ while (p1 < p2 && start < p1) {
+ if (@TYPE@_LT(arr[*p3], arr[*p1])) {
+ *p2-- = *p1--;
+ } else {
+ *p2-- = *p3--;
+ }
+ }
+
+ if (p1 != p2) {
+ ofs = p2 - start;
+ memcpy(start + 1, p3 - ofs + 1, sizeof(npy_intp) * ofs);
+ }
+}
+
+
+static int
+amerge_at_@suff@(@type@ *arr, npy_intp *tosort, const run *stack,
+ const npy_intp at,
+ buffer_intp *buffer)
+{
+ int ret;
+ npy_intp s1, l1, s2, l2, k;
+ npy_intp *p1, *p2;
+ s1 = stack[at].s;
+ l1 = stack[at].l;
+ s2 = stack[at + 1].s;
+ l2 = stack[at + 1].l;
+ /* tosort[s2] belongs to tosort[s1+k] */
+ k = agallop_right_@suff@(arr, tosort + s1, l1, arr[tosort[s2]]);
+
+ if (l1 == k) {
+ /* already sorted */
+ return 0;
+ }
+
+ p1 = tosort + s1 + k;
+ l1 -= k;
+ p2 = tosort + s2;
+ /* tosort[s2-1] belongs to tosort[s2+l2] */
+ l2 = agallop_left_@suff@(arr, tosort + s2, l2, arr[tosort[s2 - 1]]);
+
+ if (l2 < l1) {
+ ret = resize_buffer_intp(buffer, l2);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ amerge_right_@suff@(arr, p1, l1, p2, l2, buffer->pw);
+ } else {
+ ret = resize_buffer_intp(buffer, l1);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ amerge_left_@suff@(arr, p1, l1, p2, l2, buffer->pw);
+ }
+
+ return 0;
+}
+
+
+static int
+atry_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
+ npy_intp *stack_ptr,
+ buffer_intp *buffer)
+{
+ int ret;
+ npy_intp A, B, C, top;
+ top = *stack_ptr;
+
+ while (1 < top) {
+ B = stack[top - 2].l;
+ C = stack[top - 1].l;
+
+ if ((2 < top && stack[top - 3].l <= B + C) ||
+ (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) {
+ A = stack[top - 3].l;
+
+ if (A <= C) {
+ ret = amerge_at_@suff@(arr, tosort, stack, top - 3, buffer);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 3].l += B;
+ stack[top - 2] = stack[top - 1];
+ --top;
+ } else {
+ ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 2].l += C;
+ --top;
+ }
+ } else if (1 < top && B <= C) {
+ ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 2].l += C;
+ --top;
+ } else {
+ break;
+ }
+ }
+
+ *stack_ptr = top;
+ return 0;
+}
+
+
+static int
+aforce_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
+ npy_intp *stack_ptr,
+ buffer_intp *buffer)
+{
+ int ret;
+ npy_intp top = *stack_ptr;
+
+ while (2 < top) {
+ if (stack[top - 3].l <= stack[top - 1].l) {
+ ret = amerge_at_@suff@(arr, tosort, stack, top - 3, buffer);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 3].l += stack[top - 2].l;
+ stack[top - 2] = stack[top - 1];
+ --top;
+ } else {
+ ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 2].l += stack[top - 1].l;
+ --top;
+ }
+ }
+
+ if (1 < top) {
+ ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Indirect (argsort) timsort: reorders the index array `tosort` so
+ * that arr[tosort[i]] is non-decreasing; the data array v is not
+ * modified. Returns 0 on success, -NPY_ENOMEM if the merge buffer
+ * cannot be grown.
+ */
+int
+atimsort_@suff@(void *v, npy_intp *tosort, npy_intp num,
+                void *NPY_UNUSED(varr))
+{
+    int ret;
+    npy_intp l, n, stack_ptr, minrun;
+    buffer_intp buffer;
+    /* pending-run stack; TIMSORT_STACK_SIZE bounds the run count */
+    run stack[TIMSORT_STACK_SIZE];
+    buffer.pw = NULL;
+    buffer.size = 0;
+    stack_ptr = 0;
+    minrun = compute_min_run(num);
+
+    for (l = 0; l < num;) {
+        /* next sorted run of indices starting at l */
+        n = acount_run_@suff@(v, tosort, l, num, minrun);
+        stack[stack_ptr].s = l;
+        stack[stack_ptr].l = n;
+        ++stack_ptr;
+        ret = atry_collapse_@suff@(v, tosort, stack, &stack_ptr, &buffer);
+
+        if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+        l += n;
+    }
+
+    ret = aforce_collapse_@suff@(v, tosort, stack, &stack_ptr, &buffer);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    ret = 0;
+cleanup:
+
+    if (buffer.pw != NULL) {
+        free(buffer.pw);
+    }
+
+    return ret;
+}
+
+/**end repeat**/
+
+
+
+/* For string sorts and the generic sort, element comparisons are very
+ * expensive, and the O(N**2) comparisons performed by insertion sort
+ * hurt noticeably. Implementing binary insertion sort, and possibly
+ * galloping during the merge phase, could improve this. As a temporary
+ * workaround we use a shorter minimum run length to reduce the amount
+ * of insertion sorting.
+ */
+
+/*
+ * Like compute_min_run() but with a 16-element threshold instead of
+ * 64, producing shorter runs; used by the string and generic sorts
+ * where element comparisons are expensive.
+ */
+npy_intp compute_min_run_short(npy_intp num)
+{
+    npy_intp r = 0;
+
+    /* r records whether any set bit was shifted out (rounds the result up). */
+    while (16 < num) {
+        r |= num & 1;
+        num >>= 1;
+    }
+
+    return num + r;
+}
+
+/*
+ *****************************************************************************
+ ** STRING SORTS **
+ *****************************************************************************
+ */
+
+
+/**begin repeat
+ *
+ * #TYPE = STRING, UNICODE#
+ * #suff = string, unicode#
+ * #type = npy_char, npy_ucs4#
+ */
+
+
+typedef struct {
+ @type@ * pw;
+ npy_intp size;
+ size_t len;
+} buffer_@suff@;
+
+
+/*
+ * Grow `buffer` to hold at least `new_size` strings of buffer->len
+ * @type@ code units each. Never shrinks. Returns 0 on success,
+ * -NPY_ENOMEM on failure.
+ *
+ * NOTE(review): as with the other resize helpers, a failed realloc
+ * leaks the previous allocation (pw is overwritten with NULL) and
+ * size is updated regardless of success.
+ */
+static NPY_INLINE int
+resize_buffer_@suff@(buffer_@suff@ *buffer, npy_intp new_size)
+{
+    if (new_size <= buffer->size) {
+        return 0;
+    }
+
+    if (NPY_UNLIKELY(buffer->pw == NULL)) {
+        buffer->pw = malloc(sizeof(@type@) * new_size * buffer->len);
+    } else {
+        buffer->pw = realloc(buffer->pw, sizeof(@type@) * new_size * buffer->len);
+    }
+
+    buffer->size = new_size;
+
+    if (NPY_UNLIKELY(buffer->pw == NULL)) {
+        return -NPY_ENOMEM;
+    } else {
+        return 0;
+    }
+}
+
+
+static npy_intp
+count_run_@suff@(@type@ *arr, npy_intp l, npy_intp num, npy_intp minrun,
+ @type@ *vp, size_t len)
+{
+ npy_intp sz;
+ @type@ *pl, *pi, *pj, *pr;
+
+ if (NPY_UNLIKELY(num - l == 1)) {
+ return 1;
+ }
+
+ pl = arr + l * len;
+
+ /* (not strictly) ascending sequence */
+ if (!@TYPE@_LT(pl + len, pl, len)) {
+ for (pi = pl + len; pi < arr + (num - 1) * len
+ && !@TYPE@_LT(pi + len, pi, len); pi += len) {
+ }
+ } else { /* (strictly) descending sequence */
+ for (pi = pl + len; pi < arr + (num - 1) * len
+ && @TYPE@_LT(pi + len, pi, len); pi += len) {
+ }
+
+ for (pj = pl, pr = pi; pj < pr; pj += len, pr -= len) {
+ @TYPE@_SWAP(pj, pr, len);
+ }
+ }
+
+ pi += len;
+ sz = (pi - pl) / len;
+
+ if (sz < minrun) {
+ if (l + minrun < num) {
+ sz = minrun;
+ } else {
+ sz = num - l;
+ }
+
+ pr = pl + sz * len;
+
+ /* insertion sort */
+ for (; pi < pr; pi += len) {
+ @TYPE@_COPY(vp, pi, len);
+ pj = pi;
+
+ while (pl < pj && @TYPE@_LT(vp, pj - len, len)) {
+ @TYPE@_COPY(pj, pj - len, len);
+ pj -= len;
+ }
+
+ @TYPE@_COPY(pj, vp, len);
+ }
+ }
+
+ return sz;
+}
+
+
+static npy_intp
+gallop_right_@suff@(const @type@ *arr, const npy_intp size,
+ const @type@ *key, size_t len)
+{
+ npy_intp last_ofs, ofs, m;
+
+ if (@TYPE@_LT(key, arr, len)) {
+ return 0;
+ }
+
+ last_ofs = 0;
+ ofs = 1;
+
+ for (;;) {
+ if (size <= ofs || ofs < 0) {
+ ofs = size; /* arr[ofs] is never accessed */
+ break;
+ }
+
+ if (@TYPE@_LT(key, arr + ofs * len, len)) {
+ break;
+ } else {
+ last_ofs = ofs;
+ /* ofs = 1, 3, 7, 15... */
+ ofs = (ofs << 1) + 1;
+ }
+ }
+
+ /* now that arr[last_ofs*len] <= key < arr[ofs*len] */
+ while (last_ofs + 1 < ofs) {
+ m = last_ofs + ((ofs - last_ofs) >> 1);
+
+ if (@TYPE@_LT(key, arr + m * len, len)) {
+ ofs = m;
+ } else {
+ last_ofs = m;
+ }
+ }
+
+ /* now that arr[(ofs-1)*len] <= key < arr[ofs*len] */
+ return ofs;
+}
+
+
+
+static npy_intp
+gallop_left_@suff@(const @type@ *arr, const npy_intp size, const @type@ *key,
+ size_t len)
+{
+ npy_intp last_ofs, ofs, l, m, r;
+
+ if (@TYPE@_LT(arr + (size - 1) * len, key, len)) {
+ return size;
+ }
+
+ last_ofs = 0;
+ ofs = 1;
+
+ for (;;) {
+ if (size <= ofs || ofs < 0) {
+ ofs = size;
+ break;
+ }
+
+ if (@TYPE@_LT(arr + (size - ofs - 1) * len, key, len)) {
+ break;
+ } else {
+ last_ofs = ofs;
+ ofs = (ofs << 1) + 1;
+ }
+ }
+
+ /* now that arr[(size-ofs-1)*len] < key <= arr[(size-last_ofs-1)*len] */
+ l = size - ofs - 1;
+ r = size - last_ofs - 1;
+
+ while (l + 1 < r) {
+ m = l + ((r - l) >> 1);
+
+ if (@TYPE@_LT(arr + m * len, key, len)) {
+ l = m;
+ } else {
+ r = m;
+ }
+ }
+
+ /* now that arr[(r-1)*len] < key <= arr[r*len] */
+ return r;
+}
+
+
+static void
+merge_left_@suff@(@type@ *p1, npy_intp l1, @type@ *p2, npy_intp l2,
+ @type@ *p3, size_t len)
+{
+ @type@ *end = p2 + l2 * len;
+ memcpy(p3, p1, sizeof(@type@) * l1 * len);
+ /* first element must be in p2 otherwise skipped in the caller */
+ @TYPE@_COPY(p1, p2, len);
+ p1 += len;
+ p2 += len;
+
+ while (p1 < p2 && p2 < end) {
+ if (@TYPE@_LT(p2, p3, len)) {
+ @TYPE@_COPY(p1, p2, len);
+ p1 += len;
+ p2 += len;
+ } else {
+ @TYPE@_COPY(p1, p3, len);
+ p1 += len;
+ p3 += len;
+ }
+ }
+
+ if (p1 != p2) {
+ memcpy(p1, p3, sizeof(@type@) * (p2 - p1));
+ }
+}
+
+
+static void
+merge_right_@suff@(@type@ *p1, npy_intp l1, @type@ *p2, npy_intp l2,
+ @type@ *p3, size_t len)
+{
+ npy_intp ofs;
+ @type@ *start = p1 - len;
+ memcpy(p3, p2, sizeof(@type@) * l2 * len);
+ p1 += (l1 - 1) * len;
+ p2 += (l2 - 1) * len;
+ p3 += (l2 - 1) * len;
+ /* first element must be in p1 otherwise skipped in the caller */
+ @TYPE@_COPY(p2, p1, len);
+ p2 -= len;
+ p1 -= len;
+
+ while (p1 < p2 && start < p1) {
+ if (@TYPE@_LT(p3, p1, len)) {
+ @TYPE@_COPY(p2, p1, len);
+ p2 -= len;
+ p1 -= len;
+ } else {
+ @TYPE@_COPY(p2, p3, len);
+ p2 -= len;
+ p3 -= len;
+ }
+ }
+
+ if (p1 != p2) {
+ ofs = p2 - start;
+ memcpy(start + len, p3 - ofs + len, sizeof(@type@) * ofs);
+ }
+}
+
+
+static int
+merge_at_@suff@(@type@ *arr, const run *stack, const npy_intp at,
+ buffer_@suff@ *buffer, size_t len)
+{
+ int ret;
+ npy_intp s1, l1, s2, l2, k;
+ @type@ *p1, *p2;
+ s1 = stack[at].s;
+ l1 = stack[at].l;
+ s2 = stack[at + 1].s;
+ l2 = stack[at + 1].l;
+ /* arr[s2] belongs to arr[s1+k] */
+ @TYPE@_COPY(buffer->pw, arr + s2 * len, len);
+ k = gallop_right_@suff@(arr + s1 * len, l1, buffer->pw, len);
+
+ if (l1 == k) {
+ /* already sorted */
+ return 0;
+ }
+
+ p1 = arr + (s1 + k) * len;
+ l1 -= k;
+ p2 = arr + s2 * len;
+ /* arr[s2-1] belongs to arr[s2+l2] */
+ @TYPE@_COPY(buffer->pw, arr + (s2 - 1) * len, len);
+ l2 = gallop_left_@suff@(arr + s2 * len, l2, buffer->pw, len);
+
+ if (l2 < l1) {
+ ret = resize_buffer_@suff@(buffer, l2);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ merge_right_@suff@(p1, l1, p2, l2, buffer->pw, len);
+ } else {
+ ret = resize_buffer_@suff@(buffer, l1);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ merge_left_@suff@(p1, l1, p2, l2, buffer->pw, len);
+ }
+
+ return 0;
+}
+
+
+static int
+try_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
+ buffer_@suff@ *buffer, size_t len)
+{
+ int ret;
+ npy_intp A, B, C, top;
+ top = *stack_ptr;
+
+ while (1 < top) {
+ B = stack[top - 2].l;
+ C = stack[top - 1].l;
+
+ if ((2 < top && stack[top - 3].l <= B + C) ||
+ (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) {
+ A = stack[top - 3].l;
+
+ if (A <= C) {
+ ret = merge_at_@suff@(arr, stack, top - 3, buffer, len);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 3].l += B;
+ stack[top - 2] = stack[top - 1];
+ --top;
+ } else {
+ ret = merge_at_@suff@(arr, stack, top - 2, buffer, len);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 2].l += C;
+ --top;
+ }
+ } else if (1 < top && B <= C) {
+ ret = merge_at_@suff@(arr, stack, top - 2, buffer, len);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 2].l += C;
+ --top;
+ } else {
+ break;
+ }
+ }
+
+ *stack_ptr = top;
+ return 0;
+}
+
+
+static int
+force_collapse_@suff@(@type@ *arr, run *stack, npy_intp *stack_ptr,
+ buffer_@suff@ *buffer, size_t len)
+{
+ int ret;
+ npy_intp top = *stack_ptr;
+
+ while (2 < top) {
+ if (stack[top - 3].l <= stack[top - 1].l) {
+ ret = merge_at_@suff@(arr, stack, top - 3, buffer, len);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 3].l += stack[top - 2].l;
+ stack[top - 2] = stack[top - 1];
+ --top;
+ } else {
+ ret = merge_at_@suff@(arr, stack, top - 2, buffer, len);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+ stack[top - 2].l += stack[top - 1].l;
+ --top;
+ }
+ }
+
+ if (1 < top) {
+ ret = merge_at_@suff@(arr, stack, top - 2, buffer, len);
+
+ if (NPY_UNLIKELY(ret < 0)) { return ret; }
+ }
+
+ return 0;
+}
+
+
+/*
+ * Timsort for arrays of fixed-length strings; each element consists of
+ * len = elsize / sizeof(@type@) code units. buffer.pw also serves as
+ * scratch for the insertion-sort pivot and the gallop keys, hence the
+ * initial resize to one element. Returns 0 on success, -NPY_ENOMEM if
+ * the merge buffer cannot be grown.
+ */
+int
+timsort_@suff@(void *start, npy_intp num, void *varr)
+{
+    PyArrayObject *arr = varr;
+    size_t elsize = PyArray_ITEMSIZE(arr);
+    size_t len = elsize / sizeof(@type@);
+    int ret;
+    npy_intp l, n, stack_ptr, minrun;
+    run stack[TIMSORT_STACK_SIZE];
+    buffer_@suff@ buffer;
+
+    /* Items that have zero size don't make sense to sort */
+    if (len == 0) {
+        return 0;
+    }
+
+    buffer.pw = NULL;
+    buffer.size = 0;
+    buffer.len = len;
+    stack_ptr = 0;
+    minrun = compute_min_run_short(num);
+    /* used for insertion sort and gallop key */
+    ret = resize_buffer_@suff@(&buffer, 1);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    for (l = 0; l < num;) {
+        /* next sorted run starting at l */
+        n = count_run_@suff@(start, l, num, minrun, buffer.pw, len);
+        /* both s and l are scaled by len */
+        stack[stack_ptr].s = l;
+        stack[stack_ptr].l = n;
+        ++stack_ptr;
+        ret = try_collapse_@suff@(start, stack, &stack_ptr, &buffer, len);
+
+        if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+        l += n;
+    }
+
+    ret = force_collapse_@suff@(start, stack, &stack_ptr, &buffer, len);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    ret = 0;
+
+cleanup:
+    if (buffer.pw != NULL) {
+        free(buffer.pw);
+    }
+    return ret;
+}
+
+
+/* argsort */
+
+
+static npy_intp
+acount_run_@suff@(@type@ *arr, npy_intp *tosort, npy_intp l, npy_intp num,
+ npy_intp minrun, size_t len)
+{
+ npy_intp sz;
+ npy_intp vi;
+ npy_intp *pl, *pi, *pj, *pr;
+
+ if (NPY_UNLIKELY(num - l == 1)) {
+ return 1;
+ }
+
+ pl = tosort + l;
+
+ /* (not strictly) ascending sequence */
+ if (!@TYPE@_LT(arr + (*(pl + 1)) * len, arr + (*pl) * len, len)) {
+ for (pi = pl + 1; pi < tosort + num - 1
+ && !@TYPE@_LT(arr + (*(pi + 1)) * len, arr + (*pi) * len, len); ++pi) {
+ }
+ } else { /* (strictly) descending sequence */
+ for (pi = pl + 1; pi < tosort + num - 1
+ && @TYPE@_LT(arr + (*(pi + 1)) * len, arr + (*pi) * len, len); ++pi) {
+ }
+
+ for (pj = pl, pr = pi; pj < pr; ++pj, --pr) {
+ INTP_SWAP(*pj, *pr);
+ }
+ }
+
+ ++pi;
+ sz = pi - pl;
+
+ if (sz < minrun) {
+ if (l + minrun < num) {
+ sz = minrun;
+ } else {
+ sz = num - l;
+ }
+
+ pr = pl + sz;
+
+ /* insertion sort */
+ for (; pi < pr; ++pi) {
+ vi = *pi;
+ pj = pi;
+
+ while (pl < pj && @TYPE@_LT(arr + vi * len, arr + (*(pj - 1)) * len, len)) {
+ *pj = *(pj - 1);
+ --pj;
+ }
+
+ *pj = vi;
+ }
+ }
+
+ return sz;
+}
+
+
+static npy_intp
+agallop_left_@suff@(const @type@ *arr, const npy_intp *tosort,
+ const npy_intp size, const @type@ *key, size_t len)
+{
+ npy_intp last_ofs, ofs, l, m, r;
+
+ if (@TYPE@_LT(arr + tosort[size - 1] * len, key, len)) {
+ return size;
+ }
+
+ last_ofs = 0;
+ ofs = 1;
+
+ for (;;) {
+ if (size <= ofs || ofs < 0) {
+ ofs = size;
+ break;
+ }
+
+ if (@TYPE@_LT(arr + tosort[size - ofs - 1] * len, key, len)) {
+ break;
+ } else {
+ last_ofs = ofs;
+ ofs = (ofs << 1) + 1;
+ }
+ }
+
+ /* now that arr[tosort[size-ofs-1]*len] < key <= arr[tosort[size-last_ofs-1]*len] */
+ l = size - ofs - 1;
+ r = size - last_ofs - 1;
+
+ while (l + 1 < r) {
+ m = l + ((r - l) >> 1);
+
+ if (@TYPE@_LT(arr + tosort[m] * len, key, len)) {
+ l = m;
+ } else {
+ r = m;
+ }
+ }
+
+ /* now that arr[tosort[r-1]*len] < key <= arr[tosort[r]*len] */
+ return r;
+}
+
+
+static npy_intp
+agallop_right_@suff@(const @type@ *arr, const npy_intp *tosort,
+ const npy_intp size, const @type@ *key, size_t len)
+{
+ npy_intp last_ofs, ofs, m;
+
+ if (@TYPE@_LT(key, arr + tosort[0] * len, len)) {
+ return 0;
+ }
+
+ last_ofs = 0;
+ ofs = 1;
+
+ for (;;) {
+ if (size <= ofs || ofs < 0) {
+ ofs = size; /* arr[ofs] is never accessed */
+ break;
+ }
+
+ if (@TYPE@_LT(key, arr + tosort[ofs] * len, len)) {
+ break;
+ } else {
+ last_ofs = ofs;
+ /* ofs = 1, 3, 7, 15... */
+ ofs = (ofs << 1) + 1;
+ }
+ }
+
+ /* now that arr[tosort[last_ofs]*len] <= key < arr[tosort[ofs]*len] */
+ while (last_ofs + 1 < ofs) {
+ m = last_ofs + ((ofs - last_ofs) >> 1);
+
+ if (@TYPE@_LT(key, arr + tosort[m] * len, len)) {
+ ofs = m;
+ } else {
+ last_ofs = m;
+ }
+ }
+
+ /* now that arr[tosort[ofs-1]*len] <= key < arr[tosort[ofs]*len] */
+ return ofs;
+}
+
+
+
+static void
+amerge_left_@suff@(@type@ *arr, npy_intp *p1, npy_intp l1, npy_intp *p2,
+ npy_intp l2, npy_intp *p3, size_t len)
+{
+ npy_intp *end = p2 + l2;
+ memcpy(p3, p1, sizeof(npy_intp) * l1);
+ /* first element must be in p2 otherwise skipped in the caller */
+ *p1++ = *p2++;
+
+ while (p1 < p2 && p2 < end) {
+ if (@TYPE@_LT(arr + (*p2) * len, arr + (*p3) * len, len)) {
+ *p1++ = *p2++;
+ } else {
+ *p1++ = *p3++;
+ }
+ }
+
+ if (p1 != p2) {
+ memcpy(p1, p3, sizeof(npy_intp) * (p2 - p1));
+ }
+}
+
+
+static void
+amerge_right_@suff@(@type@ *arr, npy_intp* p1, npy_intp l1, npy_intp *p2,
+ npy_intp l2, npy_intp *p3, size_t len)
+{
+ npy_intp ofs;
+ npy_intp *start = p1 - 1;
+ memcpy(p3, p2, sizeof(npy_intp) * l2);
+ p1 += l1 - 1;
+ p2 += l2 - 1;
+ p3 += l2 - 1;
+ /* first element must be in p1 otherwise skipped in the caller */
+ *p2-- = *p1--;
+
+ while (p1 < p2 && start < p1) {
+ if (@TYPE@_LT(arr + (*p3) * len, arr + (*p1) * len, len)) {
+ *p2-- = *p1--;
+ } else {
+ *p2-- = *p3--;
+ }
+ }
+
+ if (p1 != p2) {
+ ofs = p2 - start;
+ memcpy(start + 1, p3 - ofs + 1, sizeof(npy_intp) * ofs);
+ }
+}
+
+
+
+/*
+ * Merge the adjacent runs stack[at] and stack[at + 1] of the argsort
+ * index array.  Galloping first trims the prefix of run 1 whose elements
+ * already precede the head of run 2, and the suffix of run 2 whose
+ * elements already follow the tail of run 1; the shorter remaining side
+ * is then buffered and merged left or right, minimizing scratch usage.
+ * Returns 0 on success or a negative error from resize_buffer_intp.
+ */
+static int
+amerge_at_@suff@(@type@ *arr, npy_intp *tosort, const run *stack,
+                 const npy_intp at, buffer_intp *buffer, size_t len)
+{
+    int ret;
+    npy_intp s1, l1, s2, l2, k;
+    npy_intp *p1, *p2;
+    s1 = stack[at].s;
+    l1 = stack[at].l;
+    s2 = stack[at + 1].s;
+    l2 = stack[at + 1].l;
+    /* tosort[s2] belongs to tosort[s1+k] */
+    k = agallop_right_@suff@(arr, tosort + s1, l1, arr + tosort[s2] * len, len);
+
+    if (l1 == k) {
+        /* already sorted */
+        return 0;
+    }
+
+    p1 = tosort + s1 + k;
+    l1 -= k;
+    p2 = tosort + s2;
+    /* tosort[s2-1] belongs to tosort[s2+l2] */
+    l2 = agallop_left_@suff@(arr, tosort + s2, l2, arr + tosort[s2 - 1] * len,
+                             len);
+
+    /* buffer only the shorter of the two remaining runs */
+    if (l2 < l1) {
+        ret = resize_buffer_intp(buffer, l2);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        amerge_right_@suff@(arr, p1, l1, p2, l2, buffer->pw, len);
+    } else {
+        ret = resize_buffer_intp(buffer, l1);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        amerge_left_@suff@(arr, p1, l1, p2, l2, buffer->pw, len);
+    }
+
+    return 0;
+}
+
+
+/*
+ * Re-establish the timsort stack invariants after a new run was pushed:
+ * with the top three run lengths named A, B, C (C topmost), merge while
+ * A <= B + C or B <= C, always merging the smaller neighbor pair.  The
+ * extra top-4 test implements the corrected invariant so the condition
+ * holds for the run below the top three as well.  Updates *stack_ptr to
+ * the collapsed depth; returns 0 or a negative error from amerge_at.
+ */
+static int
+atry_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
+                     npy_intp *stack_ptr, buffer_intp *buffer, size_t len)
+{
+    int ret;
+    npy_intp A, B, C, top;
+    top = *stack_ptr;
+
+    while (1 < top) {
+        B = stack[top - 2].l;
+        C = stack[top - 1].l;
+
+        if ((2 < top && stack[top - 3].l <= B + C) ||
+            (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) {
+            A = stack[top - 3].l;
+
+            if (A <= C) {
+                /* A is smaller: merge A with B, then slide C down */
+                ret = amerge_at_@suff@(arr, tosort, stack, top - 3, buffer, len);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 3].l += B;
+                stack[top - 2] = stack[top - 1];
+                --top;
+            } else {
+                ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer, len);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 2].l += C;
+                --top;
+            }
+        } else if (1 < top && B <= C) {
+            ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer, len);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += C;
+            --top;
+        } else {
+            break;
+        }
+    }
+
+    *stack_ptr = top;
+    return 0;
+}
+
+
+
+/*
+ * Final collapse once all runs have been pushed: merge repeatedly until
+ * a single run remains, each time merging the cheaper of the two pairs
+ * among the top three runs.  Returns 0 or a negative error code.
+ */
+static int
+aforce_collapse_@suff@(@type@ *arr, npy_intp *tosort, run *stack,
+                       npy_intp *stack_ptr, buffer_intp *buffer, size_t len)
+{
+    int ret;
+    npy_intp top = *stack_ptr;
+
+    while (2 < top) {
+        if (stack[top - 3].l <= stack[top - 1].l) {
+            /* merge the lower pair (A,B), then slide C down */
+            ret = amerge_at_@suff@(arr, tosort, stack, top - 3, buffer, len);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 3].l += stack[top - 2].l;
+            stack[top - 2] = stack[top - 1];
+            --top;
+        } else {
+            ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer, len);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += stack[top - 1].l;
+            --top;
+        }
+    }
+
+    if (1 < top) {
+        ret = amerge_at_@suff@(arr, tosort, stack, top - 2, buffer, len);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+    }
+
+    return 0;
+}
+
+
+/*
+ * Timsort-based argsort driver for this template type: fills tosort with
+ * a stable ascending permutation of arr without moving the data itself.
+ * len is the number of @type@ units per element (itemsize scaled by the
+ * unit size), so len == 0 means zero-sized items, which are trivially
+ * "sorted".  Returns 0 on success or a negative error (allocation
+ * failure propagated from the merge machinery).
+ */
+int
+atimsort_@suff@(void *start, npy_intp *tosort, npy_intp num, void *varr)
+{
+    PyArrayObject *arr = varr;
+    size_t elsize = PyArray_ITEMSIZE(arr);
+    size_t len = elsize / sizeof(@type@);
+    int ret;
+    npy_intp l, n, stack_ptr, minrun;
+    run stack[TIMSORT_STACK_SIZE];
+    buffer_intp buffer;
+
+    /* Items that have zero size don't make sense to sort */
+    if (len == 0) {
+        return 0;
+    }
+
+    /* merge scratch is allocated lazily on first merge */
+    buffer.pw = NULL;
+    buffer.size = 0;
+    stack_ptr = 0;
+    minrun = compute_min_run_short(num);
+
+    /* push each natural/boosted run, collapsing eagerly as we go */
+    for (l = 0; l < num;) {
+        n = acount_run_@suff@(start, tosort, l, num, minrun, len);
+        /* both s and l are scaled by len */
+        stack[stack_ptr].s = l;
+        stack[stack_ptr].l = n;
+        ++stack_ptr;
+        ret = atry_collapse_@suff@(start, tosort, stack, &stack_ptr, &buffer, len);
+
+        if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+        l += n;
+    }
+
+    ret = aforce_collapse_@suff@(start, tosort, stack, &stack_ptr, &buffer, len);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    ret = 0;
+
+cleanup:
+    if (buffer.pw != NULL) {
+        free(buffer.pw);
+    }
+    return ret;
+}
+
+
+/**end repeat**/
+
+
+
+/*
+ *****************************************************************************
+ ** GENERIC SORT **
+ *****************************************************************************
+ */
+
+
+/*
+ * Scratch buffer for the generic (compare-function based) timsort.
+ * len is the array's item size in bytes and stays fixed for one sort;
+ * size counts how many len-byte elements pw can currently hold.
+ */
+typedef struct {
+    char *pw;       /* scratch storage, NULL until first allocation */
+    npy_intp size;  /* capacity in elements of len bytes each */
+    size_t len;     /* item size in bytes (PyArray_ITEMSIZE) */
+} buffer_char;
+
+
+/*
+ * Grow the scratch buffer so it can hold new_size elements of
+ * buffer->len bytes each; shrinking is never performed.  Returns 0 on
+ * success, -NPY_ENOMEM on allocation failure.  On failure the existing
+ * allocation and buffer->size are left intact so the caller's cleanup
+ * path frees the old block exactly once (the previous code overwrote
+ * buffer->pw with realloc's NULL, leaking the old block, and bumped
+ * buffer->size even when the allocation had failed).
+ */
+static NPY_INLINE int
+resize_buffer_char(buffer_char *buffer, npy_intp new_size)
+{
+    char *new_pw;
+
+    if (new_size <= buffer->size) {
+        return 0;
+    }
+
+    /* realloc(NULL, n) acts as malloc(n), covering the first allocation */
+    new_pw = realloc(buffer->pw, sizeof(char) * new_size * buffer->len);
+
+    if (NPY_UNLIKELY(new_pw == NULL)) {
+        return -NPY_ENOMEM;
+    }
+
+    buffer->pw = new_pw;
+    buffer->size = new_size;
+    return 0;
+}
+
+
+/*
+ * Find (and if needed build) a sorted run starting at index l of the
+ * generic byte-addressed array.  A natural non-strictly ascending run is
+ * kept; a strictly descending run is reversed in place (strictness
+ * preserves stability).  Runs shorter than minrun are extended to
+ * min(minrun, num - l) with an insertion sort that uses vp as scratch
+ * for one len-byte element.  Returns the resulting run length.
+ */
+static npy_intp
+npy_count_run(char *arr, npy_intp l, npy_intp num, npy_intp minrun,
+              char *vp, size_t len, PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp sz;
+    char *pl, *pi, *pj, *pr;
+
+    if (NPY_UNLIKELY(num - l == 1)) {
+        return 1;
+    }
+
+    pl = arr + l * len;
+
+    /* (not strictly) ascending sequence */
+    if (cmp(pl, pl + len, py_arr) <= 0) {
+        for (pi = pl + len; pi < arr + (num - 1) * len
+             && cmp(pi, pi + len, py_arr) <= 0; pi += len) {
+        }
+    } else {  /* (strictly) descending sequence */
+        for (pi = pl + len; pi < arr + (num - 1) * len
+             && cmp(pi + len, pi, py_arr) < 0; pi += len) {
+        }
+
+        /* reverse the descending run in place */
+        for (pj = pl, pr = pi; pj < pr; pj += len, pr -= len) {
+            GENERIC_SWAP(pj, pr, len);
+        }
+    }
+
+    pi += len;
+    sz = (pi - pl) / len;
+
+    if (sz < minrun) {
+        if (l + minrun < num) {
+            sz = minrun;
+        } else {
+            sz = num - l;
+        }
+
+        pr = pl + sz * len;
+
+        /* insertion sort */
+        for (; pi < pr; pi += len) {
+            GENERIC_COPY(vp, pi, len);
+            pj = pi;
+
+            while (pl < pj && cmp(vp, pj - len, py_arr) < 0) {
+                GENERIC_COPY(pj, pj - len, len);
+                pj -= len;
+            }
+
+            GENERIC_COPY(pj, vp, len);
+        }
+    }
+
+    return sz;
+}
+
+
+/*
+ * Galloping (exponential then binary) search for the rightmost insertion
+ * point of key in the sorted run arr[0..size), i.e. the number of
+ * elements <= key.  The `ofs < 0` test guards against signed overflow of
+ * the doubling offset on very long runs.
+ */
+static npy_intp
+npy_gallop_right(const char *arr, const npy_intp size, const char *key,
+                 size_t len, PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp last_ofs, ofs, m;
+
+    if (cmp(key, arr, py_arr) < 0) {
+        return 0;
+    }
+
+    last_ofs = 0;
+    ofs = 1;
+
+    for (;;) {
+        if (size <= ofs || ofs < 0) {
+            ofs = size; /* arr[ofs] is never accessed */
+            break;
+        }
+
+        if (cmp(key, arr + ofs * len, py_arr) < 0) {
+            break;
+        } else {
+            last_ofs = ofs;
+            /* ofs = 1, 3, 7, 15... */
+            ofs = (ofs << 1) + 1;
+        }
+    }
+
+    /* now that arr[last_ofs*len] <= key < arr[ofs*len] */
+    while (last_ofs + 1 < ofs) {
+        m = last_ofs + ((ofs - last_ofs) >> 1);
+
+        if (cmp(key, arr + m * len, py_arr) < 0) {
+            ofs = m;
+        } else {
+            last_ofs = m;
+        }
+    }
+
+    /* now that arr[(ofs-1)*len] <= key < arr[ofs*len] */
+    return ofs;
+}
+
+
+
+/*
+ * Galloping search for the leftmost insertion point of key in the sorted
+ * run arr[0..size), i.e. the number of elements < key.  Gallops from the
+ * right end of the run (offsets are measured backward from size - 1),
+ * then binary-searches the bracketed interval.
+ */
+static npy_intp
+npy_gallop_left(const char *arr, const npy_intp size, const char *key,
+                size_t len, PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp last_ofs, ofs, l, m, r;
+
+    if (cmp(arr + (size - 1) * len, key, py_arr) < 0) {
+        return size;
+    }
+
+    last_ofs = 0;
+    ofs = 1;
+
+    for (;;) {
+        if (size <= ofs || ofs < 0) {  /* ofs < 0 guards signed overflow */
+            ofs = size;
+            break;
+        }
+
+        if (cmp(arr + (size - ofs - 1) * len, key, py_arr) < 0) {
+            break;
+        } else {
+            last_ofs = ofs;
+            ofs = (ofs << 1) + 1;
+        }
+    }
+
+    /* now that arr[(size-ofs-1)*len] < key <= arr[(size-last_ofs-1)*len] */
+    l = size - ofs - 1;
+    r = size - last_ofs - 1;
+
+    while (l + 1 < r) {
+        m = l + ((r - l) >> 1);
+
+        if (cmp(arr + m * len, key, py_arr) < 0) {
+            l = m;
+        } else {
+            r = m;
+        }
+    }
+
+    /* now that arr[(r-1)*len] < key <= arr[r*len] */
+    return r;
+}
+
+
+/*
+ * Forward merge of two adjacent byte-addressed runs, used by
+ * npy_merge_at when the left run is the shorter one (l1 <= l2).  The
+ * left run is copied into scratch p3, then elements merge back into the
+ * gap growing at p1.  Strict `< 0` on the right run keeps stability.
+ */
+static void
+npy_merge_left(char *p1, npy_intp l1, char *p2, npy_intp l2,
+               char *p3, size_t len,
+               PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    char *end = p2 + l2 * len;
+    memcpy(p3, p1, sizeof(char) * l1 * len);
+    /* first element must be in p2 otherwise skipped in the caller */
+    GENERIC_COPY(p1, p2, len);
+    p1 += len;
+    p2 += len;
+
+    while (p1 < p2 && p2 < end) {
+        if (cmp(p2, p3, py_arr) < 0) {
+            GENERIC_COPY(p1, p2, len);
+            p1 += len;
+            p2 += len;
+        } else {
+            GENERIC_COPY(p1, p3, len);
+            p1 += len;
+            p3 += len;
+        }
+    }
+
+    /* right run exhausted first: flush the rest of the buffered left run */
+    if (p1 != p2) {
+        memcpy(p1, p3, sizeof(char) * (p2 - p1));
+    }
+}
+
+
+/*
+ * Backward merge of two adjacent byte-addressed runs, used by
+ * npy_merge_at when the right run is the shorter one (l2 < l1).  The
+ * right run is copied into scratch p3, then elements merge into the gap
+ * growing downward from the end of p2.
+ */
+static void
+npy_merge_right(char *p1, npy_intp l1, char *p2, npy_intp l2,
+                char *p3, size_t len,
+                PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp ofs;
+    char *start = p1 - len;
+    memcpy(p3, p2, sizeof(char) * l2 * len);
+    p1 += (l1 - 1) * len;
+    p2 += (l2 - 1) * len;
+    p3 += (l2 - 1) * len;
+    /* first element must be in p1 otherwise skipped in the caller */
+    GENERIC_COPY(p2, p1, len);
+    p2 -= len;
+    p1 -= len;
+
+    while (p1 < p2 && start < p1) {
+        if (cmp(p3, p1, py_arr) < 0) {
+            GENERIC_COPY(p2, p1, len);
+            p2 -= len;
+            p1 -= len;
+        } else {
+            GENERIC_COPY(p2, p3, len);
+            p2 -= len;
+            p3 -= len;
+        }
+    }
+
+    /* left run exhausted first: flush the rest of the buffered right run */
+    if (p1 != p2) {
+        ofs = p2 - start;
+        memcpy(start + len, p3 - ofs + len, sizeof(char) * ofs);
+    }
+}
+
+
+
+/*
+ * Merge the adjacent runs stack[at] and stack[at + 1] of the generic
+ * array.  Gallops first to trim the already-ordered prefix of run 1 and
+ * suffix of run 2, then buffers and merges the shorter remaining side.
+ * buffer->pw must already hold room for one element — the driver
+ * pre-sizes it — because it doubles as the gallop-key scratch here.
+ * Returns 0 on success or a negative error from resize_buffer_char.
+ */
+static int
+npy_merge_at(char *arr, const run *stack, const npy_intp at,
+             buffer_char *buffer, size_t len,
+             PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    int ret;
+    npy_intp s1, l1, s2, l2, k;
+    char *p1, *p2;
+    s1 = stack[at].s;
+    l1 = stack[at].l;
+    s2 = stack[at + 1].s;
+    l2 = stack[at + 1].l;
+    /* arr[s2] belongs to arr[s1+k] */
+    GENERIC_COPY(buffer->pw, arr + s2 * len, len);
+    k = npy_gallop_right(arr + s1 * len, l1, buffer->pw, len, cmp, py_arr);
+
+    if (l1 == k) {
+        /* already sorted */
+        return 0;
+    }
+
+    p1 = arr + (s1 + k) * len;
+    l1 -= k;
+    p2 = arr + s2 * len;
+    /* arr[s2-1] belongs to arr[s2+l2] */
+    GENERIC_COPY(buffer->pw, arr + (s2 - 1) * len, len);
+    l2 = npy_gallop_left(arr + s2 * len, l2, buffer->pw, len, cmp, py_arr);
+
+    /* buffer only the shorter of the two remaining runs */
+    if (l2 < l1) {
+        ret = resize_buffer_char(buffer, l2);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        npy_merge_right(p1, l1, p2, l2, buffer->pw, len, cmp, py_arr);
+    } else {
+        ret = resize_buffer_char(buffer, l1);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        npy_merge_left(p1, l1, p2, l2, buffer->pw, len, cmp, py_arr);
+    }
+
+    return 0;
+}
+
+
+/*
+ * Re-establish the timsort stack invariants after a new run was pushed:
+ * with the top three run lengths A, B, C (C topmost), merge while
+ * A <= B + C or B <= C, choosing the cheaper merge point.  The extra
+ * top-4 test implements the corrected invariant for the run below the
+ * top three.  Updates *stack_ptr; returns 0 or a negative error.
+ */
+static int
+npy_try_collapse(char *arr, run *stack, npy_intp *stack_ptr,
+                 buffer_char *buffer, size_t len,
+                 PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    int ret;
+    npy_intp A, B, C, top;
+    top = *stack_ptr;
+
+    while (1 < top) {
+        B = stack[top - 2].l;
+        C = stack[top - 1].l;
+
+        if ((2 < top && stack[top - 3].l <= B + C) ||
+            (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) {
+            A = stack[top - 3].l;
+
+            if (A <= C) {
+                /* A is smaller: merge A with B, then slide C down */
+                ret = npy_merge_at(arr, stack, top - 3, buffer, len, cmp, py_arr);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 3].l += B;
+                stack[top - 2] = stack[top - 1];
+                --top;
+            } else {
+                ret = npy_merge_at(arr, stack, top - 2, buffer, len, cmp, py_arr);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 2].l += C;
+                --top;
+            }
+        } else if (1 < top && B <= C) {
+            ret = npy_merge_at(arr, stack, top - 2, buffer, len, cmp, py_arr);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += C;
+            --top;
+        } else {
+            break;
+        }
+    }
+
+    *stack_ptr = top;
+    return 0;
+}
+
+
+/*
+ * Final collapse once all runs are pushed: merge until a single run
+ * remains, each time merging the cheaper of the two candidate pairs
+ * among the top three runs.  Returns 0 or a negative error code.
+ */
+static int
+npy_force_collapse(char *arr, run *stack, npy_intp *stack_ptr,
+                   buffer_char *buffer, size_t len,
+                   PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    int ret;
+    npy_intp top = *stack_ptr;
+
+    while (2 < top) {
+        if (stack[top - 3].l <= stack[top - 1].l) {
+            /* merge the lower pair, then slide the top run down */
+            ret = npy_merge_at(arr, stack, top - 3, buffer, len, cmp, py_arr);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 3].l += stack[top - 2].l;
+            stack[top - 2] = stack[top - 1];
+            --top;
+        } else {
+            ret = npy_merge_at(arr, stack, top - 2, buffer, len, cmp, py_arr);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += stack[top - 1].l;
+            --top;
+        }
+    }
+
+    if (1 < top) {
+        ret = npy_merge_at(arr, stack, top - 2, buffer, len, cmp, py_arr);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+    }
+
+    return 0;
+}
+
+
+/*
+ * Generic stable sort using the dtype's compare function: sorts num
+ * items of len == PyArray_ITEMSIZE(arr) bytes in place at `start`.
+ * Zero-size items are trivially sorted.  Returns 0 on success or a
+ * negative error (-NPY_ENOMEM propagated from buffer resizing).
+ */
+int
+npy_timsort(void *start, npy_intp num, void *varr)
+{
+    PyArrayObject *arr = varr;
+    size_t len = PyArray_ITEMSIZE(arr);
+    PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+    int ret;
+    npy_intp l, n, stack_ptr, minrun;
+    run stack[TIMSORT_STACK_SIZE];
+    buffer_char buffer;
+
+    /* Items that have zero size don't make sense to sort */
+    if (len == 0) {
+        return 0;
+    }
+
+    buffer.pw = NULL;
+    buffer.size = 0;
+    buffer.len = len;
+    stack_ptr = 0;
+    minrun = compute_min_run_short(num);
+
+    /* used for insertion sort and gallop key */
+    ret = resize_buffer_char(&buffer, len);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    /* push each natural/boosted run, collapsing eagerly as we go */
+    for (l = 0; l < num;) {
+        n = npy_count_run(start, l, num, minrun, buffer.pw, len, cmp, arr);
+
+        /* both s and l are scaled by len */
+        stack[stack_ptr].s = l;
+        stack[stack_ptr].l = n;
+        ++stack_ptr;
+        ret = npy_try_collapse(start, stack, &stack_ptr, &buffer, len, cmp, arr);
+
+        if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+        l += n;
+    }
+
+    ret = npy_force_collapse(start, stack, &stack_ptr, &buffer, len, cmp, arr);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    ret = 0;
+
+cleanup:
+    if (buffer.pw != NULL) {
+        free(buffer.pw);
+    }
+    return ret;
+}
+
+
+/* argsort */
+
+/*
+ * argsort counterpart of npy_count_run: finds or builds a run of sorted
+ * indices in tosort starting at l, comparing the referenced elements via
+ * cmp.  Strictly descending runs are reversed (index-wise) for
+ * stability; short runs are extended to min(minrun, num - l) by
+ * insertion sort on the indices.  Returns the run length.
+ */
+static npy_intp
+npy_acount_run(char *arr, npy_intp *tosort, npy_intp l, npy_intp num,
+               npy_intp minrun, size_t len,
+               PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp sz;
+    npy_intp vi;
+    npy_intp *pl, *pi, *pj, *pr;
+
+    if (NPY_UNLIKELY(num - l == 1)) {
+        return 1;
+    }
+
+    pl = tosort + l;
+
+    /* (not strictly) ascending sequence */
+    if (cmp(arr + (*pl) * len, arr + (*(pl + 1)) * len, py_arr) <= 0) {
+        for (pi = pl + 1; pi < tosort + num - 1
+             && cmp(arr + (*pi) * len, arr + (*(pi + 1)) * len, py_arr) <= 0; ++pi) {
+        }
+    } else {  /* (strictly) descending sequence */
+        for (pi = pl + 1; pi < tosort + num - 1
+             && cmp(arr + (*(pi + 1)) * len, arr + (*pi) * len, py_arr) < 0; ++pi) {
+        }
+
+        /* reverse the descending run of indices in place */
+        for (pj = pl, pr = pi; pj < pr; ++pj, --pr) {
+            INTP_SWAP(*pj, *pr);
+        }
+    }
+
+    ++pi;
+    sz = pi - pl;
+
+    if (sz < minrun) {
+        if (l + minrun < num) {
+            sz = minrun;
+        } else {
+            sz = num - l;
+        }
+
+        pr = pl + sz;
+
+        /* insertion sort */
+        for (; pi < pr; ++pi) {
+            vi = *pi;
+            pj = pi;
+
+            while (pl < pj && cmp(arr + vi * len, arr + (*(pj - 1)) * len, py_arr) < 0) {
+                *pj = *(pj - 1);
+                --pj;
+            }
+
+            *pj = vi;
+        }
+    }
+
+    return sz;
+}
+
+
+/*
+ * Galloping search for the leftmost insertion point of key within the
+ * index run tosort[0..size) (elements read via arr + tosort[i] * len):
+ * returns the number of referenced elements < key.  Gallops from the
+ * right end, then binary-searches the bracketed interval.
+ */
+static npy_intp
+npy_agallop_left(const char *arr, const npy_intp *tosort,
+                 const npy_intp size, const char *key, size_t len,
+                 PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp last_ofs, ofs, l, m, r;
+
+    if (cmp(arr + tosort[size - 1] * len, key, py_arr) < 0) {
+        return size;
+    }
+
+    last_ofs = 0;
+    ofs = 1;
+
+    for (;;) {
+        if (size <= ofs || ofs < 0) {  /* ofs < 0 guards signed overflow */
+            ofs = size;
+            break;
+        }
+
+        if (cmp(arr + tosort[size - ofs - 1] * len, key, py_arr) < 0) {
+            break;
+        } else {
+            last_ofs = ofs;
+            ofs = (ofs << 1) + 1;
+        }
+    }
+
+    /* now that arr[tosort[size-ofs-1]*len] < key <= arr[tosort[size-last_ofs-1]*len] */
+    l = size - ofs - 1;
+    r = size - last_ofs - 1;
+
+    while (l + 1 < r) {
+        m = l + ((r - l) >> 1);
+
+        if (cmp(arr + tosort[m] * len, key, py_arr) < 0) {
+            l = m;
+        } else {
+            r = m;
+        }
+    }
+
+    /* now that arr[tosort[r-1]*len] < key <= arr[tosort[r]*len] */
+    return r;
+}
+
+
+/*
+ * Galloping search for the rightmost insertion point of key within the
+ * index run tosort[0..size): returns the number of referenced elements
+ * <= key.  Exponential probe first, then binary search of the bracket.
+ */
+static npy_intp
+npy_agallop_right(const char *arr, const npy_intp *tosort,
+                  const npy_intp size, const char *key, size_t len,
+                  PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp last_ofs, ofs, m;
+
+    if (cmp(key, arr + tosort[0] * len, py_arr) < 0) {
+        return 0;
+    }
+
+    last_ofs = 0;
+    ofs = 1;
+
+    for (;;) {
+        if (size <= ofs || ofs < 0) {  /* ofs < 0 guards signed overflow */
+            ofs = size; /* arr[ofs] is never accessed */
+            break;
+        }
+
+        if (cmp(key, arr + tosort[ofs] * len, py_arr) < 0) {
+            break;
+        } else {
+            last_ofs = ofs;
+            /* ofs = 1, 3, 7, 15... */
+            ofs = (ofs << 1) + 1;
+        }
+    }
+
+    /* now that arr[tosort[last_ofs]*len] <= key < arr[tosort[ofs]*len] */
+    while (last_ofs + 1 < ofs) {
+        m = last_ofs + ((ofs - last_ofs) >> 1);
+
+        if (cmp(key, arr + tosort[m] * len, py_arr) < 0) {
+            ofs = m;
+        } else {
+            last_ofs = m;
+        }
+    }
+
+    /* now that arr[tosort[ofs-1]*len] <= key < arr[tosort[ofs]*len] */
+    return ofs;
+}
+
+
+/*
+ * Forward merge of two adjacent index runs for the generic argsort, used
+ * by npy_amerge_at when the left run is shorter (l1 <= l2).  The left
+ * run is copied into scratch p3; indices merge back into the gap at p1.
+ * Strict `< 0` on the right run keeps the sort stable.
+ */
+static void
+npy_amerge_left(char *arr, npy_intp *p1, npy_intp l1, npy_intp *p2,
+                npy_intp l2, npy_intp *p3, size_t len,
+                PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp *end = p2 + l2;
+    memcpy(p3, p1, sizeof(npy_intp) * l1);
+    /* first element must be in p2 otherwise skipped in the caller */
+    *p1++ = *p2++;
+
+    while (p1 < p2 && p2 < end) {
+        if (cmp(arr + (*p2) * len, arr + (*p3) * len, py_arr) < 0) {
+            *p1++ = *p2++;
+        } else {
+            *p1++ = *p3++;
+        }
+    }
+
+    /* right run exhausted first: flush the rest of the buffered left run */
+    if (p1 != p2) {
+        memcpy(p1, p3, sizeof(npy_intp) * (p2 - p1));
+    }
+}
+
+
+/*
+ * Backward merge of two adjacent index runs for the generic argsort,
+ * used by npy_amerge_at when the right run is shorter (l2 < l1).  The
+ * right run is copied into scratch p3; indices merge into the gap
+ * growing downward from the end of p2.
+ */
+static void
+npy_amerge_right(char *arr, npy_intp* p1, npy_intp l1, npy_intp *p2,
+                 npy_intp l2, npy_intp *p3, size_t len,
+                 PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    npy_intp ofs;
+    npy_intp *start = p1 - 1;
+    memcpy(p3, p2, sizeof(npy_intp) * l2);
+    p1 += l1 - 1;
+    p2 += l2 - 1;
+    p3 += l2 - 1;
+    /* first element must be in p1 otherwise skipped in the caller */
+    *p2-- = *p1--;
+
+    while (p1 < p2 && start < p1) {
+        if (cmp(arr + (*p3) * len, arr + (*p1) * len, py_arr) < 0) {
+            *p2-- = *p1--;
+        } else {
+            *p2-- = *p3--;
+        }
+    }
+
+    /* left run exhausted first: flush the rest of the buffered right run */
+    if (p1 != p2) {
+        ofs = p2 - start;
+        memcpy(start + 1, p3 - ofs + 1, sizeof(npy_intp) * ofs);
+    }
+}
+
+
+
+/*
+ * Merge the adjacent index runs stack[at] and stack[at + 1] for the
+ * generic argsort.  Gallops to trim the already-ordered prefix of run 1
+ * and suffix of run 2 (keys read directly from the array via tosort, so
+ * no key scratch is needed), then buffers and merges the shorter side.
+ * Returns 0 on success or a negative error from resize_buffer_intp.
+ */
+static int
+npy_amerge_at(char *arr, npy_intp *tosort, const run *stack,
+              const npy_intp at, buffer_intp *buffer, size_t len,
+              PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    int ret;
+    npy_intp s1, l1, s2, l2, k;
+    npy_intp *p1, *p2;
+    s1 = stack[at].s;
+    l1 = stack[at].l;
+    s2 = stack[at + 1].s;
+    l2 = stack[at + 1].l;
+    /* tosort[s2] belongs to tosort[s1+k] */
+    k = npy_agallop_right(arr, tosort + s1, l1, arr + tosort[s2] * len, len, cmp,
+                          py_arr);
+
+    if (l1 == k) {
+        /* already sorted */
+        return 0;
+    }
+
+    p1 = tosort + s1 + k;
+    l1 -= k;
+    p2 = tosort + s2;
+    /* tosort[s2-1] belongs to tosort[s2+l2] */
+    l2 = npy_agallop_left(arr, tosort + s2, l2, arr + tosort[s2 - 1] * len,
+                          len, cmp, py_arr);
+
+    /* buffer only the shorter of the two remaining runs */
+    if (l2 < l1) {
+        ret = resize_buffer_intp(buffer, l2);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        npy_amerge_right(arr, p1, l1, p2, l2, buffer->pw, len, cmp, py_arr);
+    } else {
+        ret = resize_buffer_intp(buffer, l1);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+        npy_amerge_left(arr, p1, l1, p2, l2, buffer->pw, len, cmp, py_arr);
+    }
+
+    return 0;
+}
+
+
+/*
+ * Re-establish the timsort stack invariants for the generic argsort
+ * after pushing a run: merge while A <= B + C or B <= C among the top
+ * run lengths, choosing the cheaper merge point; the top-4 test applies
+ * the corrected invariant one level deeper.  Updates *stack_ptr.
+ */
+static int
+npy_atry_collapse(char *arr, npy_intp *tosort, run *stack,
+                  npy_intp *stack_ptr, buffer_intp *buffer, size_t len,
+                  PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    int ret;
+    npy_intp A, B, C, top;
+    top = *stack_ptr;
+
+    while (1 < top) {
+        B = stack[top - 2].l;
+        C = stack[top - 1].l;
+
+        if ((2 < top && stack[top - 3].l <= B + C) ||
+            (3 < top && stack[top - 4].l <= stack[top - 3].l + B)) {
+            A = stack[top - 3].l;
+
+            if (A <= C) {
+                /* A is smaller: merge A with B, then slide C down */
+                ret = npy_amerge_at(arr, tosort, stack, top - 3, buffer, len, cmp, py_arr);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 3].l += B;
+                stack[top - 2] = stack[top - 1];
+                --top;
+            } else {
+                ret = npy_amerge_at(arr, tosort, stack, top - 2, buffer, len, cmp, py_arr);
+
+                if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+                stack[top - 2].l += C;
+                --top;
+            }
+        } else if (1 < top && B <= C) {
+            ret = npy_amerge_at(arr, tosort, stack, top - 2, buffer, len, cmp, py_arr);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += C;
+            --top;
+        } else {
+            break;
+        }
+    }
+
+    *stack_ptr = top;
+    return 0;
+}
+
+
+/*
+ * Final collapse for the generic argsort: merge until one run remains,
+ * each time merging the cheaper candidate pair among the top three.
+ * Returns 0 or a negative error code.
+ */
+static int
+npy_aforce_collapse(char *arr, npy_intp *tosort, run *stack,
+                    npy_intp *stack_ptr, buffer_intp *buffer, size_t len,
+                    PyArray_CompareFunc *cmp, PyArrayObject *py_arr)
+{
+    int ret;
+    npy_intp top = *stack_ptr;
+
+    while (2 < top) {
+        if (stack[top - 3].l <= stack[top - 1].l) {
+            /* merge the lower pair, then slide the top run down */
+            ret = npy_amerge_at(arr, tosort, stack, top - 3, buffer, len, cmp, py_arr);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 3].l += stack[top - 2].l;
+            stack[top - 2] = stack[top - 1];
+            --top;
+        } else {
+            ret = npy_amerge_at(arr, tosort, stack, top - 2, buffer, len, cmp, py_arr);
+
+            if (NPY_UNLIKELY(ret < 0)) { return ret; }
+
+            stack[top - 2].l += stack[top - 1].l;
+            --top;
+        }
+    }
+
+    if (1 < top) {
+        ret = npy_amerge_at(arr, tosort, stack, top - 2, buffer, len, cmp, py_arr);
+
+        if (NPY_UNLIKELY(ret < 0)) { return ret; }
+    }
+
+    return 0;
+}
+
+
+/*
+ * Generic stable argsort using the dtype's compare function: fills
+ * tosort with an ascending permutation of the num items at `start`
+ * without moving the data.  Unlike npy_timsort no one-element scratch
+ * is pre-sized — gallop keys are read in place via tosort.  Returns 0
+ * on success or a negative error from the merge machinery.
+ */
+int
+npy_atimsort(void *start, npy_intp *tosort, npy_intp num, void *varr)
+{
+    PyArrayObject *arr = varr;
+    size_t len = PyArray_ITEMSIZE(arr);
+    PyArray_CompareFunc *cmp = PyArray_DESCR(arr)->f->compare;
+    int ret;
+    npy_intp l, n, stack_ptr, minrun;
+    run stack[TIMSORT_STACK_SIZE];
+    buffer_intp buffer;
+
+    /* Items that have zero size don't make sense to sort */
+    if (len == 0) {
+        return 0;
+    }
+
+    /* merge scratch is allocated lazily on first merge */
+    buffer.pw = NULL;
+    buffer.size = 0;
+    stack_ptr = 0;
+    minrun = compute_min_run_short(num);
+
+    /* push each natural/boosted run, collapsing eagerly as we go */
+    for (l = 0; l < num;) {
+        n = npy_acount_run(start, tosort, l, num, minrun, len, cmp, arr);
+        /* both s and l are scaled by len */
+        stack[stack_ptr].s = l;
+        stack[stack_ptr].l = n;
+        ++stack_ptr;
+        ret = npy_atry_collapse(start, tosort, stack, &stack_ptr, &buffer, len, cmp,
+                                arr);
+
+        if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+        l += n;
+    }
+
+    ret = npy_aforce_collapse(start, tosort, stack, &stack_ptr, &buffer, len,
+                              cmp, arr);
+
+    if (NPY_UNLIKELY(ret < 0)) { goto cleanup; }
+
+    ret = 0;
+
+cleanup:
+    if (buffer.pw != NULL) {
+        free(buffer.pw);
+    }
+    return ret;
+}
diff --git a/numpy/core/src/umath/_rational_tests.c.src b/numpy/core/src/umath/_rational_tests.c.src
index 9e74845df..615e395c7 100644
--- a/numpy/core/src/umath/_rational_tests.c.src
+++ b/numpy/core/src/umath/_rational_tests.c.src
@@ -539,11 +539,11 @@ static PyObject*
pyrational_str(PyObject* self) {
rational x = ((PyRational*)self)->r;
if (d(x)!=1) {
- return PyString_FromFormat(
+ return PyUString_FromFormat(
"%ld/%ld",(long)x.n,(long)d(x));
}
else {
- return PyString_FromFormat(
+ return PyUString_FromFormat(
"%ld",(long)x.n);
}
}
diff --git a/numpy/core/src/umath/_struct_ufunc_tests.c.src b/numpy/core/src/umath/_struct_ufunc_tests.c.src
index b831d5c2a..3eaac73e1 100644
--- a/numpy/core/src/umath/_struct_ufunc_tests.c.src
+++ b/numpy/core/src/umath/_struct_ufunc_tests.c.src
@@ -17,12 +17,6 @@
* docs.python.org .
*/
-static PyMethodDef StructUfuncTestMethods[] = {
- {NULL, NULL, 0, NULL}
-};
-
-/* The loop definition must precede the PyMODINIT_FUNC. */
-
static void add_uint64_triplet(char **args, npy_intp *dimensions,
npy_intp* steps, void* data)
{
@@ -53,6 +47,59 @@ static void add_uint64_triplet(char **args, npy_intp *dimensions,
}
}
+static PyObject*
+register_fail(PyObject* NPY_UNUSED(self), PyObject* NPY_UNUSED(args))
+{
+ PyObject *add_triplet;
+ PyObject *dtype_dict;
+ PyArray_Descr *dtype;
+ PyArray_Descr *dtypes[3];
+ int retval;
+
+ add_triplet = PyUFunc_FromFuncAndData(NULL, NULL, NULL, 0, 2, 1,
+ PyUFunc_None, "add_triplet",
+ "add_triplet_docstring", 0);
+
+ dtype_dict = Py_BuildValue("[(s, s), (s, s), (s, s)]",
+ "f0", "u8", "f1", "u8", "f2", "u8");
+ PyArray_DescrConverter(dtype_dict, &dtype);
+ Py_DECREF(dtype_dict);
+
+ dtypes[0] = dtype;
+ dtypes[1] = dtype;
+ dtypes[2] = dtype;
+
+ retval = PyUFunc_RegisterLoopForDescr((PyUFuncObject *)add_triplet,
+ dtype,
+ &add_uint64_triplet,
+ dtypes,
+ NULL);
+
+ if (retval < 0) {
+ Py_DECREF(add_triplet);
+ Py_DECREF(dtype);
+ return NULL;
+ }
+ retval = PyUFunc_RegisterLoopForDescr((PyUFuncObject *)add_triplet,
+ dtype,
+ &add_uint64_triplet,
+ dtypes,
+ NULL);
+ Py_DECREF(add_triplet);
+ Py_DECREF(dtype);
+ if (retval < 0) {
+ return NULL;
+ }
+ Py_RETURN_NONE;
+}
+
+static PyMethodDef StructUfuncTestMethods[] = {
+ {"register_fail",
+ register_fail,
+ METH_NOARGS, NULL},
+ {NULL, NULL, 0, NULL}
+};
+
#if defined(NPY_PY3K)
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
@@ -100,7 +147,7 @@ PyMODINIT_FUNC init_struct_ufunc_tests(void)
"add_triplet_docstring", 0);
dtype_dict = Py_BuildValue("[(s, s), (s, s), (s, s)]",
- "f0", "u8", "f1", "u8", "f2", "u8");
+ "f0", "u8", "f1", "u8", "f2", "u8");
PyArray_DescrConverter(dtype_dict, &dtype);
Py_DECREF(dtype_dict);
@@ -114,6 +161,7 @@ PyMODINIT_FUNC init_struct_ufunc_tests(void)
dtypes,
NULL);
+ Py_DECREF(dtype);
d = PyModule_GetDict(m);
PyDict_SetItemString(d, "add_triplet", add_triplet);
diff --git a/numpy/core/src/umath/_umath_tests.c.src b/numpy/core/src/umath/_umath_tests.c.src
index fcbdbe330..6c3bcce71 100644
--- a/numpy/core/src/umath/_umath_tests.c.src
+++ b/numpy/core/src/umath/_umath_tests.c.src
@@ -128,6 +128,8 @@ static void
/**end repeat**/
char *matrix_multiply_signature = "(m,n),(n,p)->(m,p)";
+/* for use with matrix_multiply code, but different signature */
+char *matmul_signature = "(m?,n),(n,p?)->(m?,p?)";
/**begin repeat
@@ -195,6 +197,45 @@ static void
/**end repeat**/
+char *cross1d_signature = "(3),(3)->(3)";
+
+/**begin repeat
+
+ #TYPE=LONG,DOUBLE#
+ #typ=npy_long, npy_double#
+*/
+
+/*
+ * This implements the cross product:
+ * out[n, 0] = in1[n, 1]*in2[n, 2] - in1[n, 2]*in2[n, 1]
+ * out[n, 1] = in1[n, 2]*in2[n, 0] - in1[n, 0]*in2[n, 2]
+ * out[n, 2] = in1[n, 0]*in2[n, 1] - in1[n, 1]*in2[n, 0]
+ */
+static void
+@TYPE@_cross1d(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ INIT_OUTER_LOOP_3
+ npy_intp is1=steps[0], is2=steps[1], os = steps[2];
+ BEGIN_OUTER_LOOP_3
+ @typ@ i1_x = *(@typ@ *)(args[0] + 0*is1);
+ @typ@ i1_y = *(@typ@ *)(args[0] + 1*is1);
+ @typ@ i1_z = *(@typ@ *)(args[0] + 2*is1);
+
+ @typ@ i2_x = *(@typ@ *)(args[1] + 0*is2);
+ @typ@ i2_y = *(@typ@ *)(args[1] + 1*is2);
+ @typ@ i2_z = *(@typ@ *)(args[1] + 2*is2);
+ char *op = args[2];
+
+ *(@typ@ *)op = i1_y * i2_z - i1_z * i2_y;
+ op += os;
+ *(@typ@ *)op = i1_z * i2_x - i1_x * i2_z;
+ op += os;
+ *(@typ@ *)op = i1_x * i2_y - i1_y * i2_x;
+ END_OUTER_LOOP
+}
+
+/**end repeat**/
+
char *euclidean_pdist_signature = "(n,d)->(p)";
/**begin repeat
@@ -285,17 +326,39 @@ static void
/**end repeat**/
+/* The following lines were generated using a slightly modified
+ version of code_generators/generate_umath.py and adding these
+ lines to defdict:
+
+defdict = {
+'inner1d' :
+ Ufunc(2, 1, None_,
+ r'''inner on the last dimension and broadcast on the rest \n"
+ " \"(i),(i)->()\" \n''',
+ TD('ld'),
+ ),
+'innerwt' :
+ Ufunc(3, 1, None_,
+ r'''inner1d with a weight argument \n"
+ " \"(i),(i),(i)->()\" \n''',
+ TD('ld'),
+ ),
+}
+
+*/
static PyUFuncGenericFunction inner1d_functions[] = { LONG_inner1d, DOUBLE_inner1d };
-static void * inner1d_data[] = { (void *)NULL, (void *)NULL };
+static void *inner1d_data[] = { (void *)NULL, (void *)NULL };
static char inner1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction innerwt_functions[] = { LONG_innerwt, DOUBLE_innerwt };
-static void * innerwt_data[] = { (void *)NULL, (void *)NULL };
+static void *innerwt_data[] = { (void *)NULL, (void *)NULL };
static char innerwt_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction matrix_multiply_functions[] = { LONG_matrix_multiply, FLOAT_matrix_multiply, DOUBLE_matrix_multiply };
static void *matrix_multiply_data[] = { (void *)NULL, (void *)NULL, (void *)NULL };
static char matrix_multiply_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_FLOAT, NPY_FLOAT, NPY_FLOAT, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
-
+static PyUFuncGenericFunction cross1d_functions[] = { LONG_cross1d, DOUBLE_cross1d };
+static void *cross1d_data[] = { (void *)NULL, (void *)NULL };
+static char cross1d_signatures[] = { NPY_LONG, NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction euclidean_pdist_functions[] =
{ FLOAT_euclidean_pdist, DOUBLE_euclidean_pdist };
static void *eucldiean_pdist_data[] = { (void *)NULL, (void *)NULL };
@@ -303,7 +366,7 @@ static char euclidean_pdist_signatures[] = { NPY_FLOAT, NPY_FLOAT,
NPY_DOUBLE, NPY_DOUBLE };
static PyUFuncGenericFunction cumsum_functions[] = { LONG_cumsum, DOUBLE_cumsum };
-static void * cumsum_data[] = { (void *)NULL, (void *)NULL };
+static void *cumsum_data[] = { (void *)NULL, (void *)NULL };
static char cumsum_signatures[] = { NPY_LONG, NPY_LONG, NPY_DOUBLE, NPY_DOUBLE };
@@ -346,6 +409,17 @@ addUfuncs(PyObject *dictionary) {
}
PyDict_SetItemString(dictionary, "matrix_multiply", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(matrix_multiply_functions,
+ matrix_multiply_data, matrix_multiply_signatures,
+ 3, 2, 1, PyUFunc_None, "matmul",
+ "matmul on last two dimensions, with some being optional\n"
+ " \"(m?,n),(n,p?)->(m?,p?)\" \n",
+ 0, matmul_signature);
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "matmul", f);
+ Py_DECREF(f);
f = PyUFunc_FromFuncAndDataAndSignature(euclidean_pdist_functions,
eucldiean_pdist_data, euclidean_pdist_signatures,
2, 1, 1, PyUFunc_None, "euclidean_pdist",
@@ -376,6 +450,16 @@ addUfuncs(PyObject *dictionary) {
}
PyDict_SetItemString(dictionary, "inner1d_no_doc", f);
Py_DECREF(f);
+ f = PyUFunc_FromFuncAndDataAndSignature(cross1d_functions, cross1d_data,
+ cross1d_signatures, 2, 2, 1, PyUFunc_None, "cross1d",
+ "cross product on the last dimension and broadcast on the rest \n"\
+ " \"(3),(3)->(3)\" \n",
+ 0, cross1d_signature);
+ if (f == NULL) {
+ return -1;
+ }
+ PyDict_SetItemString(dictionary, "cross1d", f);
+ Py_DECREF(f);
return 0;
}
@@ -385,9 +469,10 @@ static PyObject *
UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
int nin, nout, i;
- PyObject *signature, *sig_str;
- PyUFuncObject *f = NULL;
- PyObject *core_num_dims = NULL, *core_dim_ixs = NULL;
+ PyObject *signature=NULL, *sig_str=NULL;
+ PyUFuncObject *f=NULL;
+ PyObject *core_num_dims=NULL, *core_dim_ixs=NULL;
+ PyObject *core_dim_flags=NULL, *core_dim_sizes=NULL;
int core_enabled;
int core_num_ixs = 0;
@@ -442,7 +527,7 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
goto fail;
}
for (i = 0; i < core_num_ixs; i++) {
- PyObject * val = PyLong_FromLong(f->core_dim_ixs[i]);
+ PyObject *val = PyLong_FromLong(f->core_dim_ixs[i]);
PyTuple_SET_ITEM(core_dim_ixs, i, val);
}
}
@@ -450,13 +535,44 @@ UMath_Tests_test_signature(PyObject *NPY_UNUSED(dummy), PyObject *args)
Py_INCREF(Py_None);
core_dim_ixs = Py_None;
}
+ if (f->core_dim_flags != NULL) {
+ core_dim_flags = PyTuple_New(f->core_num_dim_ix);
+ if (core_dim_flags == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < f->core_num_dim_ix; i++) {
+ PyObject *val = PyLong_FromLong(f->core_dim_flags[i]);
+ PyTuple_SET_ITEM(core_dim_flags, i, val);
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_dim_flags = Py_None;
+ }
+ if (f->core_dim_sizes != NULL) {
+ core_dim_sizes = PyTuple_New(f->core_num_dim_ix);
+ if (core_dim_sizes == NULL) {
+ goto fail;
+ }
+ for (i = 0; i < f->core_num_dim_ix; i++) {
+ PyObject *val = PyLong_FromLong(f->core_dim_sizes[i]);
+ PyTuple_SET_ITEM(core_dim_sizes, i, val);
+ }
+ }
+ else {
+ Py_INCREF(Py_None);
+ core_dim_sizes = Py_None;
+ }
Py_DECREF(f);
- return Py_BuildValue("iOO", core_enabled, core_num_dims, core_dim_ixs);
+ return Py_BuildValue("iNNNN", core_enabled, core_num_dims,
+ core_dim_ixs, core_dim_flags, core_dim_sizes);
fail:
Py_XDECREF(f);
Py_XDECREF(core_num_dims);
Py_XDECREF(core_dim_ixs);
+ Py_XDECREF(core_dim_flags);
+ Py_XDECREF(core_dim_sizes);
return NULL;
}
@@ -464,8 +580,8 @@ static PyMethodDef UMath_TestsMethods[] = {
{"test_signature", UMath_Tests_test_signature, METH_VARARGS,
"Test signature parsing of ufunc. \n"
"Arguments: nin nout signature \n"
- "If fails, it returns NULL. Otherwise it will returns 0 for scalar ufunc "
- "and 1 for generalized ufunc. \n",
+ "If fails, it returns NULL. Otherwise it returns a tuple of ufunc "
+ "internals. \n",
},
{NULL, NULL, 0, NULL} /* Sentinel */
};
@@ -504,6 +620,7 @@ PyMODINIT_FUNC init_umath_tests(void) {
if (m == NULL) {
return RETVAL(NULL);
}
+
import_array();
import_ufunc();
diff --git a/numpy/core/src/umath/clip.c.src b/numpy/core/src/umath/clip.c.src
new file mode 100644
index 000000000..30fa3d2b3
--- /dev/null
+++ b/numpy/core/src/umath/clip.c.src
@@ -0,0 +1,119 @@
+/**
+ * This module provides the inner loops for the clip ufunc
+ */
+#define _UMATHMODULE
+#define _MULTIARRAYMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "Python.h"
+
+#include "numpy/halffloat.h"
+#include "numpy/npy_math.h"
+#include "numpy/ndarraytypes.h"
+#include "numpy/npy_common.h"
+#include "numpy/utils.h"
+#include "fast_loop_macros.h"
+
+/*
+ * Produce macros that perform nan/nat-propagating min and max
+ */
+
+/**begin repeat
+ * #name = BOOL,
+ * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG#
+ */
+#define _NPY_@name@_MIN(a, b) PyArray_MIN(a, b)
+#define _NPY_@name@_MAX(a, b) PyArray_MAX(a, b)
+/**end repeat**/
+
+#define _NPY_HALF_MIN(a, b) (npy_half_isnan(a) || npy_half_le(a, b) ? (a) : (b))
+#define _NPY_HALF_MAX(a, b) (npy_half_isnan(a) || npy_half_ge(a, b) ? (a) : (b))
+
+/**begin repeat
+ * #name = FLOAT, DOUBLE, LONGDOUBLE#
+ */
+#define _NPY_@name@_MIN(a, b) (npy_isnan(a) ? (a) : PyArray_MIN(a, b))
+#define _NPY_@name@_MAX(a, b) (npy_isnan(a) ? (a) : PyArray_MAX(a, b))
+/**end repeat**/
+
+/**begin repeat
+ * #name = CFLOAT, CDOUBLE, CLONGDOUBLE#
+ */
+#define _NPY_@name@_MIN(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CLT(a, b) ? (a) : (b))
+#define _NPY_@name@_MAX(a, b) (npy_isnan((a).real) || npy_isnan((a).imag) || PyArray_CGT(a, b) ? (a) : (b))
+/**end repeat**/
+
+/**begin repeat
+ * #name = DATETIME, TIMEDELTA#
+ */
+#define _NPY_@name@_MIN(a, b) ( \
+ (a) == NPY_DATETIME_NAT ? (a) : \
+ (b) == NPY_DATETIME_NAT ? (b) : \
+ (a) < (b) ? (a) : (b) \
+)
+#define _NPY_@name@_MAX(a, b) ( \
+ (a) == NPY_DATETIME_NAT ? (a) : \
+ (b) == NPY_DATETIME_NAT ? (b) : \
+ (a) > (b) ? (a) : (b) \
+)
+/**end repeat**/
+
+/**begin repeat
+ *
+ * #name = BOOL,
+ * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG,
+ * HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * DATETIME, TIMEDELTA#
+ * #type = npy_bool,
+ * npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
+ * npy_long, npy_ulong, npy_longlong, npy_ulonglong,
+ * npy_half, npy_float, npy_double, npy_longdouble,
+ * npy_cfloat, npy_cdouble, npy_clongdouble,
+ * npy_datetime, npy_timedelta#
+ */
+
+#define _NPY_CLIP(x, min, max) \
+ _NPY_@name@_MIN(_NPY_@name@_MAX((x), (min)), (max))
+
+NPY_NO_EXPORT void
+@name@_clip(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ if (steps[1] == 0 && steps[2] == 0) {
+ /* min and max are constant throughout the loop, the most common case */
+ /* NOTE: it may be possible to optimize these checks for nan */
+ @type@ min_val = *(@type@ *)args[1];
+ @type@ max_val = *(@type@ *)args[2];
+
+ char *ip1 = args[0], *op1 = args[3];
+ npy_intp is1 = steps[0], os1 = steps[3];
+ npy_intp n = dimensions[0];
+
+ /* contiguous, branch to let the compiler optimize */
+ if (is1 == sizeof(@type@) && os1 == sizeof(@type@)) {
+ for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) {
+ *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val);
+ }
+ }
+ else {
+ for(npy_intp i = 0; i < n; i++, ip1 += is1, op1 += os1) {
+ *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, min_val, max_val);
+ }
+ }
+ }
+ else {
+ TERNARY_LOOP {
+ *(@type@ *)op1 = _NPY_CLIP(*(@type@ *)ip1, *(@type@ *)ip2, *(@type@ *)ip3);
+ }
+ }
+ npy_clear_floatstatus_barrier((char*)dimensions);
+}
+
+// clean up the macros we defined above
+#undef _NPY_CLIP
+#undef _NPY_@name@_MAX
+#undef _NPY_@name@_MIN
+
+/**end repeat**/
diff --git a/numpy/core/src/umath/clip.h.src b/numpy/core/src/umath/clip.h.src
new file mode 100644
index 000000000..d77971ad7
--- /dev/null
+++ b/numpy/core/src/umath/clip.h.src
@@ -0,0 +1,18 @@
+#ifndef _NPY_UMATH_CLIP_H_
+#define _NPY_UMATH_CLIP_H_
+
+
+/**begin repeat
+ *
+ * #name = BOOL,
+ * BYTE, UBYTE, SHORT, USHORT, INT, UINT,
+ * LONG, ULONG, LONGLONG, ULONGLONG,
+ * HALF, FLOAT, DOUBLE, LONGDOUBLE,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * DATETIME, TIMEDELTA#
+ */
+NPY_NO_EXPORT void
+@name@_clip(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat**/
+
+#endif
diff --git a/numpy/core/src/umath/cpuid.c b/numpy/core/src/umath/cpuid.c
index 6744ceb05..72c6493e8 100644
--- a/numpy/core/src/umath/cpuid.c
+++ b/numpy/core/src/umath/cpuid.c
@@ -11,6 +11,7 @@
#define XCR_XFEATURE_ENABLED_MASK 0x0
#define XSTATE_SSE 0x2
#define XSTATE_YMM 0x4
+#define XSTATE_ZMM 0x70
/*
* verify the OS supports avx instructions
@@ -33,6 +34,38 @@ int os_avx_support(void)
#endif
}
+static NPY_INLINE
+int os_avx512_support(void)
+{
+#if HAVE_XGETBV
+ unsigned int eax, edx;
+ unsigned int ecx = XCR_XFEATURE_ENABLED_MASK;
+ unsigned int xcr0 = XSTATE_ZMM | XSTATE_YMM | XSTATE_SSE;
+ __asm__("xgetbv" : "=a" (eax), "=d" (edx) : "c" (ecx));
+ return (eax & xcr0) == xcr0;
+#else
+ return 0;
+#endif
+}
+
+static NPY_INLINE
+int cpu_supports_fma(void)
+{
+#ifdef __x86_64__
+ unsigned int feature = 0x01;
+ unsigned int a, b, c, d;
+ __asm__ volatile (
+ "cpuid" "\n\t"
+ : "=a" (a), "=b" (b), "=c" (c), "=d" (d)
+ : "a" (feature));
+ /*
+ * FMA is the 12th bit of ECX
+ */
+ return (c >> 12) & 1;
+#else
+ return 0;
+#endif
+}
/*
* Primitive cpu feature detect function
@@ -42,7 +75,17 @@ NPY_NO_EXPORT int
npy_cpu_supports(const char * feature)
{
#ifdef HAVE___BUILTIN_CPU_SUPPORTS
- if (strcmp(feature, "avx2") == 0) {
+ if (strcmp(feature, "avx512f") == 0) {
+#ifdef HAVE___BUILTIN_CPU_SUPPORTS_AVX512F
+ return __builtin_cpu_supports("avx512f") && os_avx512_support();
+#else
+ return 0;
+#endif
+ }
+ else if (strcmp(feature, "fma") == 0) {
+ return cpu_supports_fma() && __builtin_cpu_supports("avx2") && os_avx_support();
+ }
+ else if (strcmp(feature, "avx2") == 0) {
return __builtin_cpu_supports("avx2") && os_avx_support();
}
else if (strcmp(feature, "avx") == 0) {
diff --git a/numpy/core/src/umath/fast_loop_macros.h b/numpy/core/src/umath/fast_loop_macros.h
new file mode 100644
index 000000000..ae6d69a3e
--- /dev/null
+++ b/numpy/core/src/umath/fast_loop_macros.h
@@ -0,0 +1,234 @@
+/**
+ * Macros to help build fast ufunc inner loops.
+ *
+ * These expect to have access to the arguments of a typical ufunc loop,
+ *
+ * char **args
+ * npy_intp *dimensions
+ * npy_intp *steps
+ */
+#ifndef _NPY_UMATH_FAST_LOOP_MACROS_H_
+#define _NPY_UMATH_FAST_LOOP_MACROS_H_
+
+#include "simd.inc"
+
+/**
+ * Simple unoptimized loop macros that iterate over the ufunc arguments in
+ * parallel.
+ * @{
+ */
+
+/** (<ignored>) -> (op1) */
+#define OUTPUT_LOOP\
+ char *op1 = args[1];\
+ npy_intp os1 = steps[1];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ for(i = 0; i < n; i++, op1 += os1)
+
+/** (ip1) -> (op1) */
+#define UNARY_LOOP\
+ char *ip1 = args[0], *op1 = args[1];\
+ npy_intp is1 = steps[0], os1 = steps[1];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ for(i = 0; i < n; i++, ip1 += is1, op1 += os1)
+
+/** (ip1) -> (op1, op2) */
+#define UNARY_LOOP_TWO_OUT\
+ char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\
+ npy_intp is1 = steps[0], os1 = steps[1], os2 = steps[2];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2)
+
+/** (ip1, ip2) -> (op1) */
+#define BINARY_LOOP\
+ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\
+ npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1)
+
+/** (ip1, ip2) -> (op1, op2) */
+#define BINARY_LOOP_TWO_OUT\
+ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\
+ npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2], os2 = steps[3];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2)
+
+/** (ip1, ip2, ip3) -> (op1) */
+#define TERNARY_LOOP\
+ char *ip1 = args[0], *ip2 = args[1], *ip3 = args[2], *op1 = args[3];\
+ npy_intp is1 = steps[0], is2 = steps[1], is3 = steps[2], os1 = steps[3];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, ip3 += is3, op1 += os1)
+
+/** @} */
+
+/* unary loop input and output contiguous */
+#define IS_UNARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
+ steps[1] == sizeof(tout))
+
+#define IS_OUTPUT_CONT(tout) (steps[1] == sizeof(tout))
+
+#define IS_BINARY_REDUCE ((args[0] == args[2])\
+ && (steps[0] == steps[2])\
+ && (steps[0] == 0))
+
+/* binary loop input and output contiguous */
+#define IS_BINARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
+ steps[1] == sizeof(tin) && \
+ steps[2] == sizeof(tout))
+
+/* binary loop input and output contiguous with first scalar */
+#define IS_BINARY_CONT_S1(tin, tout) (steps[0] == 0 && \
+ steps[1] == sizeof(tin) && \
+ steps[2] == sizeof(tout))
+
+/* binary loop input and output contiguous with second scalar */
+#define IS_BINARY_CONT_S2(tin, tout) (steps[0] == sizeof(tin) && \
+ steps[1] == 0 && \
+ steps[2] == sizeof(tout))
+
+/*
+ * loop with contiguous specialization
+ * op should be the code working on `tin in` and
+ * storing the result in `tout *out`
+ * combine with NPY_GCC_OPT_3 to allow autovectorization
+ * should only be used where its worthwhile to avoid code bloat
+ */
+#define BASE_UNARY_LOOP(tin, tout, op) \
+ UNARY_LOOP { \
+ const tin in = *(tin *)ip1; \
+ tout *out = (tout *)op1; \
+ op; \
+ }
+
+#define UNARY_LOOP_FAST(tin, tout, op) \
+ do { \
+ /* condition allows compiler to optimize the generic macro */ \
+ if (IS_UNARY_CONT(tin, tout)) { \
+ if (args[0] == args[1]) { \
+ BASE_UNARY_LOOP(tin, tout, op) \
+ } \
+ else { \
+ BASE_UNARY_LOOP(tin, tout, op) \
+ } \
+ } \
+ else { \
+ BASE_UNARY_LOOP(tin, tout, op) \
+ } \
+ } \
+ while (0)
+
+/*
+ * loop with contiguous specialization
+ * op should be the code working on `tin in1`, `tin in2` and
+ * storing the result in `tout *out`
+ * combine with NPY_GCC_OPT_3 to allow autovectorization
+ * should only be used where its worthwhile to avoid code bloat
+ */
+#define BASE_BINARY_LOOP(tin, tout, op) \
+ BINARY_LOOP { \
+ const tin in1 = *(tin *)ip1; \
+ const tin in2 = *(tin *)ip2; \
+ tout *out = (tout *)op1; \
+ op; \
+ }
+
+/*
+ * unfortunately gcc 6/7 regressed and we need to give it additional hints to
+ * vectorize inplace operations (PR80198)
+ * must only be used after op1 == ip1 or ip2 has been checked
+ * TODO: using ivdep might allow other compilers to vectorize too
+ */
+#if __GNUC__ >= 6
+#define IVDEP_LOOP _Pragma("GCC ivdep")
+#else
+#define IVDEP_LOOP
+#endif
+#define BASE_BINARY_LOOP_INP(tin, tout, op) \
+ char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\
+ npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\
+ npy_intp n = dimensions[0];\
+ npy_intp i;\
+ IVDEP_LOOP \
+ for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) { \
+ const tin in1 = *(tin *)ip1; \
+ const tin in2 = *(tin *)ip2; \
+ tout *out = (tout *)op1; \
+ op; \
+ }
+
+#define BASE_BINARY_LOOP_S(tin, tout, cin, cinp, vin, vinp, op) \
+ const tin cin = *(tin *)cinp; \
+ BINARY_LOOP { \
+ const tin vin = *(tin *)vinp; \
+ tout *out = (tout *)op1; \
+ op; \
+ }
+
+/* PR80198 again, scalar works without the pragma */
+#define BASE_BINARY_LOOP_S_INP(tin, tout, cin, cinp, vin, vinp, op) \
+ const tin cin = *(tin *)cinp; \
+ BINARY_LOOP { \
+ const tin vin = *(tin *)vinp; \
+ tout *out = (tout *)vinp; \
+ op; \
+ }
+
+#define BINARY_LOOP_FAST(tin, tout, op) \
+ do { \
+ /* condition allows compiler to optimize the generic macro */ \
+ if (IS_BINARY_CONT(tin, tout)) { \
+ if (abs_ptrdiff(args[2], args[0]) == 0 && \
+ abs_ptrdiff(args[2], args[1]) >= NPY_MAX_SIMD_SIZE) { \
+ BASE_BINARY_LOOP_INP(tin, tout, op) \
+ } \
+ else if (abs_ptrdiff(args[2], args[1]) == 0 && \
+ abs_ptrdiff(args[2], args[0]) >= NPY_MAX_SIMD_SIZE) { \
+ BASE_BINARY_LOOP_INP(tin, tout, op) \
+ } \
+ else { \
+ BASE_BINARY_LOOP(tin, tout, op) \
+ } \
+ } \
+ else if (IS_BINARY_CONT_S1(tin, tout)) { \
+ if (abs_ptrdiff(args[2], args[1]) == 0) { \
+ BASE_BINARY_LOOP_S_INP(tin, tout, in1, args[0], in2, ip2, op) \
+ } \
+ else { \
+ BASE_BINARY_LOOP_S(tin, tout, in1, args[0], in2, ip2, op) \
+ } \
+ } \
+ else if (IS_BINARY_CONT_S2(tin, tout)) { \
+ if (abs_ptrdiff(args[2], args[0]) == 0) { \
+ BASE_BINARY_LOOP_S_INP(tin, tout, in2, args[1], in1, ip1, op) \
+ } \
+ else { \
+ BASE_BINARY_LOOP_S(tin, tout, in2, args[1], in1, ip1, op) \
+ }\
+ } \
+ else { \
+ BASE_BINARY_LOOP(tin, tout, op) \
+ } \
+ } \
+ while (0)
+
+#define BINARY_REDUCE_LOOP_INNER\
+ char *ip2 = args[1]; \
+ npy_intp is2 = steps[1]; \
+ npy_intp n = dimensions[0]; \
+ npy_intp i; \
+ for(i = 0; i < n; i++, ip2 += is2)
+
+#define BINARY_REDUCE_LOOP(TYPE)\
+ char *iop1 = args[0]; \
+ TYPE io1 = *(TYPE *)iop1; \
+ BINARY_REDUCE_LOOP_INNER
+
+
+#endif /* _NPY_UMATH_FAST_LOOP_MACROS_H_ */
diff --git a/numpy/core/src/umath/funcs.inc.src b/numpy/core/src/umath/funcs.inc.src
index da2ab07f8..10ed66e50 100644
--- a/numpy/core/src/umath/funcs.inc.src
+++ b/numpy/core/src/umath/funcs.inc.src
@@ -160,6 +160,39 @@ npy_ObjectLogicalNot(PyObject *i1)
}
static PyObject *
+npy_ObjectFloor(PyObject *obj) {
+ static PyObject *math_floor_func = NULL;
+
+ npy_cache_import("math", "floor", &math_floor_func);
+ if (math_floor_func == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(math_floor_func, "O", obj);
+}
+
+static PyObject *
+npy_ObjectCeil(PyObject *obj) {
+ static PyObject *math_ceil_func = NULL;
+
+ npy_cache_import("math", "ceil", &math_ceil_func);
+ if (math_ceil_func == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(math_ceil_func, "O", obj);
+}
+
+static PyObject *
+npy_ObjectTrunc(PyObject *obj) {
+ static PyObject *math_trunc_func = NULL;
+
+ npy_cache_import("math", "trunc", &math_trunc_func);
+ if (math_trunc_func == NULL) {
+ return NULL;
+ }
+ return PyObject_CallFunction(math_trunc_func, "O", obj);
+}
+
+static PyObject *
npy_ObjectGCD(PyObject *i1, PyObject *i2)
{
PyObject *gcd = NULL;
@@ -195,7 +228,8 @@ npy_ObjectGCD(PyObject *i1, PyObject *i2)
return NULL;
}
/* _gcd has some unusual behaviour regarding sign */
- return PyNumber_Absolute(gcd);
+ Py_SETREF(gcd, PyNumber_Absolute(gcd));
+ return gcd;
}
}
@@ -213,17 +247,30 @@ npy_ObjectLCM(PyObject *i1, PyObject *i2)
* no remainder
*/
tmp = PyNumber_FloorDivide(i1, gcd);
+ Py_DECREF(gcd);
if(tmp == NULL) {
return NULL;
}
- tmp = PyNumber_Multiply(tmp, i2);
+ Py_SETREF(tmp, PyNumber_Multiply(tmp, i2));
if(tmp == NULL) {
return NULL;
}
/* even though we fix gcd to be positive, we need to do it again here */
- return PyNumber_Absolute(tmp);
+ Py_SETREF(tmp, PyNumber_Absolute(tmp));
+ return tmp;
+}
+
+
+static PyObject *
+npy_ObjectClip(PyObject *arr, PyObject *min, PyObject *max) {
+ PyObject *o = npy_ObjectMax(arr, min);
+ if (o == NULL) {
+ return NULL;
+ }
+ Py_SETREF(o, npy_ObjectMin(o, max));
+ return o;
}
/*
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index e62942efd..d948e25bb 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -46,200 +46,9 @@
*/
#include "simd.inc"
+/** Provides the various *_LOOP macros */
+#include "fast_loop_macros.h"
-/*
- *****************************************************************************
- ** UFUNC LOOPS **
- *****************************************************************************
- */
-
-/* unary loop input and output contiguous */
-#define IS_UNARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
- steps[1] == sizeof(tout))
-
-#define IS_BINARY_REDUCE ((args[0] == args[2])\
- && (steps[0] == steps[2])\
- && (steps[0] == 0))
-
-/* binary loop input and output contiguous */
-#define IS_BINARY_CONT(tin, tout) (steps[0] == sizeof(tin) && \
- steps[1] == sizeof(tin) && \
- steps[2] == sizeof(tout))
-/* binary loop input and output contiguous with first scalar */
-#define IS_BINARY_CONT_S1(tin, tout) (steps[0] == 0 && \
- steps[1] == sizeof(tin) && \
- steps[2] == sizeof(tout))
-/* binary loop input and output contiguous with second scalar */
-#define IS_BINARY_CONT_S2(tin, tout) (steps[0] == sizeof(tin) && \
- steps[1] == 0 && \
- steps[2] == sizeof(tout))
-
-#define OUTPUT_LOOP\
- char *op1 = args[1];\
- npy_intp os1 = steps[1];\
- npy_intp n = dimensions[0];\
- npy_intp i;\
- for(i = 0; i < n; i++, op1 += os1)
-
-#define UNARY_LOOP\
- char *ip1 = args[0], *op1 = args[1];\
- npy_intp is1 = steps[0], os1 = steps[1];\
- npy_intp n = dimensions[0];\
- npy_intp i;\
- for(i = 0; i < n; i++, ip1 += is1, op1 += os1)
-
-/*
- * loop with contiguous specialization
- * op should be the code working on `tin in` and
- * storing the result in `tout * out`
- * combine with NPY_GCC_OPT_3 to allow autovectorization
- * should only be used where its worthwhile to avoid code bloat
- */
-#define BASE_UNARY_LOOP(tin, tout, op) \
- UNARY_LOOP { \
- const tin in = *(tin *)ip1; \
- tout * out = (tout *)op1; \
- op; \
- }
-#define UNARY_LOOP_FAST(tin, tout, op) \
- do { \
- /* condition allows compiler to optimize the generic macro */ \
- if (IS_UNARY_CONT(tin, tout)) { \
- if (args[0] == args[1]) { \
- BASE_UNARY_LOOP(tin, tout, op) \
- } \
- else { \
- BASE_UNARY_LOOP(tin, tout, op) \
- } \
- } \
- else { \
- BASE_UNARY_LOOP(tin, tout, op) \
- } \
- } \
- while (0)
-
-#define UNARY_LOOP_TWO_OUT\
- char *ip1 = args[0], *op1 = args[1], *op2 = args[2];\
- npy_intp is1 = steps[0], os1 = steps[1], os2 = steps[2];\
- npy_intp n = dimensions[0];\
- npy_intp i;\
- for(i = 0; i < n; i++, ip1 += is1, op1 += os1, op2 += os2)
-
-#define BINARY_LOOP\
- char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\
- npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\
- npy_intp n = dimensions[0];\
- npy_intp i;\
- for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1)
-
-/*
- * loop with contiguous specialization
- * op should be the code working on `tin in1`, `tin in2` and
- * storing the result in `tout * out`
- * combine with NPY_GCC_OPT_3 to allow autovectorization
- * should only be used where its worthwhile to avoid code bloat
- */
-#define BASE_BINARY_LOOP(tin, tout, op) \
- BINARY_LOOP { \
- const tin in1 = *(tin *)ip1; \
- const tin in2 = *(tin *)ip2; \
- tout * out = (tout *)op1; \
- op; \
- }
-/*
- * unfortunately gcc 6/7 regressed and we need to give it additional hints to
- * vectorize inplace operations (PR80198)
- * must only be used after op1 == ip1 or ip2 has been checked
- * TODO: using ivdep might allow other compilers to vectorize too
- */
-#if __GNUC__ >= 6
-#define IVDEP_LOOP _Pragma("GCC ivdep")
-#else
-#define IVDEP_LOOP
-#endif
-#define BASE_BINARY_LOOP_INP(tin, tout, op) \
- char *ip1 = args[0], *ip2 = args[1], *op1 = args[2];\
- npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2];\
- npy_intp n = dimensions[0];\
- npy_intp i;\
- IVDEP_LOOP \
- for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1) { \
- const tin in1 = *(tin *)ip1; \
- const tin in2 = *(tin *)ip2; \
- tout * out = (tout *)op1; \
- op; \
- }
-#define BASE_BINARY_LOOP_S(tin, tout, cin, cinp, vin, vinp, op) \
- const tin cin = *(tin *)cinp; \
- BINARY_LOOP { \
- const tin vin = *(tin *)vinp; \
- tout * out = (tout *)op1; \
- op; \
- }
-/* PR80198 again, scalar works without the pragma */
-#define BASE_BINARY_LOOP_S_INP(tin, tout, cin, cinp, vin, vinp, op) \
- const tin cin = *(tin *)cinp; \
- BINARY_LOOP { \
- const tin vin = *(tin *)vinp; \
- tout * out = (tout *)vinp; \
- op; \
- }
-#define BINARY_LOOP_FAST(tin, tout, op) \
- do { \
- /* condition allows compiler to optimize the generic macro */ \
- if (IS_BINARY_CONT(tin, tout)) { \
- if (abs_ptrdiff(args[2], args[0]) == 0 && \
- abs_ptrdiff(args[2], args[1]) >= NPY_MAX_SIMD_SIZE) { \
- BASE_BINARY_LOOP_INP(tin, tout, op) \
- } \
- else if (abs_ptrdiff(args[2], args[1]) == 0 && \
- abs_ptrdiff(args[2], args[0]) >= NPY_MAX_SIMD_SIZE) { \
- BASE_BINARY_LOOP_INP(tin, tout, op) \
- } \
- else { \
- BASE_BINARY_LOOP(tin, tout, op) \
- } \
- } \
- else if (IS_BINARY_CONT_S1(tin, tout)) { \
- if (abs_ptrdiff(args[2], args[1]) == 0) { \
- BASE_BINARY_LOOP_S_INP(tin, tout, in1, args[0], in2, ip2, op) \
- } \
- else { \
- BASE_BINARY_LOOP_S(tin, tout, in1, args[0], in2, ip2, op) \
- } \
- } \
- else if (IS_BINARY_CONT_S2(tin, tout)) { \
- if (abs_ptrdiff(args[2], args[0]) == 0) { \
- BASE_BINARY_LOOP_S_INP(tin, tout, in2, args[1], in1, ip1, op) \
- } \
- else { \
- BASE_BINARY_LOOP_S(tin, tout, in2, args[1], in1, ip1, op) \
- }\
- } \
- else { \
- BASE_BINARY_LOOP(tin, tout, op) \
- } \
- } \
- while (0)
-
-#define BINARY_REDUCE_LOOP_INNER\
- char *ip2 = args[1]; \
- npy_intp is2 = steps[1]; \
- npy_intp n = dimensions[0]; \
- npy_intp i; \
- for(i = 0; i < n; i++, ip2 += is2)
-
-#define BINARY_REDUCE_LOOP(TYPE)\
- char *iop1 = args[0]; \
- TYPE io1 = *(TYPE *)iop1; \
- BINARY_REDUCE_LOOP_INNER
-
-#define BINARY_LOOP_TWO_OUT\
- char *ip1 = args[0], *ip2 = args[1], *op1 = args[2], *op2 = args[3];\
- npy_intp is1 = steps[0], is2 = steps[1], os1 = steps[2], os2 = steps[3];\
- npy_intp n = dimensions[0];\
- npy_intp i;\
- for(i = 0; i < n; i++, ip1 += is1, ip2 += is2, op1 += os1, op2 += os2)
/******************************************************************************
** GENERIC FLOAT LOOPS **
@@ -566,16 +375,38 @@ NPY_NO_EXPORT void
PyUFunc_O_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
char *meth = (char *)func;
+ PyObject *tup = PyTuple_New(0);
+ if (tup == NULL) {
+ return;
+ }
UNARY_LOOP {
PyObject *in1 = *(PyObject **)ip1;
PyObject **out = (PyObject **)op1;
- PyObject *ret = PyObject_CallMethod(in1 ? in1 : Py_None, meth, NULL);
+ PyObject *ret, *func;
+ func = PyObject_GetAttrString(in1 ? in1 : Py_None, meth);
+ if (func == NULL || !PyCallable_Check(func)) {
+ PyObject *exc, *val, *tb;
+ PyTypeObject *type = in1 ? Py_TYPE(in1) : Py_TYPE(Py_None);
+ PyErr_Fetch(&exc, &val, &tb);
+ PyErr_Format(PyExc_TypeError,
+ "loop of ufunc does not support argument %d of "
+ "type %s which has no callable %s method",
+ i, type->tp_name, meth);
+ npy_PyErr_ChainExceptionsCause(exc, val, tb);
+ Py_DECREF(tup);
+ Py_XDECREF(func);
+ return;
+ }
+ ret = PyObject_Call(func, tup, NULL);
+ Py_DECREF(func);
if (ret == NULL) {
+ Py_DECREF(tup);
return;
}
Py_XDECREF(*out);
*out = ret;
}
+ Py_DECREF(tup);
}
/*UFUNC_API*/
@@ -596,6 +427,28 @@ PyUFunc_OO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
}
}
+NPY_NO_EXPORT void
+PyUFunc_OOO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+{
+ ternaryfunc f = (ternaryfunc)func;
+ TERNARY_LOOP {
+ PyObject *in1 = *(PyObject **)ip1;
+ PyObject *in2 = *(PyObject **)ip2;
+ PyObject *in3 = *(PyObject **)ip3;
+ PyObject **out = (PyObject **)op1;
+ PyObject *ret = f(
+ in1 ? in1 : Py_None,
+ in2 ? in2 : Py_None,
+ in3 ? in3 : Py_None
+ );
+ if (ret == NULL) {
+ return;
+ }
+ Py_XDECREF(*out);
+ *out = ret;
+ }
+}
+
/*UFUNC_API*/
NPY_NO_EXPORT void
PyUFunc_OO_O_method(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
@@ -815,6 +668,23 @@ BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
}
+/**begin repeat
+ * #kind = isnan, isinf, isfinite#
+ * #func = npy_isnan, npy_isinf, npy_isfinite#
+ * #val = NPY_FALSE, NPY_FALSE, NPY_TRUE#
+ **/
+NPY_NO_EXPORT void
+BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ /*
+ * The (void)in; suppresses an unused variable warning raised by gcc and allows
+ * us to re-use this macro even though we do not depend on in
+ */
+ UNARY_LOOP_FAST(npy_bool, npy_bool, (void)in; *out = @val@);
+}
+
+/**end repeat**/
+
/*
*****************************************************************************
** INTEGER LOOPS
@@ -829,6 +699,7 @@ BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UN
* #ftype = npy_float, npy_float, npy_float, npy_float, npy_double, npy_double,
* npy_double, npy_double, npy_double, npy_double#
* #SIGNED = 1, 0, 1, 0, 1, 0, 1, 0, 1, 0#
+ * #c = hh,uhh,h,uh,,u,l,ul,ll,ull#
*/
#define @TYPE@_floor_divide @TYPE@_divide
@@ -906,16 +777,15 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
/**begin repeat2
* Arithmetic
- * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor,
- * left_shift, right_shift#
- * #OP = +, -,*, &, |, ^, <<, >>#
+ * #kind = add, subtract, multiply, bitwise_and, bitwise_or, bitwise_xor#
+ * #OP = +, -, *, &, |, ^#
*/
#if @CHK@
NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
@TYPE@_@kind@@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- if(IS_BINARY_REDUCE) {
+ if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
io1 @OP@= *(@type@ *)ip2;
}
@@ -929,6 +799,47 @@ NPY_NO_EXPORT NPY_GCC_OPT_3 @ATTR@ void
/**end repeat2**/
+/*
+ * Arithmetic bit shift operations.
+ *
+ * Intel hardware masks bit shift values, so large shifts wrap around
+ * and can produce surprising results. The special handling ensures that
+ * behavior is independent of compiler or hardware.
+ * TODO: We could implement consistent behavior for negative shifts,
+ * which is undefined in C.
+ */
+
+#define INT_left_shift_needs_clear_floatstatus
+#define UINT_left_shift_needs_clear_floatstatus
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_left_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_FAST(@type@, @type@, *out = npy_lshift@c@(in1, in2));
+
+#ifdef @TYPE@_left_shift_needs_clear_floatstatus
+ // For some reason, our macOS CI sets an "invalid" flag here, but only
+ // for some types.
+ npy_clear_floatstatus_barrier((char*)dimensions);
+#endif
+}
+
+#undef INT_left_shift_needs_clear_floatstatus
+#undef UINT_left_shift_needs_clear_floatstatus
+
+NPY_NO_EXPORT
+#ifndef NPY_DO_NOT_OPTIMIZE_@TYPE@_right_shift
+NPY_GCC_OPT_3
+#endif
+void
+@TYPE@_right_shift@isa@(char **args, npy_intp *dimensions, npy_intp *steps,
+ void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_FAST(@type@, @type@, *out = npy_rshift@c@(in1, in2));
+}
+
+
/**begin repeat2
* #kind = equal, not_equal, greater, greater_equal, less, less_equal,
* logical_and, logical_or#
@@ -1046,6 +957,22 @@ NPY_NO_EXPORT void
}
}
+/**begin repeat1
+ * #kind = isnan, isinf, isfinite#
+ * #func = npy_isnan, npy_isinf, npy_isfinite#
+ * #val = NPY_FALSE, NPY_FALSE, NPY_TRUE#
+ **/
+NPY_NO_EXPORT void
+@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ /*
+ * The (void)in; suppresses an unused variable warning raised by gcc and allows
+ * us to re-use this macro even though we do not depend on in
+ */
+ UNARY_LOOP_FAST(@type@, npy_bool, (void)in; *out = @val@);
+}
+/**end repeat1**/
+
/**end repeat**/
/**begin repeat
@@ -1165,13 +1092,10 @@ NPY_NO_EXPORT void
* #c = u,u,u,ul,ull#
*/
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
@TYPE@_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const @type@ in1 = *(@type@ *)ip1;
- *((@type@ *)op1) = in1;
- }
+ UNARY_LOOP_FAST(@type@, @type@, *out = in);
}
NPY_NO_EXPORT NPY_GCC_OPT_3 void
@@ -1313,6 +1237,15 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
+@TYPE@_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *((npy_bool *)op1) = (in1 != NPY_DATETIME_NAT);
+ }
+}
+
+NPY_NO_EXPORT void
@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
@@ -1591,6 +1524,90 @@ TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
}
}
+NPY_NO_EXPORT void
+TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const npy_timedelta in1 = *(npy_timedelta *)ip1;
+ const npy_timedelta in2 = *(npy_timedelta *)ip2;
+ if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+ *((npy_timedelta *)op1) = NPY_DATETIME_NAT;
+ }
+ else {
+ if (in2 == 0) {
+ npy_set_floatstatus_divbyzero();
+ *((npy_timedelta *)op1) = NPY_DATETIME_NAT;
+ }
+ else {
+ /* handle mixed case the way Python does */
+ const npy_timedelta rem = in1 % in2;
+ if ((in1 > 0) == (in2 > 0) || rem == 0) {
+ *((npy_timedelta *)op1) = rem;
+ }
+ else {
+ *((npy_timedelta *)op1) = rem + in2;
+ }
+ }
+ }
+ }
+}
+
+NPY_NO_EXPORT void
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const npy_timedelta in1 = *(npy_timedelta *)ip1;
+ const npy_timedelta in2 = *(npy_timedelta *)ip2;
+ if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+ npy_set_floatstatus_invalid();
+ *((npy_int64 *)op1) = 0;
+ }
+ else if (in2 == 0) {
+ npy_set_floatstatus_divbyzero();
+ *((npy_int64 *)op1) = 0;
+ }
+ else {
+ if (((in1 > 0) != (in2 > 0)) && (in1 % in2 != 0)) {
+ *((npy_int64 *)op1) = in1/in2 - 1;
+ }
+ else {
+ *((npy_int64 *)op1) = in1/in2;
+ }
+ }
+ }
+}
+
+NPY_NO_EXPORT void
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP_TWO_OUT {
+ const npy_timedelta in1 = *(npy_timedelta *)ip1;
+ const npy_timedelta in2 = *(npy_timedelta *)ip2;
+ if (in1 == NPY_DATETIME_NAT || in2 == NPY_DATETIME_NAT) {
+ npy_set_floatstatus_invalid();
+ *((npy_int64 *)op1) = 0;
+ *((npy_timedelta *)op2) = NPY_DATETIME_NAT;
+ }
+ else if (in2 == 0) {
+ npy_set_floatstatus_divbyzero();
+ *((npy_int64 *)op1) = 0;
+ *((npy_timedelta *)op2) = NPY_DATETIME_NAT;
+ }
+ else {
+ const npy_int64 quo = in1 / in2;
+ const npy_timedelta rem = in1 % in2;
+ if ((in1 > 0) == (in2 > 0) || rem == 0) {
+ *((npy_int64 *)op1) = quo;
+ *((npy_timedelta *)op2) = rem;
+ }
+ else {
+ *((npy_int64 *)op1) = quo - 1;
+ *((npy_timedelta *)op2) = rem + in2;
+ }
+ }
+ }
+}
+
/*
*****************************************************************************
** FLOAT LOOPS **
@@ -1617,6 +1634,176 @@ NPY_NO_EXPORT void
/**end repeat**/
+/**begin repeat
+ * #func = rint, ceil, floor, trunc#
+ * #scalarf = npy_rint, npy_ceil, npy_floor, npy_trunc#
+ */
+
+/**begin repeat1
+* #TYPE = FLOAT, DOUBLE#
+* #type = npy_float, npy_double#
+* #typesub = f, #
+*/
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = @scalarf@@typesub@(in1);
+ }
+}
+
+
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #func = sin, cos, exp, log#
+ * #scalarf = npy_sinf, npy_cosf, npy_expf, npy_logf#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ UNARY_LOOP {
+ const npy_float in1 = *(npy_float *)ip1;
+ *(npy_float *)op1 = @scalarf@(in1);
+ }
+}
+
+/**end repeat**/
+
+/**begin repeat
+ * #isa = avx512f, fma#
+ * #ISA = AVX512F, FMA#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS#
+ */
+
+/**begin repeat1
+ * #TYPE = FLOAT, DOUBLE#
+ * #type = npy_float, npy_double#
+ * #typesub = f, #
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_sqrt_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_sqrt_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = npy_sqrt@typesub@(in1);
+ }
+ }
+}
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_absolute_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_absolute_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ tmp = in1 > 0 ? in1 : -in1;
+ /* add 0 to clear -0.0 */
+ *((@type@ *)op1) = tmp + 0;
+ }
+ }
+ npy_clear_floatstatus_barrier((char*)dimensions);
+}
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_square_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_square_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = in1*in1;
+ }
+ }
+}
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_reciprocal_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_reciprocal_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = 1.0f/in1;
+ }
+ }
+}
+
+/**begin repeat2
+ * #func = rint, ceil, floor, trunc#
+ * #scalarf = npy_rint, npy_ceil, npy_floor, npy_trunc#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_@func@_@TYPE@(args, dimensions, steps)) {
+ UNARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = @scalarf@@typesub@(in1);
+ }
+ }
+}
+
+/**end repeat2**/
+/**end repeat1**/
+
+/**begin repeat1
+ * #func = exp, log#
+ * #scalarf = npy_expf, npy_logf#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_@func@_FLOAT(args, dimensions, steps)) {
+ UNARY_LOOP {
+ /*
+ * We use the AVX function to compute exp/log for scalar elements as well.
+ * This is needed to ensure the output of strided and non-strided
+ * cases match. SIMD code handles strided input cases, but not
+ * strided output.
+ */
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ @ISA@_@func@_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0]);
+#else
+ const npy_float in1 = *(npy_float *)ip1;
+ *(npy_float *)op1 = @scalarf@(in1);
+#endif
+ }
+ }
+}
+
+/**end repeat1**/
+
+/**begin repeat1
+ * #func = cos, sin#
+ * #enum = npy_compute_cos, npy_compute_sin#
+ * #scalarf = npy_cosf, npy_sinf#
+ */
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
+{
+ if (!run_unary_@isa@_sincos_FLOAT(args, dimensions, steps, @enum@)) {
+ UNARY_LOOP {
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ @ISA@_sincos_FLOAT((npy_float *)op1, (npy_float *)ip1, 1, steps[0], @enum@);
+#else
+ const npy_float in1 = *(npy_float *)ip1;
+ *(npy_float *)op1 = @scalarf@(in1);
+#endif
+ }
+ }
+}
+
+/**end repeat1**/
+/**end repeat**/
+
/**begin repeat
* Float types
@@ -1833,11 +2020,9 @@ NPY_NO_EXPORT void
if (!run_unary_reduce_simd_@kind@_@TYPE@(args, dimensions, steps)) {
BINARY_REDUCE_LOOP(@type@) {
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
io1 = (io1 @OP@ in2 || npy_isnan(io1)) ? io1 : in2;
}
- if (npy_isnan(io1)) {
- npy_set_floatstatus_invalid();
- }
*((@type@ *)iop1) = io1;
}
}
@@ -1845,13 +2030,12 @@ NPY_NO_EXPORT void
BINARY_LOOP {
@type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
in1 = (in1 @OP@ in2 || npy_isnan(in1)) ? in1 : in2;
- if (npy_isnan(in1)) {
- npy_set_floatstatus_invalid();
- }
*((@type@ *)op1) = in1;
}
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -1866,6 +2050,7 @@ NPY_NO_EXPORT void
if (IS_BINARY_REDUCE) {
BINARY_REDUCE_LOOP(@type@) {
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
io1 = (io1 @OP@ in2 || npy_isnan(in2)) ? io1 : in2;
}
*((@type@ *)iop1) = io1;
@@ -1874,6 +2059,7 @@ NPY_NO_EXPORT void
BINARY_LOOP {
const @type@ in1 = *(@type@ *)ip1;
const @type@ in2 = *(@type@ *)ip2;
+ /* Order of operations important for MSVC 2015 */
*((@type@ *)op1) = (in1 @OP@ in2 || npy_isnan(in2)) ? in1 : in2;
}
}
@@ -2195,14 +2381,11 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
{
/* */
BINARY_LOOP {
- npy_half in1 = *(npy_half *)ip1;
+ const npy_half in1 = *(npy_half *)ip1;
const npy_half in2 = *(npy_half *)ip2;
- in1 = (@OP@(in1, in2) || npy_half_isnan(in1)) ? in1 : in2;
- if (npy_half_isnan(in1)) {
- npy_set_floatstatus_invalid();
- }
- *((npy_half *)op1) = in1;
+ *((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in1)) ? in1 : in2;
}
+ /* npy_half_isnan will never set floatstatus_invalid, so do not clear */
}
/**end repeat**/
@@ -2219,7 +2402,7 @@ HALF_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
const npy_half in2 = *(npy_half *)ip2;
*((npy_half *)op1) = (@OP@(in1, in2) || npy_half_isnan(in2)) ? in1 : in2;
}
- npy_clear_floatstatus_barrier((char*)dimensions);
+ /* npy_half_isnan will never set floatstatus_invalid, so do not clear */
}
/**end repeat**/
@@ -2289,13 +2472,10 @@ HALF_conjugate(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNU
}
}
-NPY_NO_EXPORT void
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
HALF_absolute(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
{
- UNARY_LOOP {
- const npy_half in1 = *(npy_half *)ip1;
- *((npy_half *)op1) = in1&0x7fffu;
- }
+ UNARY_LOOP_FAST(npy_half, npy_half, *out = in&0x7fffu);
}
NPY_NO_EXPORT void
@@ -2761,16 +2941,14 @@ NPY_NO_EXPORT void
@ftype@ in1i = ((@ftype@ *)ip1)[1];
const @ftype@ in2r = ((@ftype@ *)ip2)[0];
const @ftype@ in2i = ((@ftype@ *)ip2)[1];
- if ( !(@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in1r) || npy_isnan(in1i))) {
+ if ( !(npy_isnan(in1r) || npy_isnan(in1i) || @OP@(in1r, in1i, in2r, in2i))) {
in1r = in2r;
in1i = in2i;
}
- if (npy_isnan(in1r) || npy_isnan(in1i)) {
- npy_set_floatstatus_invalid();
- }
((@ftype@ *)op1)[0] = in1r;
((@ftype@ *)op1)[1] = in1i;
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
@@ -2786,7 +2964,7 @@ NPY_NO_EXPORT void
const @ftype@ in1i = ((@ftype@ *)ip1)[1];
const @ftype@ in2r = ((@ftype@ *)ip2)[0];
const @ftype@ in2i = ((@ftype@ *)ip2)[1];
- if (@OP@(in1r, in1i, in2r, in2i) || npy_isnan(in2r) || npy_isnan(in2i)) {
+ if (npy_isnan(in2r) || npy_isnan(in2i) || @OP@(in1r, in1i, in2r, in2i)) {
((@ftype@ *)op1)[0] = in1r;
((@ftype@ *)op1)[1] = in1i;
}
@@ -2795,6 +2973,7 @@ NPY_NO_EXPORT void
((@ftype@ *)op1)[1] = in2i;
}
}
+ npy_clear_floatstatus_barrier((char*)dimensions);
}
/**end repeat1**/
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 5c2b2c22c..e98a1ac3c 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -38,6 +38,13 @@ BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED
NPY_NO_EXPORT void
BOOL__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+/**begin repeat
+ * #kind = isnan, isinf, isfinite#
+ **/
+NPY_NO_EXPORT void
+BOOL_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat**/
+
/*
*****************************************************************************
** INTEGER LOOPS
@@ -146,6 +153,13 @@ NPY_NO_EXPORT void
NPY_NO_EXPORT void
@S@@TYPE@_lcm(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**begin repeat2
+ * #kind = isnan, isinf, isfinite#
+ **/
+NPY_NO_EXPORT void
+@S@@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat2**/
+
/**end repeat1**/
/**end repeat**/
@@ -161,6 +175,55 @@ NPY_NO_EXPORT void
*/
NPY_NO_EXPORT void
@TYPE@_sqrt(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+/**begin repeat1
+ * #isa = avx512f, fma#
+ */
+
+/**begin repeat2
+ * #func = sqrt, absolute, square, reciprocal#
+ */
+NPY_NO_EXPORT void
+@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+/**end repeat2**/
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #func = sin, cos, exp, log#
+ */
+NPY_NO_EXPORT void
+FLOAT_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+/**begin repeat1
+ * #isa = avx512f, fma#
+ */
+
+NPY_NO_EXPORT void
+FLOAT_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #func = rint, ceil, floor, trunc#
+ */
+
+/**begin repeat1
+* #TYPE = FLOAT, DOUBLE#
+*/
+
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+
+/**begin repeat2
+ * #isa = avx512f, fma#
+ */
+NPY_NO_EXPORT NPY_GCC_OPT_3 void
+@TYPE@_@func@_@isa@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
+/**end repeat2**/
+/**end repeat1**/
/**end repeat**/
/**begin repeat
@@ -414,6 +477,9 @@ NPY_NO_EXPORT void
@TYPE@_isnat(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
+@TYPE@_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+NPY_NO_EXPORT void
@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
/**begin repeat1
@@ -473,6 +539,15 @@ TIMEDELTA_md_m_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *
NPY_NO_EXPORT void
TIMEDELTA_mm_d_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+TIMEDELTA_mm_q_floor_divide(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+NPY_NO_EXPORT void
+TIMEDELTA_mm_m_remainder(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+NPY_NO_EXPORT void
+TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
/* Special case equivalents to above functions */
#define TIMEDELTA_mq_m_true_divide TIMEDELTA_mq_m_divide
@@ -507,6 +582,9 @@ OBJECT@suffix@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *
NPY_NO_EXPORT void
OBJECT_sign(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+NPY_NO_EXPORT void
+PyUFunc_OOO_O(char **args, npy_intp *dimensions, npy_intp *steps, void *func);
+
/*
*****************************************************************************
** END LOOPS **
diff --git a/numpy/core/src/umath/matmul.c.src b/numpy/core/src/umath/matmul.c.src
new file mode 100644
index 000000000..b5204eca5
--- /dev/null
+++ b/numpy/core/src/umath/matmul.c.src
@@ -0,0 +1,504 @@
+/* -*- c -*- */
+
+#define _UMATHMODULE
+#define _MULTIARRAYMODULE
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "Python.h"
+
+#include "npy_config.h"
+#include "numpy/npy_common.h"
+#include "numpy/arrayobject.h"
+#include "numpy/ufuncobject.h"
+#include "numpy/npy_math.h"
+#include "numpy/halffloat.h"
+#include "lowlevel_strided_loops.h"
+
+#include "npy_pycompat.h"
+
+#include "npy_cblas.h"
+#include "arraytypes.h" /* For TYPE_dot functions */
+
+#include <assert.h>
+
+/*
+ *****************************************************************************
+ ** BASICS **
+ *****************************************************************************
+ */
+
+/*
+ * -1 to be conservative, in case blas internally uses a for loop with an
+ * inclusive upper bound
+ */
+#define BLAS_MAXSIZE (NPY_MAX_INT - 1)
+
+/*
+ * Determine if a 2d matrix can be used by BLAS
+ * 1. Strides must not alias or overlap
+ * 2. The faster (second) axis must be contiguous
+ * 3. The slower (first) axis stride, in unit steps, must be larger than
+ * the faster axis dimension
+ */
+static NPY_INLINE npy_bool
+is_blasable2d(npy_intp byte_stride1, npy_intp byte_stride2,
+ npy_intp d1, npy_intp d2, npy_intp itemsize)
+{
+ npy_intp unit_stride1 = byte_stride1 / itemsize;
+ if (byte_stride2 != itemsize) {
+ return NPY_FALSE;
+ }
+ if ((byte_stride1 % itemsize ==0) &&
+ (unit_stride1 >= d2) &&
+ (unit_stride1 <= BLAS_MAXSIZE))
+ {
+ return NPY_TRUE;
+ }
+ return NPY_FALSE;
+}
+
+#if defined(HAVE_CBLAS)
+static const npy_cdouble oneD = {1.0, 0.0}, zeroD = {0.0, 0.0};
+static const npy_cfloat oneF = {1.0, 0.0}, zeroF = {0.0, 0.0};
+
+/**begin repeat
+ *
+ * #name = FLOAT, DOUBLE, CFLOAT, CDOUBLE#
+ * #ctype = npy_float, npy_double, npy_cfloat, npy_cdouble#
+ * #typ = npy_float, npy_double, npy_cfloat, npy_cdouble#
+ * #prefix = s, d, c, z#
+ * #step1 = 1.F, 1., &oneF, &oneD#
+ * #step0 = 0.F, 0., &zeroF, &zeroD#
+ */
+NPY_NO_EXPORT void
+@name@_gemv(void *ip1, npy_intp is1_m, npy_intp is1_n,
+ void *ip2, npy_intp is2_n, npy_intp NPY_UNUSED(is2_p),
+ void *op, npy_intp op_m, npy_intp NPY_UNUSED(op_p),
+ npy_intp m, npy_intp n, npy_intp NPY_UNUSED(p))
+{
+ /*
+ * Vector matrix multiplication -- Level 2 BLAS
+ * arguments
+ * ip1: contiguous data, m*n shape
+ * ip2: data in c order, n*1 shape
+ * op: data in c order, m shape
+ */
+ enum CBLAS_ORDER order;
+ int M, N, lda;
+
+ assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE);
+ assert (is_blasable2d(is2_n, sizeof(@typ@), n, 1, sizeof(@typ@)));
+ M = (int)m;
+ N = (int)n;
+
+ if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
+ order = CblasColMajor;
+ lda = (int)(is1_m / sizeof(@typ@));
+ }
+ else {
+ /* If not ColMajor, caller should have ensured we are RowMajor */
+ /* will not assert in release mode */
+ order = CblasRowMajor;
+ assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
+ lda = (int)(is1_n / sizeof(@typ@));
+ }
+ cblas_@prefix@gemv(order, CblasTrans, N, M, @step1@, ip1, lda, ip2,
+ is2_n / sizeof(@typ@), @step0@, op, op_m / sizeof(@typ@));
+}
+
+NPY_NO_EXPORT void
+@name@_matmul_matrixmatrix(void *ip1, npy_intp is1_m, npy_intp is1_n,
+ void *ip2, npy_intp is2_n, npy_intp is2_p,
+ void *op, npy_intp os_m, npy_intp os_p,
+ npy_intp m, npy_intp n, npy_intp p)
+{
+ /*
+ * matrix matrix multiplication -- Level 3 BLAS
+ */
+ enum CBLAS_ORDER order = CblasRowMajor;
+ enum CBLAS_TRANSPOSE trans1, trans2;
+ int M, N, P, lda, ldb, ldc;
+ assert(m <= BLAS_MAXSIZE && n <= BLAS_MAXSIZE && p <= BLAS_MAXSIZE);
+ M = (int)m;
+ N = (int)n;
+ P = (int)p;
+
+ assert(is_blasable2d(os_m, os_p, m, p, sizeof(@typ@)));
+ ldc = (int)(os_m / sizeof(@typ@));
+
+ if (is_blasable2d(is1_m, is1_n, m, n, sizeof(@typ@))) {
+ trans1 = CblasNoTrans;
+ lda = (int)(is1_m / sizeof(@typ@));
+ }
+ else {
+ /* If not ColMajor, caller should have ensured we are RowMajor */
+ /* will not assert in release mode */
+ assert(is_blasable2d(is1_n, is1_m, n, m, sizeof(@typ@)));
+ trans1 = CblasTrans;
+ lda = (int)(is1_n / sizeof(@typ@));
+ }
+
+ if (is_blasable2d(is2_n, is2_p, n, p, sizeof(@typ@))) {
+ trans2 = CblasNoTrans;
+ ldb = (int)(is2_n / sizeof(@typ@));
+ }
+ else {
+ /* If not ColMajor, caller should have ensured we are RowMajor */
+ /* will not assert in release mode */
+ assert(is_blasable2d(is2_p, is2_n, p, n, sizeof(@typ@)));
+ trans2 = CblasTrans;
+ ldb = (int)(is2_p / sizeof(@typ@));
+ }
+ /*
+ * Use syrk if we have a case of a matrix times its transpose.
+ * Otherwise, use gemm for all other cases.
+ */
+ if (
+ (ip1 == ip2) &&
+ (m == p) &&
+ (is1_m == is2_p) &&
+ (is1_n == is2_n) &&
+ (trans1 != trans2)
+ ) {
+ npy_intp i,j;
+ if (trans1 == CblasNoTrans) {
+ cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
+ ip1, lda, @step0@, op, ldc);
+ }
+ else {
+ cblas_@prefix@syrk(order, CblasUpper, trans1, P, N, @step1@,
+ ip1, ldb, @step0@, op, ldc);
+ }
+ /* Copy the triangle */
+ for (i = 0; i < P; i++) {
+ for (j = i + 1; j < P; j++) {
+ ((@typ@*)op)[j * ldc + i] = ((@typ@*)op)[i * ldc + j];
+ }
+ }
+
+ }
+ else {
+ cblas_@prefix@gemm(order, trans1, trans2, M, P, N, @step1@, ip1, lda,
+ ip2, ldb, @step0@, op, ldc);
+ }
+}
+
+/**end repeat**/
+#endif
+
+/*
+ * matmul loops
+ * signature is (m?,n),(n,p?)->(m?,p?)
+ */
+
+/**begin repeat
+ * #TYPE = LONGDOUBLE,
+ * FLOAT, DOUBLE, HALF,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG#
+ * #typ = npy_longdouble,
+ * npy_float,npy_double,npy_half,
+ * npy_cfloat, npy_cdouble, npy_clongdouble,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_byte, npy_short, npy_int, npy_long, npy_longlong#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*10#
+ * #IS_HALF = 0, 0, 0, 1, 0*13#
+ */
+
+NPY_NO_EXPORT void
+@TYPE@_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+ void *_ip2, npy_intp is2_n, npy_intp is2_p,
+ void *_op, npy_intp os_m, npy_intp os_p,
+ npy_intp dm, npy_intp dn, npy_intp dp)
+
+{
+ npy_intp m, n, p;
+ npy_intp ib1_n, ib2_n, ib2_p, ob_p;
+ char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+
+ ib1_n = is1_n * dn;
+ ib2_n = is2_n * dn;
+ ib2_p = is2_p * dp;
+ ob_p = os_p * dp;
+
+ for (m = 0; m < dm; m++) {
+ for (p = 0; p < dp; p++) {
+#if @IS_COMPLEX@ == 1
+ (*(@typ@ *)op).real = 0;
+ (*(@typ@ *)op).imag = 0;
+#elif @IS_HALF@
+ float sum = 0;
+#else
+ *(@typ@ *)op = 0;
+#endif
+ for (n = 0; n < dn; n++) {
+ @typ@ val1 = (*(@typ@ *)ip1);
+ @typ@ val2 = (*(@typ@ *)ip2);
+#if @IS_HALF@
+ sum += npy_half_to_float(val1) * npy_half_to_float(val2);
+#elif @IS_COMPLEX@ == 1
+ (*(@typ@ *)op).real += (val1.real * val2.real) -
+ (val1.imag * val2.imag);
+ (*(@typ@ *)op).imag += (val1.real * val2.imag) +
+ (val1.imag * val2.real);
+#else
+ *(@typ@ *)op += val1 * val2;
+#endif
+ ip2 += is2_n;
+ ip1 += is1_n;
+ }
+#if @IS_HALF@
+ *(@typ@ *)op = npy_float_to_half(sum);
+#endif
+ ip1 -= ib1_n;
+ ip2 -= ib2_n;
+ op += os_p;
+ ip2 += is2_p;
+ }
+ op -= ob_p;
+ ip2 -= ib2_p;
+ ip1 += is1_m;
+ op += os_m;
+ }
+}
+
+/**end repeat**/
+NPY_NO_EXPORT void
+BOOL_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+ void *_ip2, npy_intp is2_n, npy_intp is2_p,
+ void *_op, npy_intp os_m, npy_intp os_p,
+ npy_intp dm, npy_intp dn, npy_intp dp)
+
+{
+ npy_intp m, n, p;
+ npy_intp ib2_p, ob_p;
+ char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+
+ ib2_p = is2_p * dp;
+ ob_p = os_p * dp;
+
+ for (m = 0; m < dm; m++) {
+ for (p = 0; p < dp; p++) {
+ char *ip1tmp = ip1;
+ char *ip2tmp = ip2;
+ *(npy_bool *)op = NPY_FALSE;
+ for (n = 0; n < dn; n++) {
+ npy_bool val1 = (*(npy_bool *)ip1tmp);
+ npy_bool val2 = (*(npy_bool *)ip2tmp);
+ if (val1 != 0 && val2 != 0) {
+ *(npy_bool *)op = NPY_TRUE;
+ break;
+ }
+ ip2tmp += is2_n;
+ ip1tmp += is1_n;
+ }
+ op += os_p;
+ ip2 += is2_p;
+ }
+ op -= ob_p;
+ ip2 -= ib2_p;
+ ip1 += is1_m;
+ op += os_m;
+ }
+}
+
+NPY_NO_EXPORT void
+OBJECT_matmul_inner_noblas(void *_ip1, npy_intp is1_m, npy_intp is1_n,
+ void *_ip2, npy_intp is2_n, npy_intp is2_p,
+ void *_op, npy_intp os_m, npy_intp os_p,
+ npy_intp dm, npy_intp dn, npy_intp dp)
+{
+ char *ip1 = (char *)_ip1, *ip2 = (char *)_ip2, *op = (char *)_op;
+
+ npy_intp ib1_n = is1_n * dn;
+ npy_intp ib2_n = is2_n * dn;
+ npy_intp ib2_p = is2_p * dp;
+ npy_intp ob_p = os_p * dp;
+
+ PyObject *product, *sum_of_products = NULL;
+
+ for (npy_intp m = 0; m < dm; m++) {
+ for (npy_intp p = 0; p < dp; p++) {
+ if ( 0 == dn ) {
+ sum_of_products = PyLong_FromLong(0);
+ if (sum_of_products == NULL) {
+ return;
+ }
+ }
+
+ for (npy_intp n = 0; n < dn; n++) {
+ PyObject *obj1 = *(PyObject**)ip1, *obj2 = *(PyObject**)ip2;
+ if (obj1 == NULL) {
+ obj1 = Py_None;
+ }
+ if (obj2 == NULL) {
+ obj2 = Py_None;
+ }
+
+ product = PyNumber_Multiply(obj1, obj2);
+ if (product == NULL) {
+ Py_XDECREF(sum_of_products);
+ return;
+ }
+
+ if (n == 0) {
+ sum_of_products = product;
+ }
+ else {
+ Py_SETREF(sum_of_products, PyNumber_Add(sum_of_products, product));
+ Py_DECREF(product);
+ if (sum_of_products == NULL) {
+ return;
+ }
+ }
+
+ ip2 += is2_n;
+ ip1 += is1_n;
+ }
+
+ *((PyObject **)op) = sum_of_products;
+ ip1 -= ib1_n;
+ ip2 -= ib2_n;
+ op += os_p;
+ ip2 += is2_p;
+ }
+ op -= ob_p;
+ ip2 -= ib2_p;
+ ip1 += is1_m;
+ op += os_m;
+ }
+}
+
+
+/**begin repeat
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * BOOL, OBJECT#
+ * #typ = npy_float,npy_double,npy_longdouble, npy_half,
+ * npy_cfloat, npy_cdouble, npy_clongdouble,
+ * npy_ubyte, npy_ushort, npy_uint, npy_ulong, npy_ulonglong,
+ * npy_byte, npy_short, npy_int, npy_long, npy_longlong,
+ * npy_bool,npy_object#
+ * #IS_COMPLEX = 0, 0, 0, 0, 1, 1, 1, 0*12#
+ * #USEBLAS = 1, 1, 0, 0, 1, 1, 0*13#
+ */
+
+
+NPY_NO_EXPORT void
+@TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ npy_intp dOuter = *dimensions++;
+ npy_intp iOuter;
+ npy_intp s0 = *steps++;
+ npy_intp s1 = *steps++;
+ npy_intp s2 = *steps++;
+ npy_intp dm = dimensions[0];
+ npy_intp dn = dimensions[1];
+ npy_intp dp = dimensions[2];
+ npy_intp is1_m=steps[0], is1_n=steps[1], is2_n=steps[2], is2_p=steps[3],
+ os_m=steps[4], os_p=steps[5];
+#if @USEBLAS@ && defined(HAVE_CBLAS)
+ npy_intp sz = sizeof(@typ@);
+ npy_bool special_case = (dm == 1 || dn == 1 || dp == 1);
+ npy_bool any_zero_dim = (dm == 0 || dn == 0 || dp == 0);
+ npy_bool scalar_out = (dm == 1 && dp == 1);
+ npy_bool scalar_vec = (dn == 1 && (dp == 1 || dm == 1));
+ npy_bool too_big_for_blas = (dm > BLAS_MAXSIZE || dn > BLAS_MAXSIZE ||
+ dp > BLAS_MAXSIZE);
+ npy_bool i1_c_blasable = is_blasable2d(is1_m, is1_n, dm, dn, sz);
+ npy_bool i2_c_blasable = is_blasable2d(is2_n, is2_p, dn, dp, sz);
+ npy_bool i1_f_blasable = is_blasable2d(is1_n, is1_m, dn, dm, sz);
+ npy_bool i2_f_blasable = is_blasable2d(is2_p, is2_n, dp, dn, sz);
+ npy_bool i1blasable = i1_c_blasable || i1_f_blasable;
+ npy_bool i2blasable = i2_c_blasable || i2_f_blasable;
+ npy_bool o_c_blasable = is_blasable2d(os_m, os_p, dm, dp, sz);
+ npy_bool o_f_blasable = is_blasable2d(os_p, os_m, dp, dm, sz);
+ npy_bool vector_matrix = ((dm == 1) && i2blasable &&
+ is_blasable2d(is1_n, sz, dn, 1, sz));
+ npy_bool matrix_vector = ((dp == 1) && i1blasable &&
+ is_blasable2d(is2_n, sz, dn, 1, sz));
+#endif
+
+ for (iOuter = 0; iOuter < dOuter; iOuter++,
+ args[0] += s0, args[1] += s1, args[2] += s2) {
+ void *ip1=args[0], *ip2=args[1], *op=args[2];
+#if @USEBLAS@ && defined(HAVE_CBLAS)
+ /*
+ * TODO: refactor this out to a inner_loop_selector, in
+ * PyUFunc_MatmulLoopSelector. But that call does not have access to
+ * n, m, p and strides.
+ */
+ if (too_big_for_blas || any_zero_dim) {
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ }
+ else if (special_case) {
+ /* Special case variants that have a 1 in the core dimensions */
+ if (scalar_out) {
+ /* row @ column, 1,1 output */
+ @TYPE@_dot(ip1, is1_n, ip2, is2_n, op, dn, NULL);
+ } else if (scalar_vec){
+ /*
+ * 1,1d @ vector or vector @ 1,1d
+ * could use cblas_Xaxy, but that requires 0ing output
+ * and would not be faster (XXX prove it)
+ */
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ } else if (vector_matrix) {
+ /* vector @ matrix, switch ip1, ip2, p and m */
+ @TYPE@_gemv(ip2, is2_p, is2_n, ip1, is1_n, is1_m,
+ op, os_p, os_m, dp, dn, dm);
+ } else if (matrix_vector) {
+ /* matrix @ vector */
+ @TYPE@_gemv(ip1, is1_m, is1_n, ip2, is2_n, is2_p,
+
+ op, os_m, os_p, dm, dn, dp);
+ } else {
+ /* column @ row, 2d output, no blas needed or non-blas-able input */
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ }
+ } else {
+ /* matrix @ matrix */
+ if (i1blasable && i2blasable && o_c_blasable) {
+ @TYPE@_matmul_matrixmatrix(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p,
+ dm, dn, dp);
+ } else if (i1blasable && i2blasable && o_f_blasable) {
+ /*
+ * Use transpose equivalence:
+ * matmul(a, b, o) == matmul(b.T, a.T, o.T)
+ */
+ @TYPE@_matmul_matrixmatrix(ip2, is2_p, is2_n,
+ ip1, is1_n, is1_m,
+ op, os_p, os_m,
+ dp, dn, dm);
+ } else {
+ /*
+ * If parameters are castable to int and we copy the
+ * non-blasable (or non-ccontiguous output)
+ * we could still use BLAS, see gh-12365.
+ */
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+ }
+ }
+#else
+ @TYPE@_matmul_inner_noblas(ip1, is1_m, is1_n,
+ ip2, is2_n, is2_p,
+ op, os_m, os_p, dm, dn, dp);
+
+#endif
+ }
+}
+
+/**end repeat**/
diff --git a/numpy/core/src/umath/matmul.h.src b/numpy/core/src/umath/matmul.h.src
new file mode 100644
index 000000000..a664b1b4e
--- /dev/null
+++ b/numpy/core/src/umath/matmul.h.src
@@ -0,0 +1,12 @@
+/**begin repeat
+ * #TYPE = FLOAT, DOUBLE, LONGDOUBLE, HALF,
+ * CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * UBYTE, USHORT, UINT, ULONG, ULONGLONG,
+ * BYTE, SHORT, INT, LONG, LONGLONG,
+ * BOOL, OBJECT#
+ **/
+NPY_NO_EXPORT void
+@TYPE@_matmul(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+/**end repeat**/
+
+
diff --git a/numpy/core/src/umath/override.c b/numpy/core/src/umath/override.c
index 4a381ba12..8d67f96ac 100644
--- a/numpy/core/src/umath/override.c
+++ b/numpy/core/src/umath/override.c
@@ -5,8 +5,98 @@
#include "numpy/ufuncobject.h"
#include "npy_import.h"
-#include "ufunc_override.h"
#include "override.h"
+#include "ufunc_override.h"
+
+/*
+ * For each positional argument and each argument in a possible "out"
+ * keyword, look for overrides of the standard ufunc behaviour, i.e.,
+ * non-default __array_ufunc__ methods.
+ *
+ * Returns the number of overrides, setting corresponding objects
+ * in PyObject array ``with_override`` and the corresponding
+ * __array_ufunc__ methods in ``methods`` (both using new references).
+ *
+ * Only the first override for a given class is returned.
+ *
+ * Returns -1 on failure.
+ */
+static int
+get_array_ufunc_overrides(PyObject *args, PyObject *kwds,
+ PyObject **with_override, PyObject **methods)
+{
+ int i;
+ int num_override_args = 0;
+ int narg, nout = 0;
+ PyObject *out_kwd_obj;
+ PyObject **arg_objs, **out_objs;
+
+ narg = PyTuple_Size(args);
+ if (narg < 0) {
+ return -1;
+ }
+ arg_objs = PySequence_Fast_ITEMS(args);
+
+ nout = PyUFuncOverride_GetOutObjects(kwds, &out_kwd_obj, &out_objs);
+ if (nout < 0) {
+ return -1;
+ }
+
+ for (i = 0; i < narg + nout; ++i) {
+ PyObject *obj;
+ int j;
+ int new_class = 1;
+
+ if (i < narg) {
+ obj = arg_objs[i];
+ }
+ else {
+ obj = out_objs[i - narg];
+ }
+ /*
+ * Have we seen this class before? If so, ignore.
+ */
+ for (j = 0; j < num_override_args; j++) {
+ new_class = (Py_TYPE(obj) != Py_TYPE(with_override[j]));
+ if (!new_class) {
+ break;
+ }
+ }
+ if (new_class) {
+ /*
+ * Now see if the object provides an __array_ufunc__. However, we should
+ * ignore the base ndarray.__ufunc__, so we skip any ndarray as well as
+ * any ndarray subclass instances that did not override __array_ufunc__.
+ */
+ PyObject *method = PyUFuncOverride_GetNonDefaultArrayUfunc(obj);
+ if (method == NULL) {
+ continue;
+ }
+ if (method == Py_None) {
+ PyErr_Format(PyExc_TypeError,
+ "operand '%.200s' does not support ufuncs "
+ "(__array_ufunc__=None)",
+ obj->ob_type->tp_name);
+ Py_DECREF(method);
+ goto fail;
+ }
+ Py_INCREF(obj);
+ with_override[num_override_args] = obj;
+ methods[num_override_args] = method;
+ ++num_override_args;
+ }
+ }
+ Py_DECREF(out_kwd_obj);
+ return num_override_args;
+
+fail:
+ for (i = 0; i < num_override_args; i++) {
+ Py_DECREF(with_override[i]);
+ Py_DECREF(methods[i]);
+ }
+ Py_DECREF(out_kwd_obj);
+ return -1;
+}
/*
* The following functions normalize ufunc arguments. The work done is similar
@@ -136,14 +226,14 @@ normalize_reduce_args(PyUFuncObject *ufunc, PyObject *args,
PyObject *obj;
static PyObject *NoValue = NULL;
static char *kwlist[] = {"array", "axis", "dtype", "out", "keepdims",
- "initial"};
+ "initial", "where"};
npy_cache_import("numpy", "_NoValue", &NoValue);
if (NoValue == NULL) return -1;
- if (nargs < 1 || nargs > 6) {
+ if (nargs < 1 || nargs > 7) {
PyErr_Format(PyExc_TypeError,
- "ufunc.reduce() takes from 1 to 6 positional "
+ "ufunc.reduce() takes from 1 to 7 positional "
"arguments but %"NPY_INTP_FMT" were given", nargs);
return -1;
}
@@ -359,7 +449,7 @@ PyUFunc_CheckOverride(PyUFuncObject *ufunc, char *method,
/*
* Check inputs for overrides
*/
- num_override_args = PyUFunc_WithOverride(
+ num_override_args = get_array_ufunc_overrides(
args, kwds, with_override, array_ufunc_methods);
if (num_override_args == -1) {
goto fail;
diff --git a/numpy/core/src/umath/reduction.c b/numpy/core/src/umath/reduction.c
index 6d04ce372..4ce8d8ab7 100644
--- a/numpy/core/src/umath/reduction.c
+++ b/numpy/core/src/umath/reduction.c
@@ -36,7 +36,7 @@
* If 'dtype' isn't NULL, this function steals its reference.
*/
static PyArrayObject *
-allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags,
+allocate_reduce_result(PyArrayObject *arr, const npy_bool *axis_flags,
PyArray_Descr *dtype, int subok)
{
npy_intp strides[NPY_MAXDIMS], stride;
@@ -54,7 +54,9 @@ allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags,
/* Build the new strides and shape */
stride = dtype->elsize;
- memcpy(shape, arr_shape, ndim * sizeof(shape[0]));
+ if (ndim) {
+ memcpy(shape, arr_shape, ndim * sizeof(shape[0]));
+ }
for (idim = ndim-1; idim >= 0; --idim) {
npy_intp i_perm = strideperm[idim].perm;
if (axis_flags[i_perm]) {
@@ -82,7 +84,7 @@ allocate_reduce_result(PyArrayObject *arr, npy_bool *axis_flags,
* The return value is a view into 'out'.
*/
static PyArrayObject *
-conform_reduce_result(int ndim, npy_bool *axis_flags,
+conform_reduce_result(int ndim, const npy_bool *axis_flags,
PyArrayObject *out, int keepdims, const char *funcname,
int need_copy)
{
@@ -186,7 +188,6 @@ conform_reduce_result(int ndim, npy_bool *axis_flags,
return NULL;
}
- Py_INCREF(ret);
if (PyArray_SetWritebackIfCopyBase(ret_copy, (PyArrayObject *)ret) < 0) {
Py_DECREF(ret);
Py_DECREF(ret_copy);
@@ -250,7 +251,7 @@ PyArray_CreateReduceResult(PyArrayObject *operand, PyArrayObject *out,
* Count the number of dimensions selected in 'axis_flags'
*/
static int
-count_axes(int ndim, npy_bool *axis_flags)
+count_axes(int ndim, const npy_bool *axis_flags)
{
int idim;
int naxes = 0;
@@ -298,7 +299,7 @@ count_axes(int ndim, npy_bool *axis_flags)
NPY_NO_EXPORT PyArrayObject *
PyArray_InitializeReduceResult(
PyArrayObject *result, PyArrayObject *operand,
- npy_bool *axis_flags,
+ const npy_bool *axis_flags,
npy_intp *out_skip_first_count, const char *funcname)
{
npy_intp *strides, *shape, shape_orig[NPY_MAXDIMS];
@@ -326,7 +327,9 @@ PyArray_InitializeReduceResult(
*/
shape = PyArray_SHAPE(op_view);
nreduce_axes = 0;
- memcpy(shape_orig, shape, ndim * sizeof(npy_intp));
+ if (ndim) {
+ memcpy(shape_orig, shape, ndim * sizeof(npy_intp));
+ }
for (idim = 0; idim < ndim; ++idim) {
if (axis_flags[idim]) {
if (shape[idim] == 0) {
@@ -444,9 +447,9 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
/* Iterator parameters */
NpyIter *iter = NULL;
- PyArrayObject *op[2];
- PyArray_Descr *op_dtypes[2];
- npy_uint32 flags, op_flags[2];
+ PyArrayObject *op[3];
+ PyArray_Descr *op_dtypes[3];
+ npy_uint32 flags, op_flags[3];
/* More than one axis means multiple orders are possible */
if (!reorderable && count_axes(PyArray_NDIM(operand), axis_flags) > 1) {
@@ -456,13 +459,12 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
funcname);
return NULL;
}
-
-
- /* Validate that the parameters for future expansion are NULL */
- if (wheremask != NULL) {
- PyErr_SetString(PyExc_RuntimeError,
- "Reduce operations in NumPy do not yet support "
- "a where mask");
+ /* Can only use where with an initial ( from identity or argument) */
+ if (wheremask != NULL && identity == Py_None) {
+ PyErr_Format(PyExc_ValueError,
+ "reduction operation '%s' does not have an identity, "
+ "so to use a where mask one has to specify 'initial'",
+ funcname);
return NULL;
}
@@ -524,8 +526,18 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
NPY_ITER_NO_SUBTYPE;
op_flags[1] = NPY_ITER_READONLY |
NPY_ITER_ALIGNED;
+ if (wheremask != NULL) {
+ op[2] = wheremask;
+ /* wheremask is guaranteed to be NPY_BOOL, so borrow its reference */
+ op_dtypes[2] = PyArray_DESCR(wheremask);
+ assert(op_dtypes[2]->type_num == NPY_BOOL);
+ if (op_dtypes[2] == NULL) {
+ goto fail;
+ }
+ op_flags[2] = NPY_ITER_READONLY;
+ }
- iter = NpyIter_AdvancedNew(2, op, flags,
+ iter = NpyIter_AdvancedNew(wheremask == NULL ? 2 : 3, op, flags,
NPY_KEEPORDER, casting,
op_flags,
op_dtypes,
@@ -568,7 +580,7 @@ PyUFunc_ReduceWrapper(PyArrayObject *operand, PyArrayObject *out,
goto fail;
}
}
-
+
/* Check whether any errors occurred during the loop */
if (PyErr_Occurred() ||
_check_ufunc_fperr(errormask, NULL, "reduce") < 0) {
diff --git a/numpy/core/src/umath/scalarmath.c.src b/numpy/core/src/umath/scalarmath.c.src
index e98d9f865..d5d8d659b 100644
--- a/numpy/core/src/umath/scalarmath.c.src
+++ b/numpy/core/src/umath/scalarmath.c.src
@@ -246,25 +246,26 @@ static void
/**end repeat**/
-
-/* QUESTION: Should we check for overflow / underflow in (l,r)shift? */
-
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong#
* #type = npy_byte, npy_ubyte, npy_short, npy_ushort, npy_int, npy_uint,
* npy_long, npy_ulong, npy_longlong, npy_ulonglong#
+ * #suffix = hh,uhh,h,uh,,u,l,ul,ll,ull#
*/
/**begin repeat1
- * #oper = and, xor, or, lshift, rshift#
- * #op = &, ^, |, <<, >>#
+ * #oper = and, xor, or#
+ * #op = &, ^, |#
*/
#define @name@_ctype_@oper@(arg1, arg2, out) *(out) = (arg1) @op@ (arg2)
/**end repeat1**/
+#define @name@_ctype_lshift(arg1, arg2, out) *(out) = npy_lshift@suffix@(arg1, arg2)
+#define @name@_ctype_rshift(arg1, arg2, out) *(out) = npy_rshift@suffix@(arg1, arg2)
+
/**end repeat**/
/**begin repeat
@@ -405,21 +406,22 @@ half_ctype_divmod(npy_half a, npy_half b, npy_half *out1, npy_half *out2) {
/**begin repeat
* #name = float, double, longdouble#
* #type = npy_float, npy_double, npy_longdouble#
+ * #c = f,,l#
*/
-static npy_@name@ (*_basic_@name@_pow)(@type@ a, @type@ b);
static void
@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
{
- *out = _basic_@name@_pow(a, b);
+ *out = npy_pow@c@(a, b);
}
+
/**end repeat**/
static void
half_ctype_power(npy_half a, npy_half b, npy_half *out)
{
const npy_float af = npy_half_to_float(a);
const npy_float bf = npy_half_to_float(b);
- const npy_float outf = _basic_float_pow(af,bf);
+ const npy_float outf = npy_powf(af,bf);
*out = npy_float_to_half(outf);
}
@@ -476,14 +478,10 @@ static void
}
/**end repeat**/
-/*
- * Get the nc_powf, nc_pow, and nc_powl functions from
- * the data area of the power ufunc in umathmodule.
- */
-
/**begin repeat
* #name = cfloat, cdouble, clongdouble#
* #type = npy_cfloat, npy_cdouble, npy_clongdouble#
+ * #c = f,,l#
*/
static void
@name@_ctype_positive(@type@ a, @type@ *out)
@@ -492,12 +490,10 @@ static void
out->imag = a.imag;
}
-static void (*_basic_@name@_pow)(@type@ *, @type@ *, @type@ *);
-
static void
@name@_ctype_power(@type@ a, @type@ b, @type@ *out)
{
- _basic_@name@_pow(&a, &b, out);
+ *out = npy_cpow@c@(a, b);
}
/**end repeat**/
@@ -570,7 +566,7 @@ static void
* 1) Convert the types to the common type if both are scalars (0 return)
* 2) If both are not scalars use ufunc machinery (-2 return)
* 3) If both are scalars but cannot be cast to the right type
- * return NotImplmented (-1 return)
+ * return NotImplemented (-1 return)
*
* 4) Perform the function on the C-type.
* 5) If an error condition occurred, check to see
@@ -1429,24 +1425,53 @@ static PyObject *
/**begin repeat
*
+ * #name = byte, ubyte, short, ushort, int, uint,
+ * long, ulong, longlong, ulonglong,
+ * half, float, double, longdouble,
+ * cfloat, cdouble, clongdouble#
+ * #Name = Byte, UByte, Short, UShort, Int, UInt,
+ * Long, ULong, LongLong, ULongLong,
+ * Half, Float, Double, LongDouble,
+ * CFloat, CDouble, CLongDouble#
+ * #cmplx = 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1#
+ * #to_ctype = , , , , , , , , , , npy_half_to_double, , , , , , #
+ * #func = PyFloat_FromDouble*17#
+ */
+static NPY_INLINE PyObject *
+@name@_float(PyObject *obj)
+{
+#if @cmplx@
+ if (emit_complexwarning() < 0) {
+ return NULL;
+ }
+ return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@).real));
+#else
+ return @func@(@to_ctype@(PyArrayScalar_VAL(obj, @Name@)));
+#endif
+}
+/**end repeat**/
+
+
+#if !defined(NPY_PY3K)
+
+/**begin repeat
+ *
* #name = (byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
* half, float, double, longdouble,
- * cfloat, cdouble, clongdouble)*2#
+ * cfloat, cdouble, clongdouble)#
* #Name = (Byte, UByte, Short, UShort, Int, UInt,
* Long, ULong, LongLong, ULongLong,
* Half, Float, Double, LongDouble,
- * CFloat, CDouble, CLongDouble)*2#
- * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)*2#
- * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )*2#
- * #which = long*17, float*17#
+ * CFloat, CDouble, CLongDouble)#
+ * #cmplx = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1)#
+ * #to_ctype = (, , , , , , , , , , npy_half_to_double, , , , , , )#
* #func = (PyLong_FromLongLong, PyLong_FromUnsignedLongLong)*5,
* PyLong_FromDouble*3, npy_longdouble_to_PyLong,
- * PyLong_FromDouble*2, npy_longdouble_to_PyLong,
- * PyFloat_FromDouble*17#
+ * PyLong_FromDouble*2, npy_longdouble_to_PyLong#
*/
static NPY_INLINE PyObject *
-@name@_@which@(PyObject *obj)
+@name@_long(PyObject *obj)
{
#if @cmplx@
if (emit_complexwarning() < 0) {
@@ -1459,8 +1484,6 @@ static NPY_INLINE PyObject *
}
/**end repeat**/
-#if !defined(NPY_PY3K)
-
/**begin repeat
*
* #name = (byte, ubyte, short, ushort, int, uint,
@@ -1564,7 +1587,6 @@ static PyObject*
}
/**end repeat**/
-
/**begin repeat
* #name = byte, ubyte, short, ushort, int, uint,
* long, ulong, longlong, ulonglong,
@@ -1575,8 +1597,7 @@ static PyNumberMethods @name@_as_number = {
(binaryfunc)@name@_add, /*nb_add*/
(binaryfunc)@name@_subtract, /*nb_subtract*/
(binaryfunc)@name@_multiply, /*nb_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(binaryfunc)@name@_divide, /*nb_divide*/
#endif
(binaryfunc)@name@_remainder, /*nb_remainder*/
@@ -1596,8 +1617,7 @@ static PyNumberMethods @name@_as_number = {
(binaryfunc)@name@_and, /*nb_and*/
(binaryfunc)@name@_xor, /*nb_xor*/
(binaryfunc)@name@_or, /*nb_or*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*nb_coerce*/
#endif
(unaryfunc)@name@_int, /*nb_int*/
@@ -1607,16 +1627,14 @@ static PyNumberMethods @name@_as_number = {
(unaryfunc)@name@_long, /*nb_long*/
#endif
(unaryfunc)@name@_float, /*nb_float*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
(unaryfunc)@name@_oct, /*nb_oct*/
(unaryfunc)@name@_hex, /*nb_hex*/
#endif
0, /*inplace_add*/
0, /*inplace_subtract*/
0, /*inplace_multiply*/
-#if defined(NPY_PY3K)
-#else
+#if !defined(NPY_PY3K)
0, /*inplace_divide*/
#endif
0, /*inplace_remainder*/
@@ -1631,6 +1649,10 @@ static PyNumberMethods @name@_as_number = {
0, /*nb_inplace_floor_divide*/
0, /*nb_inplace_true_divide*/
(unaryfunc)NULL, /*nb_index*/
+#if PY_VERSION_HEX >= 0x03050000
+ 0, /*nb_matrix_multiply*/
+ 0, /*nb_inplace_matrix_multiply*/
+#endif
};
/**end repeat**/
@@ -1653,52 +1675,9 @@ add_scalarmath(void)
/**end repeat**/
}
-static int
-get_functions(PyObject * mm)
-{
- PyObject *obj;
- void **funcdata;
- char *signatures;
- int i, j;
- int ret = -1;
-
- /* Get the nc_pow functions */
- /* Get the pow functions */
- obj = PyObject_GetAttrString(mm, "power");
- if (obj == NULL) {
- goto fail;
- }
- funcdata = ((PyUFuncObject *)obj)->data;
- signatures = ((PyUFuncObject *)obj)->types;
-
- i = 0;
- j = 0;
- while (signatures[i] != NPY_FLOAT) {
- i += 3;
- j++;
- }
- _basic_float_pow = funcdata[j];
- _basic_double_pow = funcdata[j + 1];
- _basic_longdouble_pow = funcdata[j + 2];
- _basic_cfloat_pow = funcdata[j + 3];
- _basic_cdouble_pow = funcdata[j + 4];
- _basic_clongdouble_pow = funcdata[j + 5];
- Py_DECREF(obj);
-
- return ret = 0;
-
- fail:
- Py_DECREF(mm);
- return ret;
-}
-
NPY_NO_EXPORT int initscalarmath(PyObject * m)
{
- if (get_functions(m) < 0) {
- return -1;
- }
-
add_scalarmath();
return 0;
diff --git a/numpy/core/src/umath/simd.inc.src b/numpy/core/src/umath/simd.inc.src
index 5c0568c12..74f52cc9d 100644
--- a/numpy/core/src/umath/simd.inc.src
+++ b/numpy/core/src/umath/simd.inc.src
@@ -17,8 +17,6 @@
#include "lowlevel_strided_loops.h"
#include "numpy/npy_common.h"
-/* for NO_FLOATING_POINT_SUPPORT */
-#include "numpy/ufuncobject.h"
#include "numpy/npy_math.h"
#ifdef NPY_HAVE_SSE2_INTRINSICS
#include <emmintrin.h>
@@ -34,13 +32,14 @@
#include <float.h>
#include <string.h> /* for memcpy */
+#define VECTOR_SIZE_BYTES 16
+
static NPY_INLINE npy_uintp
abs_ptrdiff(char *a, char *b)
{
return (a > b) ? (a - b) : (b - a);
}
-
/*
* stride is equal to element size and input and destination are equal or
* don't overlap within one register. The check of the steps against
@@ -52,6 +51,15 @@ abs_ptrdiff(char *a, char *b)
((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \
((abs_ptrdiff(args[1], args[0]) == 0))))
+/*
+ * output should be contiguous, can handle strided input data
+ */
+#define IS_OUTPUT_BLOCKABLE_UNARY(esize, vsize) \
+ (steps[1] == (esize) && \
+ (npy_is_aligned(args[0], esize) && npy_is_aligned(args[1], esize)) && \
+ ((abs_ptrdiff(args[1], args[0]) >= (vsize)) || \
+ ((abs_ptrdiff(args[1], args[0]) == 0))))
+
#define IS_BLOCKABLE_REDUCE(esize, vsize) \
(steps[1] == (esize) && abs_ptrdiff(args[1], args[0]) >= (vsize) && \
npy_is_aligned(args[1], (esize)) && \
@@ -122,6 +130,93 @@ abs_ptrdiff(char *a, char *b)
*/
/**begin repeat
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512f#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ * #REGISTER_SIZE = 32, 64#
+ */
+
+/* prototypes */
+
+/**begin repeat1
+ * #type = npy_float, npy_double#
+ * #TYPE = FLOAT, DOUBLE#
+ */
+
+/**begin repeat2
+ * #func = sqrt, absolute, square, reciprocal, rint, floor, ceil, trunc#
+ */
+
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE NPY_GCC_TARGET_@ISA@ void
+@ISA@_@func@_@TYPE@(@type@ *, @type@ *, const npy_intp n, const npy_intp stride);
+#endif
+
+static NPY_INLINE int
+run_unary_@isa@_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(@type@), @REGISTER_SIZE@)) {
+ @ISA@_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0], steps[0]);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+/**end repeat2**/
+/**end repeat1**/
+
+/**begin repeat1
+ * #func = exp, log#
+ */
+
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE void
+@ISA@_@func@_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp stride);
+#endif
+
+static NPY_INLINE int
+run_unary_@isa@_@func@_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps)
+{
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
+ @ISA@_@func@_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0]);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+/**end repeat1**/
+
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+static NPY_INLINE void
+@ISA@_sincos_FLOAT(npy_float *, npy_float *, const npy_intp n, const npy_intp steps, NPY_TRIG_OP);
+#endif
+
+static NPY_INLINE int
+run_unary_@isa@_sincos_FLOAT(char **args, npy_intp *dimensions, npy_intp *steps, NPY_TRIG_OP my_trig_op)
+{
+#if defined @CHK@ && defined NPY_HAVE_SSE2_INTRINSICS
+ if (IS_OUTPUT_BLOCKABLE_UNARY(sizeof(npy_float), @REGISTER_SIZE@)) {
+ @ISA@_sincos_FLOAT((npy_float*)args[1], (npy_float*)args[0], dimensions[0], steps[0], my_trig_op);
+ return 1;
+ }
+ else
+ return 0;
+#endif
+ return 0;
+}
+
+/**end repeat**/
+
+
+/**begin repeat
* Float types
* #type = npy_float, npy_double, npy_longdouble#
* #TYPE = FLOAT, DOUBLE, LONGDOUBLE#
@@ -132,7 +227,6 @@ abs_ptrdiff(char *a, char *b)
* #func = sqrt, absolute, negative, minimum, maximum#
* #check = IS_BLOCKABLE_UNARY*3, IS_BLOCKABLE_REDUCE*2 #
* #name = unary*3, unary_reduce*2#
- * #minmax = 0*3, 1*2#
*/
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
@@ -146,17 +240,13 @@ sse2_@func@_@TYPE@(@type@ *, @type@ *, const npy_intp n);
static NPY_INLINE int
run_@name@_simd_@func@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps)
{
-#if @minmax@ && (defined NO_FLOATING_POINT_SUPPORT)
- return 0;
-#else
#if @vector@ && defined NPY_HAVE_SSE2_INTRINSICS
- if (@check@(sizeof(@type@), 16)) {
+ if (@check@(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_@func@_@TYPE@((@type@*)args[1], (@type@*)args[0], dimensions[0]);
return 1;
}
#endif
return 0;
-#endif
}
/**end repeat1**/
@@ -189,17 +279,24 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps
@type@ * ip2 = (@type@ *)args[1];
@type@ * op = (@type@ *)args[2];
npy_intp n = dimensions[0];
+#if defined __AVX512F__
+ const npy_intp vector_size_bytes = 64;
+#elif defined __AVX2__
+ const npy_intp vector_size_bytes = 32;
+#else
+ const npy_intp vector_size_bytes = 32;
+#endif
/* argument one scalar */
- if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), 16)) {
+ if (IS_BLOCKABLE_BINARY_SCALAR1(sizeof(@type@), vector_size_bytes)) {
sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
/* argument two scalar */
- else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY_SCALAR2(sizeof(@type@), vector_size_bytes)) {
sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
- else if (IS_BLOCKABLE_BINARY(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY(sizeof(@type@), vector_size_bytes)) {
sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
@@ -239,16 +336,16 @@ run_binary_simd_@kind@_@TYPE@(char **args, npy_intp *dimensions, npy_intp *steps
npy_bool * op = (npy_bool *)args[2];
npy_intp n = dimensions[0];
/* argument one scalar */
- if (IS_BLOCKABLE_BINARY_SCALAR1_BOOL(sizeof(@type@), 16)) {
+ if (IS_BLOCKABLE_BINARY_SCALAR1_BOOL(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_binary_scalar1_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
/* argument two scalar */
- else if (IS_BLOCKABLE_BINARY_SCALAR2_BOOL(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY_SCALAR2_BOOL(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_binary_scalar2_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
- else if (IS_BLOCKABLE_BINARY_BOOL(sizeof(@type@), 16)) {
+ else if (IS_BLOCKABLE_BINARY_BOOL(sizeof(@type@), VECTOR_SIZE_BYTES)) {
sse2_binary_@kind@_@TYPE@(op, ip1, ip2, n);
return 1;
}
@@ -309,7 +406,8 @@ static NPY_INLINE int
run_binary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
- if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_BINARY(sizeof(npy_bool), 16)) {
+ if (sizeof(npy_bool) == 1 &&
+ IS_BLOCKABLE_BINARY(sizeof(npy_bool), VECTOR_SIZE_BYTES)) {
sse2_binary_@kind@_BOOL((npy_bool*)args[2], (npy_bool*)args[0],
(npy_bool*)args[1], dimensions[0]);
return 1;
@@ -323,7 +421,8 @@ static NPY_INLINE int
run_reduce_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
- if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_REDUCE(sizeof(npy_bool), 16)) {
+ if (sizeof(npy_bool) == 1 &&
+ IS_BLOCKABLE_REDUCE(sizeof(npy_bool), VECTOR_SIZE_BYTES)) {
sse2_reduce_@kind@_BOOL((npy_bool*)args[0], (npy_bool*)args[1],
dimensions[0]);
return 1;
@@ -347,7 +446,8 @@ static NPY_INLINE int
run_unary_simd_@kind@_BOOL(char **args, npy_intp *dimensions, npy_intp *steps)
{
#if defined NPY_HAVE_SSE2_INTRINSICS
- if (sizeof(npy_bool) == 1 && IS_BLOCKABLE_UNARY(sizeof(npy_bool), 16)) {
+ if (sizeof(npy_bool) == 1 &&
+ IS_BLOCKABLE_UNARY(sizeof(npy_bool), VECTOR_SIZE_BYTES)) {
sse2_@kind@_BOOL((npy_bool*)args[1], (npy_bool*)args[0], dimensions[0]);
return 1;
}
@@ -423,19 +523,20 @@ static void
sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
#ifdef __AVX512F__
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 64)
+ const npy_intp vector_size_bytes = 64;
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[i];
/* lots of specializations, to squeeze out max performance */
- if (npy_is_aligned(&ip1[i], 64) && npy_is_aligned(&ip2[i], 64)) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes) && npy_is_aligned(&ip2[i], vector_size_bytes)) {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@@ -443,16 +544,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
}
- else if (npy_is_aligned(&ip1[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ else if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
- else if (npy_is_aligned(&ip2[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ else if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@@ -461,14 +562,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
else {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, a);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@@ -477,19 +578,21 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
#elif __AVX2__
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 32)
+ const npy_intp vector_size_bytes = 32;
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[i];
/* lots of specializations, to squeeze out max performance */
- if (npy_is_aligned(&ip1[i], 32) && npy_is_aligned(&ip2[i], 32)) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes) &&
+ npy_is_aligned(&ip2[i], vector_size_bytes)) {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@@ -497,16 +600,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
}
- else if (npy_is_aligned(&ip1[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ else if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
- else if (npy_is_aligned(&ip2[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ else if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@@ -515,14 +618,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
else {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, a);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@@ -531,19 +634,20 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
#else
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[i] @OP@ ip2[i];
/* lots of specializations, to squeeze out max performance */
- if (npy_is_aligned(&ip1[i], 16) && npy_is_aligned(&ip2[i], 16)) {
+ if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES) &&
+ npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, a);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@@ -551,16 +655,16 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
}
}
- else if (npy_is_aligned(&ip1[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ else if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
}
}
- else if (npy_is_aligned(&ip2[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ else if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@@ -569,14 +673,14 @@ sse2_binary_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
}
else {
if (ip1 == ip2) {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, a);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@@ -595,18 +699,19 @@ static void
sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
#ifdef __AVX512F__
+ const npy_intp vector_size_bytes = 64;
const @vtype512@ a = @vpre512@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 64)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[0] @OP@ ip2[i];
- if (npy_is_aligned(&ip2[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ b = @vpre512@_load_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ b = @vpre512@_loadu_@vsuf@(&ip2[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
@@ -615,18 +720,19 @@ sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
#elif __AVX2__
+ const npy_intp vector_size_bytes = 32;
const @vtype256@ a = @vpre256@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 32)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[0] @OP@ ip2[i];
- if (npy_is_aligned(&ip2[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ if (npy_is_aligned(&ip2[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ b = @vpre256@_load_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ b = @vpre256@_loadu_@vsuf@(&ip2[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
@@ -634,17 +740,17 @@ sse2_binary_scalar1_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
}
#else
const @vtype@ a = @vpre@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[0] @OP@ ip2[i];
- if (npy_is_aligned(&ip2[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ if (npy_is_aligned(&ip2[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ b = @vpre@_load_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ b = @vpre@_loadu_@vsuf@(&ip2[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
@@ -661,18 +767,19 @@ static void
sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
#ifdef __AVX512F__
+ const npy_intp vector_size_bytes = 64;
const @vtype512@ b = @vpre512@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 64)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[0];
- if (npy_is_aligned(&ip1[i], 64)) {
- LOOP_BLOCKED(@type@, 64) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_load_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 64) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype512@ a = @vpre512@_loadu_@vsuf@(&ip1[i]);
@vtype512@ c = @vpre512@_@VOP@_@vsuf@(a, b);
@vpre512@_store_@vsuf@(&op[i], c);
@@ -680,18 +787,19 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
}
#elif __AVX2__
+ const npy_intp vector_size_bytes = 32;
const @vtype256@ b = @vpre256@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 32)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, vector_size_bytes)
op[i] = ip1[i] @OP@ ip2[0];
- if (npy_is_aligned(&ip1[i], 32)) {
- LOOP_BLOCKED(@type@, 32) {
+ if (npy_is_aligned(&ip1[i], vector_size_bytes)) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_load_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, vector_size_bytes) {
@vtype256@ a = @vpre256@_loadu_@vsuf@(&ip1[i]);
@vtype256@ c = @vpre256@_@VOP@_@vsuf@(a, b);
@vpre256@_store_@vsuf@(&op[i], c);
@@ -699,17 +807,17 @@ sse2_binary_scalar2_@kind@_@TYPE@(@type@ * op, @type@ * ip1, @type@ * ip2, npy_i
}
#else
const @vtype@ b = @vpre@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[i] @OP@ ip2[0];
- if (npy_is_aligned(&ip1[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ if (npy_is_aligned(&ip1[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip1[i]);
@vtype@ c = @vpre@_@VOP@_@vsuf@(a, b);
@vpre@_store_@vsuf@(&op[i], c);
@@ -749,10 +857,10 @@ sse2_compress4_to_byte_@TYPE@(@vtype@ r1, @vtype@ r2, @vtype@ r3, @vtype@ * r4,
static void
sse2_signbit_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = npy_signbit(ip1[i]) != 0;
}
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip1[i]);
int r = @vpre@_movemask_@vsuf@(a);
if (sizeof(@type@) == 8) {
@@ -790,14 +898,14 @@ sse2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, npy_intp n)
const @vtype@ fltmax = @vpre@_set1_@vsuf@(FLT_MAX);
#endif
#endif
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = npy_@kind@(ip1[i]) != 0;
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1, r2, r3, r4;
#if @var@ != 0 /* isinf/isfinite */
/* fabs via masking of sign bit */
@@ -860,18 +968,18 @@ sse2_ordered_cmp_@kind@_@TYPE@(const @type@ a, const @type@ b)
static void
sse2_binary_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[i]);
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
- @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a1 = @vpre@_load_@vsuf@(&ip1[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b1 = @vpre@_load_@vsuf@(&ip1[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c1 = @vpre@_load_@vsuf@(&ip1[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d1 = @vpre@_load_@vsuf@(&ip1[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ a2 = @vpre@_loadu_@vsuf@(&ip2[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b2 = @vpre@_loadu_@vsuf@(&ip2[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c2 = @vpre@_loadu_@vsuf@(&ip2[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d2 = @vpre@_loadu_@vsuf@(&ip2[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1 = @vpre@_@VOP@_@vsuf@(a1, a2);
@vtype@ r2 = @vpre@_@VOP@_@vsuf@(b1, b2);
@vtype@ r3 = @vpre@_@VOP@_@vsuf@(c1, c2);
@@ -888,14 +996,14 @@ static void
sse2_binary_scalar1_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
@vtype@ s = @vpre@_set1_@vsuf@(ip1[0]);
- LOOP_BLOCK_ALIGN_VAR(ip2, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip2, @type@, VECTOR_SIZE_BYTES) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[0], ip2[i]);
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip2[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip2[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip2[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip2[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip2[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip2[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip2[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip2[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1 = @vpre@_@VOP@_@vsuf@(s, a);
@vtype@ r2 = @vpre@_@VOP@_@vsuf@(s, b);
@vtype@ r3 = @vpre@_@VOP@_@vsuf@(s, c);
@@ -912,14 +1020,14 @@ static void
sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy_intp n)
{
@vtype@ s = @vpre@_set1_@vsuf@(ip2[0]);
- LOOP_BLOCK_ALIGN_VAR(ip1, @type@, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip1, @type@, VECTOR_SIZE_BYTES) {
op[i] = sse2_ordered_cmp_@kind@_@TYPE@(ip1[i], ip2[0]);
}
- LOOP_BLOCKED(@type@, 64) {
- @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * 16 / sizeof(@type@)]);
- @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * 16 / sizeof(@type@)]);
- @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * 16 / sizeof(@type@)]);
- @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * 16 / sizeof(@type@)]);
+ LOOP_BLOCKED(@type@, 4 * VECTOR_SIZE_BYTES) {
+ @vtype@ a = @vpre@_load_@vsuf@(&ip1[i + 0 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ b = @vpre@_load_@vsuf@(&ip1[i + 1 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ c = @vpre@_load_@vsuf@(&ip1[i + 2 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
+ @vtype@ d = @vpre@_load_@vsuf@(&ip1[i + 3 * VECTOR_SIZE_BYTES / sizeof(@type@)]);
@vtype@ r1 = @vpre@_@VOP@_@vsuf@(a, s);
@vtype@ r2 = @vpre@_@VOP@_@vsuf@(b, s);
@vtype@ r3 = @vpre@_@VOP@_@vsuf@(c, s);
@@ -935,19 +1043,20 @@ sse2_binary_scalar2_@kind@_@TYPE@(npy_bool * op, @type@ * ip1, @type@ * ip2, npy
static void
sse2_sqrt_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
{
- /* align output to 16 bytes */
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) {
+ /* align output to VECTOR_SIZE_BYTES bytes */
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalarf@(ip[i]);
}
- assert(n < (16 / sizeof(@type@)) || npy_is_aligned(&op[i], 16));
- if (npy_is_aligned(&ip[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
+ if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ d = @vpre@_load_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_sqrt_@vsuf@(d));
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ d = @vpre@_loadu_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_sqrt_@vsuf@(d));
}
@@ -986,19 +1095,20 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
*/
const @vtype@ mask = @vpre@_set1_@vsuf@(-0.@c@);
- /* align output to 16 bytes */
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16) {
+ /* align output to VECTOR_SIZE_BYTES bytes */
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES) {
op[i] = @scalar@_@type@(ip[i]);
}
- assert(n < (16 / sizeof(@type@)) || npy_is_aligned(&op[i], 16));
- if (npy_is_aligned(&ip[i], 16)) {
- LOOP_BLOCKED(@type@, 16) {
+ assert((npy_uintp)n < (VECTOR_SIZE_BYTES / sizeof(@type@)) ||
+ npy_is_aligned(&op[i], VECTOR_SIZE_BYTES));
+ if (npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES)) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_load_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_@VOP@_@vsuf@(mask, a));
}
}
else {
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vpre@_loadu_@vsuf@(&ip[i]);
@vpre@_store_@vsuf@(&op[i], @vpre@_@VOP@_@vsuf@(mask, a));
}
@@ -1019,11 +1129,12 @@ sse2_@kind@_@TYPE@(@type@ * op, @type@ * ip, const npy_intp n)
static void
sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
{
- const npy_intp stride = 16 / (npy_intp)sizeof(@type@);
- LOOP_BLOCK_ALIGN_VAR(ip, @type@, 16) {
+ const npy_intp stride = VECTOR_SIZE_BYTES / (npy_intp)sizeof(@type@);
+ LOOP_BLOCK_ALIGN_VAR(ip, @type@, VECTOR_SIZE_BYTES) {
+ /* Order of operations important for MSVC 2015 */
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
- assert(n < (stride) || npy_is_aligned(&ip[i], 16));
+ assert((npy_uintp)n < (stride) || npy_is_aligned(&ip[i], VECTOR_SIZE_BYTES));
if (i + 3 * stride <= n) {
/* load the first elements */
@vtype@ c1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@@ -1032,7 +1143,7 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
/* minps/minpd will set invalid flag if nan is encountered */
npy_clear_floatstatus_barrier((char*)&c1);
- LOOP_BLOCKED(@type@, 32) {
+ LOOP_BLOCKED(@type@, 2 * VECTOR_SIZE_BYTES) {
@vtype@ v1 = @vpre@_load_@vsuf@((@type@*)&ip[i]);
@vtype@ v2 = @vpre@_load_@vsuf@((@type@*)&ip[i + stride]);
c1 = @vpre@_@VOP@_@vsuf@(c1, v1);
@@ -1045,18 +1156,1106 @@ sse2_@kind@_@TYPE@(@type@ * ip, @type@ * op, const npy_intp n)
}
else {
@type@ tmp = sse2_horizontal_@VOP@_@vtype@(c1);
+ /* Order of operations important for MSVC 2015 */
*op = (*op @OP@ tmp || npy_isnan(*op)) ? *op : tmp;
}
}
LOOP_BLOCKED_END {
+ /* Order of operations important for MSVC 2015 */
*op = (*op @OP@ ip[i] || npy_isnan(*op)) ? *op : ip[i];
}
- if (npy_isnan(*op)) {
- npy_set_floatstatus_invalid();
+ npy_clear_floatstatus_barrier((char*)op);
+}
+/**end repeat1**/
+
+/**end repeat**/
+
+/* bunch of helper functions used in ISA_exp/log_FLOAT*/
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_full_load_mask_ps(void)
+{
+ return _mm256_set1_ps(-1.0);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i
+fma_get_full_load_mask_pd(void)
+{
+ return _mm256_castpd_si256(_mm256_set1_pd(-1.0));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_partial_load_mask_ps(const npy_int num_elem, const npy_int num_lanes)
+{
+ float maskint[16] = {-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,-1.0,
+ 1.0,1.0,1.0,1.0,1.0,1.0,1.0,1.0};
+ float* addr = maskint + num_lanes - num_elem;
+ return _mm256_loadu_ps(addr);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i
+fma_get_partial_load_mask_pd(const npy_int num_elem, const npy_int num_lanes)
+{
+ npy_int maskint[16] = {-1,-1,-1,-1,-1,-1,-1,-1,1,1,1,1,1,1,1,1};
+ npy_int* addr = maskint + 2*num_lanes - 2*num_elem;
+ return _mm256_loadu_si256((__m256i*) addr);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_masked_gather_ps(__m256 src,
+ npy_float* addr,
+ __m256i vindex,
+ __m256 mask)
+{
+ return _mm256_mask_i32gather_ps(src, addr, vindex, mask, 4);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d
+fma_masked_gather_pd(__m256d src,
+ npy_double* addr,
+ __m128i vindex,
+ __m256d mask)
+{
+ return _mm256_mask_i32gather_pd(src, addr, vindex, mask, 8);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_masked_load_ps(__m256 mask, npy_float* addr)
+{
+ return _mm256_maskload_ps(addr, _mm256_cvtps_epi32(mask));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d
+fma_masked_load_pd(__m256i mask, npy_double* addr)
+{
+ return _mm256_maskload_pd(addr, mask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_set_masked_lanes_ps(__m256 x, __m256 val, __m256 mask)
+{
+ return _mm256_blendv_ps(x, val, mask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256d
+fma_set_masked_lanes_pd(__m256d x, __m256d val, __m256d mask)
+{
+ return _mm256_blendv_pd(x, val, mask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_blend(__m256 x, __m256 y, __m256 ymask)
+{
+ return _mm256_blendv_ps(x, y, ymask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_invert_mask_ps(__m256 ymask)
+{
+ return _mm256_andnot_ps(ymask, _mm256_set1_ps(-1.0));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256i
+fma_invert_mask_pd(__m256i ymask)
+{
+ return _mm256_andnot_si256(ymask, _mm256_set1_epi32(0xFFFFFFFF));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_should_calculate_sine(__m256i k, __m256i andop, __m256i cmp)
+{
+ return _mm256_cvtepi32_ps(
+ _mm256_cmpeq_epi32(_mm256_and_si256(k, andop), cmp));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_should_negate(__m256i k, __m256i andop, __m256i cmp)
+{
+ return fma_should_calculate_sine(k, andop, cmp);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_exponent(__m256 x)
+{
+ /*
+ * Special handling of denormals:
+ * 1) Multiply denormal elements with 2**100 (0x71800000)
+ * 2) Get the 8 bits of unbiased exponent
+ * 3) Subtract 100 from exponent of denormals
+ */
+
+ __m256 two_power_100 = _mm256_castsi256_ps(_mm256_set1_epi32(0x71800000));
+ __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ);
+ __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ);
+
+ __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask);
+ __m256 temp = _mm256_mul_ps(temp1, two_power_100);
+ x = _mm256_blendv_ps(x, temp, denormal_mask);
+
+ __m256 exp = _mm256_cvtepi32_ps(
+ _mm256_sub_epi32(
+ _mm256_srli_epi32(
+ _mm256_castps_si256(x), 23),_mm256_set1_epi32(0x7E)));
+
+ __m256 denorm_exp = _mm256_sub_ps(exp, _mm256_set1_ps(100.0f));
+ return _mm256_blendv_ps(exp, denorm_exp, denormal_mask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA __m256
+fma_get_mantissa(__m256 x)
+{
+ /*
+ * Special handling of denormals:
+ * 1) Multiply denormal elements with 2**100 (0x71800000)
+ * 2) Get the 23 bits of mantissa
+ * 3) Mantissa for denormals is not affected by the multiplication
+ */
+
+ __m256 two_power_100 = _mm256_castsi256_ps(_mm256_set1_epi32(0x71800000));
+ __m256 denormal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_LT_OQ);
+ __m256 normal_mask = _mm256_cmp_ps(x, _mm256_set1_ps(FLT_MIN), _CMP_GE_OQ);
+
+ __m256 temp1 = _mm256_blendv_ps(x, _mm256_set1_ps(0.0f), normal_mask);
+ __m256 temp = _mm256_mul_ps(temp1, two_power_100);
+ x = _mm256_blendv_ps(x, temp, denormal_mask);
+
+ __m256i mantissa_bits = _mm256_set1_epi32(0x7fffff);
+ __m256i exp_126_bits = _mm256_set1_epi32(126 << 23);
+ return _mm256_castsi256_ps(
+ _mm256_or_si256(
+ _mm256_and_si256(
+ _mm256_castps_si256(x), mantissa_bits), exp_126_bits));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX2 __m256
+fma_scalef_ps(__m256 poly, __m256 quadrant)
+{
+ /*
+ * Handle denormals (which occur when quadrant <= -125):
+ * 1) This function computes poly*(2^quad) by adding the exponent of
+ poly to quad
+ * 2) When quad <= -125, the output is a denormal and the above logic
+ breaks down
+ * 3) To handle such cases, we split quadrant: -125 + (quadrant + 125)
+ * 4) poly*(2^-125) is computed the usual way
+ * 5) 2^-(quad+125) can be computed by: 1 << abs(quad+125)
+ * 6) The final div operation generates the denormal
+ */
+ __m256 minquadrant = _mm256_set1_ps(-125.0f);
+ __m256 denormal_mask = _mm256_cmp_ps(quadrant, minquadrant, _CMP_LE_OQ);
+ if (_mm256_movemask_ps(denormal_mask) != 0x0000) {
+ __m256 quad_diff = _mm256_sub_ps(quadrant, minquadrant);
+ quad_diff = _mm256_sub_ps(_mm256_setzero_ps(), quad_diff);
+ quad_diff = _mm256_blendv_ps(_mm256_setzero_ps(), quad_diff, denormal_mask);
+ __m256i two_power_diff = _mm256_sllv_epi32(
+ _mm256_set1_epi32(1), _mm256_cvtps_epi32(quad_diff));
+ quadrant = _mm256_max_ps(quadrant, minquadrant); //keep quadrant >= -126
+ __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23);
+ poly = _mm256_castsi256_ps(
+ _mm256_add_epi32(
+ _mm256_castps_si256(poly), exponent));
+ __m256 denorm_poly = _mm256_div_ps(poly, _mm256_cvtepi32_ps(two_power_diff));
+ return _mm256_blendv_ps(poly, denorm_poly, denormal_mask);
+ }
+ else {
+ __m256i exponent = _mm256_slli_epi32(_mm256_cvtps_epi32(quadrant), 23);
+ poly = _mm256_castsi256_ps(
+ _mm256_add_epi32(
+ _mm256_castps_si256(poly), exponent));
+ return poly;
+ }
+}
+
+/**begin repeat
+ * #vsub = ps, pd#
+ * #vtype = __m256, __m256d#
+ */
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_abs_@vsub@(@vtype@ x)
+{
+ return _mm256_andnot_@vsub@(_mm256_set1_@vsub@(-0.0), x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_reciprocal_@vsub@(@vtype@ x)
+{
+ return _mm256_div_@vsub@(_mm256_set1_@vsub@(1.0f), x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_rint_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_NEAREST_INT);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_floor_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_NEG_INF);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_ceil_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_POS_INF);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_FMA @vtype@
+fma_trunc_@vsub@(@vtype@ x)
+{
+ return _mm256_round_@vsub@(x, _MM_FROUND_TO_ZERO);
+}
+/**end repeat**/
+#endif
+
+#if defined HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_get_full_load_mask_ps(void)
+{
+ return 0xFFFF;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8
+avx512_get_full_load_mask_pd(void)
+{
+ return 0xFF;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_get_partial_load_mask_ps(const npy_int num_elem, const npy_int total_elem)
+{
+ return (0x0001 << num_elem) - 0x0001;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8
+avx512_get_partial_load_mask_pd(const npy_int num_elem, const npy_int total_elem)
+{
+ return (0x01 << num_elem) - 0x01;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_masked_gather_ps(__m512 src,
+ npy_float* addr,
+ __m512i vindex,
+ __mmask16 kmask)
+{
+ return _mm512_mask_i32gather_ps(src, kmask, vindex, addr, 4);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d
+avx512_masked_gather_pd(__m512d src,
+ npy_double* addr,
+ __m256i vindex,
+ __mmask8 kmask)
+{
+ return _mm512_mask_i32gather_pd(src, kmask, vindex, addr, 8);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_masked_load_ps(__mmask16 mask, npy_float* addr)
+{
+ return _mm512_maskz_loadu_ps(mask, (__m512 *)addr);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d
+avx512_masked_load_pd(__mmask8 mask, npy_double* addr)
+{
+ return _mm512_maskz_loadu_pd(mask, (__m512d *)addr);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_set_masked_lanes_ps(__m512 x, __m512 val, __mmask16 mask)
+{
+ return _mm512_mask_blend_ps(mask, x, val);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512d
+avx512_set_masked_lanes_pd(__m512d x, __m512d val, __mmask8 mask)
+{
+ return _mm512_mask_blend_pd(mask, x, val);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_blend(__m512 x, __m512 y, __mmask16 ymask)
+{
+ return _mm512_mask_mov_ps(x, ymask, y);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_invert_mask_ps(__mmask16 ymask)
+{
+ return _mm512_knot(ymask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask8
+avx512_invert_mask_pd(__mmask8 ymask)
+{
+ return _mm512_knot(ymask);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_should_calculate_sine(__m512i k, __m512i andop, __m512i cmp)
+{
+ return _mm512_cmpeq_epi32_mask(_mm512_and_epi32(k, andop), cmp);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __mmask16
+avx512_should_negate(__m512i k, __m512i andop, __m512i cmp)
+{
+ return avx512_should_calculate_sine(k, andop, cmp);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_get_exponent(__m512 x)
+{
+ return _mm512_add_ps(_mm512_getexp_ps(x), _mm512_set1_ps(1.0f));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_get_mantissa(__m512 x)
+{
+ return _mm512_getmant_ps(x, _MM_MANT_NORM_p5_1, _MM_MANT_SIGN_src);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F __m512
+avx512_scalef_ps(__m512 poly, __m512 quadrant)
+{
+ return _mm512_scalef_ps(poly, quadrant);
+}
+/**begin repeat
+ * #vsub = ps, pd#
+ * #epi_vsub = epi32, epi64#
+ * #vtype = __m512, __m512d#
+ * #and_const = 0x7fffffff, 0x7fffffffffffffffLL#
+ */
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_abs_@vsub@(@vtype@ x)
+{
+ return (@vtype@) _mm512_and_@epi_vsub@((__m512i) x,
+ _mm512_set1_@epi_vsub@ (@and_const@));
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_reciprocal_@vsub@(@vtype@ x)
+{
+ return _mm512_div_@vsub@(_mm512_set1_@vsub@(1.0f), x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_rint_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x08);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_floor_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x09);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_ceil_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x0A);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_AVX512F @vtype@
+avx512_trunc_@vsub@(@vtype@ x)
+{
+ return _mm512_roundscale_@vsub@(x, 0x0B);
+}
+/**end repeat**/
+#endif
+
+/**begin repeat
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
+ * #vtype = __m256, __m512#
+ * #vsize = 256, 512#
+ * #or = or_ps, kor#
+ * #vsub = , _mask#
+ * #mask = __m256, __mmask16#
+ * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ **/
+
+#if defined @CHK@
+
+/*
+ * Vectorized Cody-Waite range reduction technique
+ * Performs the reduction step x* = x - y*C in three steps:
+ * 1) x* = x - y*c1
+ * 2) x* = x - y*c2
+ * 3) x* = x - y*c3
+ * c1, c2 are exact floating points, c3 = C - c1 - c2 simulates higher precision
+ */
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_range_reduction(@vtype@ x, @vtype@ y, @vtype@ c1, @vtype@ c2, @vtype@ c3)
+{
+ @vtype@ reduced_x = @fmadd@(y, c1, x);
+ reduced_x = @fmadd@(y, c2, reduced_x);
+ reduced_x = @fmadd@(y, c3, reduced_x);
+ return reduced_x;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @mask@
+@isa@_in_range_mask(@vtype@ x, npy_float fmax, npy_float fmin)
+{
+ @mask@ m1 = _mm@vsize@_cmp_ps@vsub@(
+ x, _mm@vsize@_set1_ps(fmax), _CMP_GT_OQ);
+ @mask@ m2 = _mm@vsize@_cmp_ps@vsub@(
+ x, _mm@vsize@_set1_ps(fmin), _CMP_LT_OQ);
+ return _mm@vsize@_@or@(m1,m2);
+}
+
+/*
+ * Approximate cosine algorithm for x \in [-PI/4, PI/4]
+ * Maximum ULP across all 32-bit floats = 0.875
+ */
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_cosine(@vtype@ x2, @vtype@ invf8, @vtype@ invf6, @vtype@ invf4,
+ @vtype@ invf2, @vtype@ invf0)
+{
+ @vtype@ cos = @fmadd@(invf8, x2, invf6);
+ cos = @fmadd@(cos, x2, invf4);
+ cos = @fmadd@(cos, x2, invf2);
+ cos = @fmadd@(cos, x2, invf0);
+ return cos;
+}
+
+/*
+ * Approximate sine algorithm for x \in [-PI/4, PI/4]
+ * Maximum ULP across all 32-bit floats = 0.647
+ */
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_sine(@vtype@ x, @vtype@ x2, @vtype@ invf9, @vtype@ invf7,
+ @vtype@ invf5, @vtype@ invf3,
+ @vtype@ zero)
+{
+ @vtype@ sin = @fmadd@(invf9, x2, invf7);
+ sin = @fmadd@(sin, x2, invf5);
+ sin = @fmadd@(sin, x2, invf3);
+ sin = @fmadd@(sin, x2, zero);
+ sin = @fmadd@(sin, x, x);
+ return sin;
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_sqrt_ps(@vtype@ x)
+{
+ return _mm@vsize@_sqrt_ps(x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d
+@isa@_sqrt_pd(@vtype@d x)
+{
+ return _mm@vsize@_sqrt_pd(x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@
+@isa@_square_ps(@vtype@ x)
+{
+ return _mm@vsize@_mul_ps(x,x);
+}
+
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ @vtype@d
+@isa@_square_pd(@vtype@d x)
+{
+ return _mm@vsize@_mul_pd(x,x);
+}
+
+#endif
+/**end repeat**/
+
+
+/**begin repeat
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
+ * #vsize = 256, 512#
+ * #BYTES = 32, 64#
+ * #cvtps_epi32 = _mm256_cvtps_epi32, #
+ * #mask = __m256, __mmask16#
+ * #vsub = , _mask#
+ * #vtype = __m256, __m512#
+ * #cvtps_epi32 = _mm256_cvtps_epi32, #
+ * #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ */
+
+/**begin repeat1
+ * #func = sqrt, absolute, square, reciprocal, rint, ceil, floor, trunc#
+ * #vectorf = sqrt, abs, square, reciprocal, rint, ceil, floor, trunc#
+ * #replace_0_with_1 = 0, 0, 0, 1, 0, 0, 0, 0#
+ */
+
+#if defined @CHK@
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_@func@_FLOAT(npy_float* op,
+ npy_float* ip,
+ const npy_intp array_size,
+ const npy_intp steps)
+{
+ const npy_intp stride = steps/sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ npy_intp num_remaining_elements = array_size;
+ @vtype@ ones_f = _mm@vsize@_set1_ps(1.0f);
+ @mask@ load_mask = @isa@_get_full_load_mask_ps();
+#if @replace_0_with_1@
+ @mask@ inv_load_mask = @isa@_invert_mask_ps(load_mask);
+#endif
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+ num_lanes);
+#if @replace_0_with_1@
+ inv_load_mask = @isa@_invert_mask_ps(load_mask);
+#endif
+ }
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load_ps(load_mask, ip);
+#if @replace_0_with_1@
+ /*
+ * Replace masked elements with 1.0f to avoid divide by zero fp
+ * exception in reciprocal
+ */
+ x = @isa@_set_masked_lanes_ps(x, ones_f, inv_load_mask);
+#endif
+ }
+ else {
+ x = @isa@_masked_gather_ps(ones_f, ip, vindex, load_mask);
+ }
+ @vtype@ out = @isa@_@vectorf@_ps(x);
+ @masked_store@(op, @cvtps_epi32@(load_mask), out);
+
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
}
}
+#endif
/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
+ * #vsize = 256, 512#
+ * #BYTES = 32, 64#
+ * #cvtps_epi32 = _mm256_cvtps_epi32, #
+ * #mask = __m256i, __mmask8#
+ * #vsub = , _mask#
+ * #vtype = __m256d, __m512d#
+ * #vindextype = __m128i, __m256i#
+ * #vindexsize = 128, 256#
+ * #vindexload = _mm_loadu_si128, _mm256_loadu_si256#
+ * #cvtps_epi32 = _mm256_cvtpd_epi32, #
+ * #castmask = _mm256_castsi256_pd, #
+ * #masked_store = _mm256_maskstore_pd, _mm512_mask_storeu_pd#
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ */
+
+/**begin repeat1
+ * #func = sqrt, absolute, square, reciprocal, rint, ceil, floor, trunc#
+ * #vectorf = sqrt, abs, square, reciprocal, rint, ceil, floor, trunc#
+ * #replace_0_with_1 = 0, 0, 0, 1, 0, 0, 0, 0#
+ */
+#if defined @CHK@
+static NPY_INLINE NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_@func@_DOUBLE(npy_double* op,
+ npy_double* ip,
+ const npy_intp array_size,
+ const npy_intp steps)
+{
+ const npy_intp stride = steps/sizeof(npy_double);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_double);
+ npy_intp num_remaining_elements = array_size;
+ @mask@ load_mask = @isa@_get_full_load_mask_pd();
+#if @replace_0_with_1@
+ @mask@ inv_load_mask = @isa@_invert_mask_pd(load_mask);
+#endif
+ @vtype@ ones_d = _mm@vsize@_set1_pd(1.0f);
+ npy_int indexarr[8];
+ for (npy_int ii = 0; ii < 8; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+ @vindextype@ vindex = @vindexload@((@vindextype@*)&indexarr[0]);
+
+ while (num_remaining_elements > 0) {
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask_pd(num_remaining_elements,
+ num_lanes);
+#if @replace_0_with_1@
+ inv_load_mask = @isa@_invert_mask_pd(load_mask);
+#endif
+ }
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load_pd(load_mask, ip);
+#if @replace_0_with_1@
+ /*
+ * Replace masked elements with 1.0f to avoid divide by zero fp
+ * exception in reciprocal
+ */
+ x = @isa@_set_masked_lanes_pd(x, ones_d, @castmask@(inv_load_mask));
+#endif
+ }
+ else {
+ x = @isa@_masked_gather_pd(ones_d, ip, vindex, @castmask@(load_mask));
+ }
+ @vtype@ out = @isa@_@vectorf@_pd(x);
+ @masked_store@(op, load_mask, out);
+
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
+ }
+}
+#endif
+/**end repeat1**/
+/**end repeat**/
+
+/**begin repeat
+ * #ISA = FMA, AVX512F#
+ * #isa = fma, avx512#
+ * #vtype = __m256, __m512#
+ * #vsize = 256, 512#
+ * #BYTES = 32, 64#
+ * #mask = __m256, __mmask16#
+ * #vsub = , _mask#
+ * #or_masks =_mm256_or_ps, _mm512_kor#
+ * #and_masks =_mm256_and_ps, _mm512_kand#
+ * #xor_masks =_mm256_xor_ps, _mm512_kxor#
+ * #fmadd = _mm256_fmadd_ps, _mm512_fmadd_ps#
+ * #mask_to_int = _mm256_movemask_ps, #
+ * #full_mask= 0xFF, 0xFFFF#
+ * #masked_store = _mm256_maskstore_ps, _mm512_mask_storeu_ps#
+ * #cvtps_epi32 = _mm256_cvtps_epi32, #
+ * #CHK = HAVE_ATTRIBUTE_TARGET_AVX2_WITH_INTRINSICS, HAVE_ATTRIBUTE_TARGET_AVX512F_WITH_INTRINSICS#
+ */
+
+/*
+ * Vectorized approximate sine/cosine algorithms: The following code is a
+ * vectorized version of the algorithm presented here:
+ * https://stackoverflow.com/questions/30463616/payne-hanek-algorithm-implementation-in-c/30465751#30465751
+ * (1) Load data in ZMM/YMM registers and generate mask for elements that are
+ * within range [-71476.0625f, 71476.0625f] for cosine and [-117435.992f,
+ * 117435.992f] for sine.
+ * (2) For elements within range, perform range reduction using Cody-Waite's
+ * method: x* = x - y*PI/2, where y = rint(x*2/PI). x* \in [-PI/4, PI/4].
+ * (3) Map cos(x) to (+/-)sine or (+/-)cosine of x* based on the quadrant k =
+ * int(y).
+ * (4) For elements outside that range, Cody-Waite reduction performs poorly
+ * leading to catastrophic cancellation. We compute cosine by calling glibc in
+ * a scalar fashion.
+ * (5) Vectorized implementation has a max ULP of 1.49 and performs at least
+ * 5-7x faster than scalar implementations when magnitude of all elements in
+ * the array < 71476.0625f (117435.992f for sine). Worst case performance is
+ * when all the elements are large leading to about 1-2% reduction in
+ * performance.
+ */
+
+#if defined @CHK@
+static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_sincos_FLOAT(npy_float * op,
+ npy_float * ip,
+ const npy_intp array_size,
+ const npy_intp steps,
+ NPY_TRIG_OP my_trig_op)
+{
+ const npy_intp stride = steps/sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ npy_float large_number = 71476.0625f;
+ if (my_trig_op == npy_compute_sin) {
+ large_number = 117435.992f;
+ }
+
+ /* Load up frequently used constants */
+ @vtype@i zeros = _mm@vsize@_set1_epi32(0);
+ @vtype@i ones = _mm@vsize@_set1_epi32(1);
+ @vtype@i twos = _mm@vsize@_set1_epi32(2);
+ @vtype@ two_over_pi = _mm@vsize@_set1_ps(NPY_TWO_O_PIf);
+ @vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_HIGHf);
+ @vtype@ codyw_c2 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_MEDf);
+ @vtype@ codyw_c3 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_PI_O_2_LOWf);
+ @vtype@ cos_invf0 = _mm@vsize@_set1_ps(NPY_COEFF_INVF0_COSINEf);
+ @vtype@ cos_invf2 = _mm@vsize@_set1_ps(NPY_COEFF_INVF2_COSINEf);
+ @vtype@ cos_invf4 = _mm@vsize@_set1_ps(NPY_COEFF_INVF4_COSINEf);
+ @vtype@ cos_invf6 = _mm@vsize@_set1_ps(NPY_COEFF_INVF6_COSINEf);
+ @vtype@ cos_invf8 = _mm@vsize@_set1_ps(NPY_COEFF_INVF8_COSINEf);
+ @vtype@ sin_invf3 = _mm@vsize@_set1_ps(NPY_COEFF_INVF3_SINEf);
+ @vtype@ sin_invf5 = _mm@vsize@_set1_ps(NPY_COEFF_INVF5_SINEf);
+ @vtype@ sin_invf7 = _mm@vsize@_set1_ps(NPY_COEFF_INVF7_SINEf);
+ @vtype@ sin_invf9 = _mm@vsize@_set1_ps(NPY_COEFF_INVF9_SINEf);
+ @vtype@ cvt_magic = _mm@vsize@_set1_ps(NPY_RINT_CVT_MAGICf);
+ @vtype@ zero_f = _mm@vsize@_set1_ps(0.0f);
+ @vtype@ quadrant, reduced_x, reduced_x2, cos, sin;
+ @vtype@i iquadrant;
+ @mask@ nan_mask, glibc_mask, sine_mask, negate_mask;
+ @mask@ load_mask = @isa@_get_full_load_mask_ps();
+ npy_intp num_remaining_elements = array_size;
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
+
+ while (num_remaining_elements > 0) {
+
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+ num_lanes);
+ }
+
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load_ps(load_mask, ip);
+ }
+ else {
+ x = @isa@_masked_gather_ps(zero_f, ip, vindex, load_mask);
+ }
+
+ /*
+ * For elements outside of this range, Cody-Waite's range reduction
+ * becomes inaccurate and we will call glibc to compute cosine for
+ * these numbers
+ */
+
+ glibc_mask = @isa@_in_range_mask(x, large_number,-large_number);
+ glibc_mask = @and_masks@(load_mask, glibc_mask);
+ nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ);
+ x = @isa@_set_masked_lanes_ps(x, zero_f, @or_masks@(nan_mask, glibc_mask));
+ npy_int iglibc_mask = @mask_to_int@(glibc_mask);
+
+ if (iglibc_mask != @full_mask@) {
+ quadrant = _mm@vsize@_mul_ps(x, two_over_pi);
+
+ /* round to nearest */
+ quadrant = _mm@vsize@_add_ps(quadrant, cvt_magic);
+ quadrant = _mm@vsize@_sub_ps(quadrant, cvt_magic);
+
+ /* Cody-Waite's range reduction algorithm */
+ reduced_x = @isa@_range_reduction(x, quadrant,
+ codyw_c1, codyw_c2, codyw_c3);
+ reduced_x2 = _mm@vsize@_mul_ps(reduced_x, reduced_x);
+
+ /* compute cosine and sine */
+ cos = @isa@_cosine(reduced_x2, cos_invf8, cos_invf6, cos_invf4,
+ cos_invf2, cos_invf0);
+ sin = @isa@_sine(reduced_x, reduced_x2, sin_invf9, sin_invf7,
+ sin_invf5, sin_invf3, zero_f);
+
+ iquadrant = _mm@vsize@_cvtps_epi32(quadrant);
+ if (my_trig_op == npy_compute_cos) {
+ iquadrant = _mm@vsize@_add_epi32(iquadrant, ones);
+ }
+
+ /* blend sin and cos based on the quadrant */
+ sine_mask = @isa@_should_calculate_sine(iquadrant, ones, zeros);
+ cos = @isa@_blend(cos, sin, sine_mask);
+
+ /* multiply by -1 for appropriate elements */
+ negate_mask = @isa@_should_negate(iquadrant, twos, twos);
+ cos = @isa@_blend(cos, _mm@vsize@_sub_ps(zero_f, cos), negate_mask);
+ cos = @isa@_set_masked_lanes_ps(cos, _mm@vsize@_set1_ps(NPY_NANF), nan_mask);
+
+ @masked_store@(op, @cvtps_epi32@(load_mask), cos);
+ }
+
+ /* process elements using glibc for large elements */
+ if (my_trig_op == npy_compute_cos) {
+ for (int ii = 0; iglibc_mask != 0; ii++) {
+ if (iglibc_mask & 0x01) {
+ op[ii] = npy_cosf(ip[ii]);
+ }
+ iglibc_mask = iglibc_mask >> 1;
+ }
+ }
+ else {
+ for (int ii = 0; iglibc_mask != 0; ii++) {
+ if (iglibc_mask & 0x01) {
+ op[ii] = npy_sinf(ip[ii]);
+ }
+ iglibc_mask = iglibc_mask >> 1;
+ }
+ }
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
+ }
+}
+
+/*
+ * Vectorized implementation of exp using AVX2 and AVX512:
+ * 1) if x >= xmax; return INF (overflow)
+ * 2) if x <= xmin; return 0.0f (underflow)
+ * 3) Range reduction (using Cody-Waite):
+ * a) y = x - k*ln(2); k = rint(x/ln(2)); y \in [0, ln(2)]
+ * 4) Compute exp(y) = P/Q, ratio of 2 polynomials P and Q
+ * b) P = 5th order and Q = 2nd order polynomials obtained from Remez's
+ * algorithm (mini-max polynomial approximation)
+ * 5) Compute exp(x) = exp(y) * 2^k
+ * 6) Max ULP error measured across all 32-bit FP's = 2.52 (x = 0xc2781e37)
+ * 7) Max relative error measured across all 32-bit FP's= 2.1264E-07 (for the
+ * same x = 0xc2781e37)
+ */
+
+static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_exp_FLOAT(npy_float * op,
+ npy_float * ip,
+ const npy_intp array_size,
+ const npy_intp steps)
+{
+ const npy_intp stride = steps/sizeof(npy_float);
+ const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+ npy_float xmax = 88.72283935546875f;
+ npy_float xmin = -103.97208404541015625f;
+ npy_int indexarr[16];
+ for (npy_int ii = 0; ii < 16; ii++) {
+ indexarr[ii] = ii*stride;
+ }
+
+ /* Load up frequently used constants */
+ @vtype@ codyw_c1 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_LOGE_2_HIGHf);
+ @vtype@ codyw_c2 = _mm@vsize@_set1_ps(NPY_CODY_WAITE_LOGE_2_LOWf);
+ @vtype@ exp_p0 = _mm@vsize@_set1_ps(NPY_COEFF_P0_EXPf);
+ @vtype@ exp_p1 = _mm@vsize@_set1_ps(NPY_COEFF_P1_EXPf);
+ @vtype@ exp_p2 = _mm@vsize@_set1_ps(NPY_COEFF_P2_EXPf);
+ @vtype@ exp_p3 = _mm@vsize@_set1_ps(NPY_COEFF_P3_EXPf);
+ @vtype@ exp_p4 = _mm@vsize@_set1_ps(NPY_COEFF_P4_EXPf);
+ @vtype@ exp_p5 = _mm@vsize@_set1_ps(NPY_COEFF_P5_EXPf);
+ @vtype@ exp_q0 = _mm@vsize@_set1_ps(NPY_COEFF_Q0_EXPf);
+ @vtype@ exp_q1 = _mm@vsize@_set1_ps(NPY_COEFF_Q1_EXPf);
+ @vtype@ exp_q2 = _mm@vsize@_set1_ps(NPY_COEFF_Q2_EXPf);
+ @vtype@ cvt_magic = _mm@vsize@_set1_ps(NPY_RINT_CVT_MAGICf);
+ @vtype@ log2e = _mm@vsize@_set1_ps(NPY_LOG2Ef);
+ @vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF);
+ @vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f);
+ @vtype@ poly, num_poly, denom_poly, quadrant;
+ @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)&indexarr[0]);
+
+ @mask@ xmax_mask, xmin_mask, nan_mask, inf_mask;
+ @mask@ overflow_mask = @isa@_get_partial_load_mask_ps(0, num_lanes);
+ @mask@ load_mask = @isa@_get_full_load_mask_ps();
+ npy_intp num_remaining_elements = array_size;
+
+ while (num_remaining_elements > 0) {
+
+ if (num_remaining_elements < num_lanes) {
+ load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+ num_lanes);
+ }
+
+ @vtype@ x;
+ if (stride == 1) {
+ x = @isa@_masked_load_ps(load_mask, ip);
+ }
+ else {
+ x = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask);
+ }
+
+ nan_mask = _mm@vsize@_cmp_ps@vsub@(x, x, _CMP_NEQ_UQ);
+ x = @isa@_set_masked_lanes_ps(x, zeros_f, nan_mask);
+
+ xmax_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmax), _CMP_GE_OQ);
+ xmin_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(xmin), _CMP_LE_OQ);
+ inf_mask = _mm@vsize@_cmp_ps@vsub@(x, inf, _CMP_EQ_OQ);
+ overflow_mask = @or_masks@(overflow_mask,
+ @xor_masks@(xmax_mask, inf_mask));
+
+ x = @isa@_set_masked_lanes_ps(x, zeros_f, @or_masks@(
+ @or_masks@(nan_mask, xmin_mask), xmax_mask));
+
+ quadrant = _mm@vsize@_mul_ps(x, log2e);
+
+ /* round to nearest */
+ quadrant = _mm@vsize@_add_ps(quadrant, cvt_magic);
+ quadrant = _mm@vsize@_sub_ps(quadrant, cvt_magic);
+
+ /* Cody-Waite's range reduction algorithm */
+ x = @isa@_range_reduction(x, quadrant, codyw_c1, codyw_c2, zeros_f);
+
+ num_poly = @fmadd@(exp_p5, x, exp_p4);
+ num_poly = @fmadd@(num_poly, x, exp_p3);
+ num_poly = @fmadd@(num_poly, x, exp_p2);
+ num_poly = @fmadd@(num_poly, x, exp_p1);
+ num_poly = @fmadd@(num_poly, x, exp_p0);
+ denom_poly = @fmadd@(exp_q2, x, exp_q1);
+ denom_poly = @fmadd@(denom_poly, x, exp_q0);
+ poly = _mm@vsize@_div_ps(num_poly, denom_poly);
+
+ /*
+ * compute val = poly * 2^quadrant; which is same as adding the
+ * exponent of quadrant to the exponent of poly. quadrant is an int,
+ * so extracting exponent is simply extracting 8 bits.
+ */
+ poly = @isa@_scalef_ps(poly, quadrant);
+
+ /*
+ * elem > xmax; return inf
+ * elem < xmin; return 0.0f
+ * elem = +/- nan, return nan
+ */
+ poly = @isa@_set_masked_lanes_ps(poly, _mm@vsize@_set1_ps(NPY_NANF), nan_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, inf, xmax_mask);
+ poly = @isa@_set_masked_lanes_ps(poly, zeros_f, xmin_mask);
+
+ @masked_store@(op, @cvtps_epi32@(load_mask), poly);
+
+ ip += num_lanes*stride;
+ op += num_lanes;
+ num_remaining_elements -= num_lanes;
+ }
+
+ if (@mask_to_int@(overflow_mask)) {
+ npy_set_floatstatus_overflow();
+ }
+}
+
+/*
+ * Vectorized implementation of log using AVX2 and AVX512
+ * 1) if x < 0.0f; return -NAN (invalid input)
+ * 2) Range reduction: y = x/2^k;
+ *    a) y = normalized mantissa, k is the exponent (0.5 <= y < 1)
+ * 3) Compute log(y) = P/Q, ratio of 2 polynomials P and Q
+ *    a) P = 5th order and Q = 5th order polynomials obtained from Remez's
+ *       algorithm (mini-max polynomial approximation)
+ * 4) Compute log(x) = log(y) + k*ln(2)
+ * 5) Max ULP error measured across all 32-bit FP's = 3.83 (x = 0x3f486945)
+ * 6) Max relative error measured across all 32-bit FP's = 2.359E-07 (for same
+ *    x = 0x3f486945)
+ */
+
+/*
+ * Compute log(ip[i]) for i in [0, array_size) into op using @ISA@ intrinsics.
+ * 'steps' is the input stride in bytes (converted to elements below); the
+ * output pointer advances by num_lanes per iteration, i.e. op is assumed
+ * contiguous -- TODO confirm against the caller.  Raises the 'invalid' FP
+ * status for negative inputs and 'divide-by-zero' for zero inputs.
+ */
+static NPY_GCC_OPT_3 NPY_GCC_TARGET_@ISA@ void
+@ISA@_log_FLOAT(npy_float * op,
+                npy_float * ip,
+                const npy_intp array_size,
+                const npy_intp steps)
+{
+    const npy_intp stride = steps/sizeof(npy_float);
+    const npy_int num_lanes = @BYTES@/sizeof(npy_float);
+    /* per-lane element offsets, used by the gather path for strided input */
+    npy_int indexarr[16];
+    for (npy_int ii = 0; ii < 16; ii++) {
+        indexarr[ii] = ii*stride;
+    }
+
+    /* Load up frequently used constants */
+    @vtype@ log_p0 = _mm@vsize@_set1_ps(NPY_COEFF_P0_LOGf);
+    @vtype@ log_p1 = _mm@vsize@_set1_ps(NPY_COEFF_P1_LOGf);
+    @vtype@ log_p2 = _mm@vsize@_set1_ps(NPY_COEFF_P2_LOGf);
+    @vtype@ log_p3 = _mm@vsize@_set1_ps(NPY_COEFF_P3_LOGf);
+    @vtype@ log_p4 = _mm@vsize@_set1_ps(NPY_COEFF_P4_LOGf);
+    @vtype@ log_p5 = _mm@vsize@_set1_ps(NPY_COEFF_P5_LOGf);
+    @vtype@ log_q0 = _mm@vsize@_set1_ps(NPY_COEFF_Q0_LOGf);
+    @vtype@ log_q1 = _mm@vsize@_set1_ps(NPY_COEFF_Q1_LOGf);
+    @vtype@ log_q2 = _mm@vsize@_set1_ps(NPY_COEFF_Q2_LOGf);
+    @vtype@ log_q3 = _mm@vsize@_set1_ps(NPY_COEFF_Q3_LOGf);
+    @vtype@ log_q4 = _mm@vsize@_set1_ps(NPY_COEFF_Q4_LOGf);
+    @vtype@ log_q5 = _mm@vsize@_set1_ps(NPY_COEFF_Q5_LOGf);
+    @vtype@ loge2 = _mm@vsize@_set1_ps(NPY_LOGE2f);
+    @vtype@ nan = _mm@vsize@_set1_ps(NPY_NANF);
+    @vtype@ neg_nan = _mm@vsize@_set1_ps(-NPY_NANF);
+    @vtype@ neg_inf = _mm@vsize@_set1_ps(-NPY_INFINITYF);
+    @vtype@ inf = _mm@vsize@_set1_ps(NPY_INFINITYF);
+    @vtype@ zeros_f = _mm@vsize@_set1_ps(0.0f);
+    @vtype@ ones_f = _mm@vsize@_set1_ps(1.0f);
+    @vtype@i vindex = _mm@vsize@_loadu_si@vsize@((@vtype@i*)indexarr);
+    @vtype@ poly, num_poly, denom_poly, exponent;
+
+    @mask@ inf_mask, nan_mask, sqrt2_mask, zero_mask, negx_mask;
+    /* sticky accumulators: start all-false so FP status is set at most once */
+    @mask@ invalid_mask = @isa@_get_partial_load_mask_ps(0, num_lanes);
+    @mask@ divide_by_zero_mask = invalid_mask;
+    @mask@ load_mask = @isa@_get_full_load_mask_ps();
+    npy_intp num_remaining_elements = array_size;
+
+    while (num_remaining_elements > 0) {
+
+        /* final partial vector: mask off the lanes past the end of the data */
+        if (num_remaining_elements < num_lanes) {
+            load_mask = @isa@_get_partial_load_mask_ps(num_remaining_elements,
+                                                       num_lanes);
+        }
+
+        @vtype@ x_in;
+        if (stride == 1) {
+            x_in = @isa@_masked_load_ps(load_mask, ip);
+        }
+        else {
+            x_in = @isa@_masked_gather_ps(zeros_f, ip, vindex, load_mask);
+        }
+
+        /* classify lanes before the input is clobbered below */
+        negx_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_LT_OQ);
+        zero_mask = _mm@vsize@_cmp_ps@vsub@(x_in, zeros_f, _CMP_EQ_OQ);
+        inf_mask = _mm@vsize@_cmp_ps@vsub@(x_in, inf, _CMP_EQ_OQ);
+        /* x != x is true only for NaN (unordered compare) */
+        nan_mask = _mm@vsize@_cmp_ps@vsub@(x_in, x_in, _CMP_NEQ_UQ);
+        divide_by_zero_mask = @or_masks@(divide_by_zero_mask,
+                                         @and_masks@(zero_mask, load_mask));
+        invalid_mask = @or_masks@(invalid_mask, negx_mask);
+
+        /* zero out negative lanes so the core path computes on valid data */
+        @vtype@ x = @isa@_set_masked_lanes_ps(x_in, zeros_f, negx_mask);
+
+        /* set x = normalized mantissa */
+        exponent = @isa@_get_exponent(x);
+        x = @isa@_get_mantissa(x);
+
+        /* if x <= 1/sqrt(2): exponent -= 1, x *= 2 (keeps x near 1.0) */
+        sqrt2_mask = _mm@vsize@_cmp_ps@vsub@(x, _mm@vsize@_set1_ps(NPY_SQRT1_2f), _CMP_LE_OQ);
+        x = @isa@_blend(x, _mm@vsize@_add_ps(x,x), sqrt2_mask);
+        exponent = @isa@_blend(exponent,
+                               _mm@vsize@_sub_ps(exponent,ones_f), sqrt2_mask);
+
+        /* x = x - 1 */
+        x = _mm@vsize@_sub_ps(x, ones_f);
+
+        /* Polynomial approximation for log(1+x), evaluated by Horner's rule */
+        num_poly = @fmadd@(log_p5, x, log_p4);
+        num_poly = @fmadd@(num_poly, x, log_p3);
+        num_poly = @fmadd@(num_poly, x, log_p2);
+        num_poly = @fmadd@(num_poly, x, log_p1);
+        num_poly = @fmadd@(num_poly, x, log_p0);
+        denom_poly = @fmadd@(log_q5, x, log_q4);
+        denom_poly = @fmadd@(denom_poly, x, log_q3);
+        denom_poly = @fmadd@(denom_poly, x, log_q2);
+        denom_poly = @fmadd@(denom_poly, x, log_q1);
+        denom_poly = @fmadd@(denom_poly, x, log_q0);
+        poly = _mm@vsize@_div_ps(num_poly, denom_poly);
+        /* log(x) = log(mantissa) + exponent*ln(2) */
+        poly = @fmadd@(exponent, loge2, poly);
+
+        /*
+         * x < 0.0f; return -NAN
+         * x = +/- NAN; return NAN
+         * x = 0.0f; return -INF
+         */
+        poly = @isa@_set_masked_lanes_ps(poly, nan, nan_mask);
+        poly = @isa@_set_masked_lanes_ps(poly, neg_nan, negx_mask);
+        poly = @isa@_set_masked_lanes_ps(poly, neg_inf, zero_mask);
+        poly = @isa@_set_masked_lanes_ps(poly, inf, inf_mask);
+
+        @masked_store@(op, @cvtps_epi32@(load_mask), poly);
+
+        ip += num_lanes*stride;
+        op += num_lanes;
+        num_remaining_elements -= num_lanes;
+    }
+
+    /* raise FP status flags once, after the loop, if any lane tripped them */
+    if (@mask_to_int@(invalid_mask)) {
+        npy_set_floatstatus_invalid();
+    }
+    if (@mask_to_int@(divide_by_zero_mask)) {
+        npy_set_floatstatus_divbyzero();
+    }
+}
+#endif
/**end repeat**/
/*
@@ -1099,9 +2298,9 @@ static NPY_INLINE @vtype@ byte_to_true(@vtype@ v)
static void
sse2_binary_@kind@_BOOL(npy_bool * op, npy_bool * ip1, npy_bool * ip2, npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = ip1[i] @op@ ip2[i];
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vloadu@((@vtype@*)&ip1[i]);
@vtype@ b = @vloadu@((@vtype@*)&ip2[i]);
#if @and@
@@ -1126,16 +2325,16 @@ static void
sse2_reduce_@kind@_BOOL(npy_bool * op, npy_bool * ip, const npy_intp n)
{
const @vtype@ zero = @vpre@_setzero_@vsuf@();
- LOOP_BLOCK_ALIGN_VAR(ip, npy_bool, 16) {
+ LOOP_BLOCK_ALIGN_VAR(ip, npy_bool, VECTOR_SIZE_BYTES) {
*op = *op @op@ ip[i];
if (*op @sc@ 0) {
return;
}
}
/* unrolled once to replace a slow movmsk with a fast pmaxb */
- LOOP_BLOCKED(npy_bool, 32) {
+ LOOP_BLOCKED(npy_bool, 2 * VECTOR_SIZE_BYTES) {
@vtype@ v = @vload@((@vtype@*)&ip[i]);
- @vtype@ v2 = @vload@((@vtype@*)&ip[i + 16]);
+ @vtype@ v2 = @vload@((@vtype@*)&ip[i + VECTOR_SIZE_BYTES]);
v = @vpre@_cmpeq_epi8(v, zero);
v2 = @vpre@_cmpeq_epi8(v2, zero);
#if @and@
@@ -1173,9 +2372,9 @@ sse2_reduce_@kind@_BOOL(npy_bool * op, npy_bool * ip, const npy_intp n)
static void
sse2_@kind@_BOOL(@type@ * op, @type@ * ip, const npy_intp n)
{
- LOOP_BLOCK_ALIGN_VAR(op, @type@, 16)
+ LOOP_BLOCK_ALIGN_VAR(op, @type@, VECTOR_SIZE_BYTES)
op[i] = (ip[i] @op@ 0);
- LOOP_BLOCKED(@type@, 16) {
+ LOOP_BLOCKED(@type@, VECTOR_SIZE_BYTES) {
@vtype@ a = @vloadu@((@vtype@*)&ip[i]);
#if @not@
const @vtype@ zero = @vpre@_setzero_@vsuf@();
@@ -1196,6 +2395,8 @@ sse2_@kind@_BOOL(@type@ * op, @type@ * ip, const npy_intp n)
/**end repeat**/
+#undef VECTOR_SIZE_BYTES
+
#endif /* NPY_HAVE_SSE2_INTRINSICS */
#endif
diff --git a/numpy/core/src/umath/ufunc_object.c b/numpy/core/src/umath/ufunc_object.c
index 459b0a594..e4ad3dc84 100644
--- a/numpy/core/src/umath/ufunc_object.c
+++ b/numpy/core/src/umath/ufunc_object.c
@@ -46,6 +46,7 @@
#include "npy_import.h"
#include "extobj.h"
#include "common.h"
+#include "numpyos.h"
/********** PRINTF DEBUG TRACING **************/
#define NPY_UF_DBG_TRACING 0
@@ -307,6 +308,78 @@ _find_array_prepare(ufunc_full_args args,
return;
}
+#define NPY_UFUNC_DEFAULT_INPUT_FLAGS \
+ NPY_ITER_READONLY | \
+ NPY_ITER_ALIGNED | \
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
+
+#define NPY_UFUNC_DEFAULT_OUTPUT_FLAGS \
+ NPY_ITER_ALIGNED | \
+ NPY_ITER_ALLOCATE | \
+ NPY_ITER_NO_BROADCAST | \
+ NPY_ITER_NO_SUBTYPE | \
+ NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE
+
+/* Called at module initialization to set the matmul ufunc output flags */
+NPY_NO_EXPORT int
+set_matmul_flags(PyObject *d)
+{
+    /* borrowed reference from the module dict -- do not DECREF */
+    PyObject *matmul = PyDict_GetItemString(d, "matmul");
+    if (matmul == NULL) {
+        /*
+         * NOTE(review): PyDict_GetItemString does not set an exception for a
+         * missing key, so -1 is returned here without an error set -- confirm
+         * the caller reports the failure.
+         */
+        return -1;
+    }
+    /*
+     * The default output flag NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE allows
+     * perfectly overlapping input and output (in-place operations). While
+     * correct for the common mathematical operations, this assumption is
+     * incorrect in the general case and specifically in the case of matmul.
+     *
+     * NPY_ITER_UPDATEIFCOPY is added by default in
+     * PyUFunc_GeneralizedFunction, which is the variant called for gufuncs
+     * with a signature
+     *
+     * Enabling NPY_ITER_WRITEONLY can prevent a copy in some cases.
+     */
+    /* op_flags[2] is the single output operand of matmul's (n?,k),(k,m?) */
+    ((PyUFuncObject *)matmul)->op_flags[2] = (NPY_ITER_WRITEONLY |
+                                              NPY_ITER_UPDATEIFCOPY |
+                                              NPY_UFUNC_DEFAULT_OUTPUT_FLAGS) &
+                                             ~NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
+    return 0;
+}
+
+
+/*
+ * Set per-operand flags according to desired input or output flags.
+ * op_flags[i] for i in input (as determined by ufunc->nin) will be
+ * merged with op_in_flags, perhaps overriding per-operand flags set
+ * in previous stages.
+ * op_flags[i] for i in output will be set to op_out_flags only if previously
+ * unset.
+ * The input flag behavior preserves backward compatibility, while the
+ * output flag behaviour is the "correct" one for maximum flexibility.
+ */
+NPY_NO_EXPORT void
+_ufunc_setup_flags(PyUFuncObject *ufunc, npy_uint32 op_in_flags,
+                   npy_uint32 op_out_flags, npy_uint32 *op_flags)
+{
+    int nin = ufunc->nin;
+    int nout = ufunc->nout;
+    int nop = nin + nout, i;
+    /* Set up the flags */
+    for (i = 0; i < nin; ++i) {
+        /* inputs: requested flags are merged with per-operand flags */
+        op_flags[i] = ufunc->op_flags[i] | op_in_flags;
+        /*
+         * If READWRITE flag has been set for this operand,
+         * then clear default READONLY flag
+         */
+        if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
+            op_flags[i] &= ~NPY_ITER_READONLY;
+        }
+    }
+    for (i = nin; i < nop; ++i) {
+        /* outputs: a nonzero per-operand flag value wins wholesale */
+        op_flags[i] = ufunc->op_flags[i] ? ufunc->op_flags[i] : op_out_flags;
+    }
+}
/*
* This function analyzes the input arguments
@@ -480,7 +553,27 @@ _is_alnum_underscore(char ch)
}
/*
- * Return the ending position of a variable name
+ * Convert a string into a number
+ */
+static npy_intp
+_get_size(const char* str)
+{
+    char *stop;  /* set by strtoll to the first unparsed character */
+    npy_longlong size = NumPyOS_strtoll(str, &stop, 10);
+
+    /* reject "no digits parsed" and numbers glued to an identifier (e.g. 3n) */
+    if (stop == str || _is_alpha_underscore(*stop)) {
+        /* not a well formed number */
+        return -1;
+    }
+    /*
+     * len(str) too long
+     * NOTE(review): bounds are exclusive, so NPY_MAX_INTP itself is also
+     * rejected -- confirm this is intentional.
+     */
+    if (size >= NPY_MAX_INTP || size <= NPY_MIN_INTP) {
+        return -1;
+    }
+    return size;
+}
+
+/*
+ * Return the ending position of a variable name including optional modifier
*/
static int
_get_end_of_name(const char* str, int offset)
@@ -489,6 +582,9 @@ _get_end_of_name(const char* str, int offset)
while (_is_alnum_underscore(str[ret])) {
ret++;
}
+ if (str[ret] == '?') {
+ ret ++;
+ }
return ret;
}
@@ -530,7 +626,6 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
"_parse_signature with NULL signature");
return -1;
}
-
len = strlen(signature);
ufunc->core_signature = PyArray_malloc(sizeof(char) * (len+1));
if (ufunc->core_signature) {
@@ -546,13 +641,22 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
ufunc->core_enabled = 1;
ufunc->core_num_dim_ix = 0;
ufunc->core_num_dims = PyArray_malloc(sizeof(int) * ufunc->nargs);
- ufunc->core_dim_ixs = PyArray_malloc(sizeof(int) * len); /* shrink this later */
ufunc->core_offsets = PyArray_malloc(sizeof(int) * ufunc->nargs);
- if (ufunc->core_num_dims == NULL || ufunc->core_dim_ixs == NULL
- || ufunc->core_offsets == NULL) {
+ /* The next three items will be shrunk later */
+ ufunc->core_dim_ixs = PyArray_malloc(sizeof(int) * len);
+ ufunc->core_dim_sizes = PyArray_malloc(sizeof(npy_intp) * len);
+ ufunc->core_dim_flags = PyArray_malloc(sizeof(npy_uint32) * len);
+
+ if (ufunc->core_num_dims == NULL || ufunc->core_dim_ixs == NULL ||
+ ufunc->core_offsets == NULL ||
+ ufunc->core_dim_sizes == NULL ||
+ ufunc->core_dim_flags == NULL) {
PyErr_NoMemory();
goto fail;
}
+ for (size_t j = 0; j < len; j++) {
+ ufunc->core_dim_flags[j] = 0;
+ }
i = _next_non_white_space(signature, 0);
while (signature[i] != '\0') {
@@ -577,26 +681,70 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
i = _next_non_white_space(signature, i + 1);
while (signature[i] != ')') {
/* loop over core dimensions */
- int j = 0;
- if (!_is_alpha_underscore(signature[i])) {
- parse_error = "expect dimension name";
+ int ix, i_end;
+ npy_intp frozen_size;
+ npy_bool can_ignore;
+
+ if (signature[i] == '\0') {
+ parse_error = "unexpected end of signature string";
goto fail;
}
- while (j < ufunc->core_num_dim_ix) {
- if (_is_same_name(signature+i, var_names[j])) {
+ /*
+ * Is this a variable or a fixed size dimension?
+ */
+ if (_is_alpha_underscore(signature[i])) {
+ frozen_size = -1;
+ }
+ else {
+ frozen_size = (npy_intp)_get_size(signature + i);
+ if (frozen_size <= 0) {
+ parse_error = "expect dimension name or non-zero frozen size";
+ goto fail;
+ }
+ }
+ /* Is this dimension flexible? */
+ i_end = _get_end_of_name(signature, i);
+ can_ignore = (i_end > 0 && signature[i_end - 1] == '?');
+ /*
+ * Determine whether we already saw this dimension name,
+ * get its index, and set its properties
+ */
+ for(ix = 0; ix < ufunc->core_num_dim_ix; ix++) {
+ if (frozen_size > 0 ?
+ frozen_size == ufunc->core_dim_sizes[ix] :
+ _is_same_name(signature + i, var_names[ix])) {
break;
}
- j++;
}
- if (j >= ufunc->core_num_dim_ix) {
- var_names[j] = signature+i;
+ /*
+ * If a new dimension, store its properties; if old, check consistency.
+ */
+ if (ix == ufunc->core_num_dim_ix) {
ufunc->core_num_dim_ix++;
+ var_names[ix] = signature + i;
+ ufunc->core_dim_sizes[ix] = frozen_size;
+ if (frozen_size < 0) {
+ ufunc->core_dim_flags[ix] |= UFUNC_CORE_DIM_SIZE_INFERRED;
+ }
+ if (can_ignore) {
+ ufunc->core_dim_flags[ix] |= UFUNC_CORE_DIM_CAN_IGNORE;
+ }
+ } else {
+ if (can_ignore && !(ufunc->core_dim_flags[ix] &
+ UFUNC_CORE_DIM_CAN_IGNORE)) {
+ parse_error = "? cannot be used, name already seen without ?";
+ goto fail;
+ }
+ if (!can_ignore && (ufunc->core_dim_flags[ix] &
+ UFUNC_CORE_DIM_CAN_IGNORE)) {
+ parse_error = "? must be used, name already seen with ?";
+ goto fail;
+ }
}
- ufunc->core_dim_ixs[cur_core_dim] = j;
+ ufunc->core_dim_ixs[cur_core_dim] = ix;
cur_core_dim++;
nd++;
- i = _get_end_of_name(signature, i);
- i = _next_non_white_space(signature, i);
+ i = _next_non_white_space(signature, i_end);
if (signature[i] != ',' && signature[i] != ')') {
parse_error = "expect ',' or ')'";
goto fail;
@@ -633,7 +781,14 @@ _parse_signature(PyUFuncObject *ufunc, const char *signature)
goto fail;
}
ufunc->core_dim_ixs = PyArray_realloc(ufunc->core_dim_ixs,
- sizeof(int)*cur_core_dim);
+ sizeof(int) * cur_core_dim);
+ ufunc->core_dim_sizes = PyArray_realloc(
+ ufunc->core_dim_sizes,
+ sizeof(npy_intp) * ufunc->core_num_dim_ix);
+ ufunc->core_dim_flags = PyArray_realloc(
+ ufunc->core_dim_flags,
+ sizeof(npy_uint32) * ufunc->core_num_dim_ix);
+
/* check for trivial core-signature, e.g. "(),()->()" */
if (cur_core_dim == 0) {
ufunc->core_enabled = 0;
@@ -664,7 +819,7 @@ _set_out_array(PyObject *obj, PyArrayObject **store)
/* Translate None to NULL */
return 0;
}
- if PyArray_Check(obj) {
+ if (PyArray_Check(obj)) {
/* If it's an array, store it */
if (PyArray_FailUnlessWriteable((PyArrayObject *)obj,
"output array") < 0) {
@@ -753,7 +908,7 @@ parse_ufunc_keywords(PyUFuncObject *ufunc, PyObject *kwds, PyObject **kwnames, .
typedef int converter(PyObject *, void *);
while (PyDict_Next(kwds, &pos, &key, &value)) {
- int i;
+ npy_intp i;
converter *convert;
void *output = NULL;
npy_intp index = locate_key(kwnames, key);
@@ -1038,34 +1193,11 @@ get_ufunc_arguments(PyUFuncObject *ufunc,
}
}
else {
- /*
- * If the deprecated behavior is ever removed,
- * keep only the else branch of this if-else
- */
- if (PyArray_Check(out_kwd) || out_kwd == Py_None) {
- if (DEPRECATE("passing a single array to the "
- "'out' keyword argument of a "
- "ufunc with\n"
- "more than one output will "
- "result in an error in the "
- "future") < 0) {
- /* The future error message */
- PyErr_SetString(PyExc_TypeError,
- "'out' must be a tuple of arrays");
- goto fail;
- }
- if (_set_out_array(out_kwd, out_op+nin) < 0) {
- goto fail;
- }
- }
- else {
- PyErr_SetString(PyExc_TypeError,
- nout > 1 ? "'out' must be a tuple "
- "of arrays" :
- "'out' must be an array or a "
- "tuple of a single array");
- goto fail;
- }
+ PyErr_SetString(PyExc_TypeError,
+ nout > 1 ? "'out' must be a tuple of arrays" :
+ "'out' must be an array or a tuple with "
+ "a single array");
+ goto fail;
}
}
/*
@@ -1311,11 +1443,11 @@ iterator_loop(PyUFuncObject *ufunc,
PyObject **arr_prep,
ufunc_full_args full_args,
PyUFuncGenericFunction innerloop,
- void *innerloopdata)
+ void *innerloopdata,
+ npy_uint32 *op_flags)
{
npy_intp i, nin = ufunc->nin, nout = ufunc->nout;
npy_intp nop = nin + nout;
- npy_uint32 op_flags[NPY_MAXARGS];
NpyIter *iter;
char *baseptrs[NPY_MAXARGS];
@@ -1329,29 +1461,6 @@ iterator_loop(PyUFuncObject *ufunc,
NPY_BEGIN_THREADS_DEF;
- /* Set up the flags */
- for (i = 0; i < nin; ++i) {
- op_flags[i] = NPY_ITER_READONLY |
- NPY_ITER_ALIGNED |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- /*
- * If READWRITE flag has been set for this operand,
- * then clear default READONLY flag
- */
- op_flags[i] |= ufunc->op_flags[i];
- if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
- op_flags[i] &= ~NPY_ITER_READONLY;
- }
- }
- for (i = nin; i < nop; ++i) {
- op_flags[i] = NPY_ITER_WRITEONLY |
- NPY_ITER_ALIGNED |
- NPY_ITER_ALLOCATE |
- NPY_ITER_NO_BROADCAST |
- NPY_ITER_NO_SUBTYPE |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- }
-
iter_flags = ufunc->iter_flags |
NPY_ITER_EXTERNAL_LOOP |
NPY_ITER_REFS_OK |
@@ -1455,15 +1564,15 @@ iterator_loop(PyUFuncObject *ufunc,
}
/*
+ * ufunc - the ufunc to call
* trivial_loop_ok - 1 if no alignment, data conversion, etc required
- * nin - number of inputs
- * nout - number of outputs
- * op - the operands (nin + nout of them)
+ * op - the operands (ufunc->nin + ufunc->nout of them)
+ * dtypes - the dtype of each operand
* order - the loop execution order/output memory order
* buffersize - how big of a buffer to use
* arr_prep - the __array_prepare__ functions for the outputs
- * innerloop - the inner loop function
- * innerloopdata - data to pass to the inner loop
+ * full_args - the original input, output PyObject *
+ * op_flags - per-operand flags, a combination of NPY_ITER_* constants
*/
static int
execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
@@ -1473,7 +1582,8 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- ufunc_full_args full_args)
+ ufunc_full_args full_args,
+ npy_uint32 *op_flags)
{
npy_intp nin = ufunc->nin, nout = ufunc->nout;
PyUFuncGenericFunction innerloop;
@@ -1608,7 +1718,7 @@ execute_legacy_ufunc_loop(PyUFuncObject *ufunc,
NPY_UF_DBG_PRINT("iterator loop\n");
if (iterator_loop(ufunc, op, dtypes, order,
buffersize, arr_prep, full_args,
- innerloop, innerloopdata) < 0) {
+ innerloop, innerloopdata, op_flags) < 0) {
return -1;
}
@@ -1634,14 +1744,13 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
NPY_ORDER order,
npy_intp buffersize,
PyObject **arr_prep,
- ufunc_full_args full_args)
+ ufunc_full_args full_args,
+ npy_uint32 *op_flags)
{
int i, nin = ufunc->nin, nout = ufunc->nout;
int nop = nin + nout;
- npy_uint32 op_flags[NPY_MAXARGS];
NpyIter *iter;
int needs_api;
- npy_intp default_op_in_flags = 0, default_op_out_flags = 0;
NpyIter_IterNextFunc *iternext;
char **dataptr;
@@ -1651,48 +1760,10 @@ execute_fancy_ufunc_loop(PyUFuncObject *ufunc,
PyArrayObject **op_it;
npy_uint32 iter_flags;
- if (wheremask != NULL) {
- if (nop + 1 > NPY_MAXARGS) {
- PyErr_SetString(PyExc_ValueError,
- "Too many operands when including where= parameter");
- return -1;
- }
- op[nop] = wheremask;
- dtypes[nop] = NULL;
- default_op_out_flags |= NPY_ITER_WRITEMASKED;
- }
-
- /* Set up the flags */
- for (i = 0; i < nin; ++i) {
- op_flags[i] = default_op_in_flags |
- NPY_ITER_READONLY |
- NPY_ITER_ALIGNED |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- /*
- * If READWRITE flag has been set for this operand,
- * then clear default READONLY flag
- */
- op_flags[i] |= ufunc->op_flags[i];
- if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
- op_flags[i] &= ~NPY_ITER_READONLY;
- }
- }
for (i = nin; i < nop; ++i) {
- /*
- * We don't write to all elements, and the iterator may make
- * UPDATEIFCOPY temporary copies. The output arrays (unless they are
- * allocated by the iterator itself) must be considered READWRITE by the
- * iterator, so that the elements we don't write to are copied to the
- * possible temporary array.
- */
- op_flags[i] = default_op_out_flags |
- (op[i] != NULL ? NPY_ITER_READWRITE : NPY_ITER_WRITEONLY) |
- NPY_ITER_ALIGNED |
- NPY_ITER_ALLOCATE |
- NPY_ITER_NO_BROADCAST |
- NPY_ITER_NO_SUBTYPE |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
+ op_flags[i] |= (op[i] != NULL ? NPY_ITER_READWRITE : NPY_ITER_WRITEONLY);
}
+
if (wheremask != NULL) {
op_flags[nop] = NPY_ITER_READONLY | NPY_ITER_ARRAYMASK;
}
@@ -1935,6 +2006,72 @@ fail:
}
/*
+ * Validate that operands have enough dimensions, accounting for
+ * possible flexible dimensions that may be absent.
+ */
+/*
+ * For each non-NULL operand, check PyArray_NDIM(op[i]) against the required
+ * op_core_num_dims[i].  When an operand is short, flexible ('?') core
+ * dimensions are consumed: the dimension is re-flagged from CAN_IGNORE to
+ * MISSING (mutating core_dim_flags) and op_core_num_dims is decremented for
+ * every operand that uses that dimension label.  Returns 0 on success, -1
+ * (with a ValueError set) if an operand still has too few dimensions.
+ */
+static int
+_validate_num_dims(PyUFuncObject *ufunc, PyArrayObject **op,
+                   npy_uint32 *core_dim_flags,
+                   int *op_core_num_dims) {
+    int i, j;
+    int nin = ufunc->nin;
+    int nop = ufunc->nargs;
+
+    for (i = 0; i < nop; i++) {
+        if (op[i] != NULL) {
+            int op_ndim = PyArray_NDIM(op[i]);
+
+            if (op_ndim < op_core_num_dims[i]) {
+                int core_offset = ufunc->core_offsets[i];
+                /* We've too few, but some dimensions might be flexible */
+                for (j = core_offset;
+                     j < core_offset + ufunc->core_num_dims[i]; j++) {
+                    int core_dim_index = ufunc->core_dim_ixs[j];
+                    if ((core_dim_flags[core_dim_index] &
+                         UFUNC_CORE_DIM_CAN_IGNORE)) {
+                        int i1, j1, k;
+                        /*
+                         * Found a dimension that can be ignored. Flag that
+                         * it is missing, and unflag that it can be ignored,
+                         * since we are doing so already.
+                         */
+                        core_dim_flags[core_dim_index] |= UFUNC_CORE_DIM_MISSING;
+                        core_dim_flags[core_dim_index] ^= UFUNC_CORE_DIM_CAN_IGNORE;
+                        /*
+                         * Reduce the number of core dimensions for all
+                         * operands that use this one (including ours),
+                         * and check whether we're now OK.
+                         */
+                        /* k walks the flat core_dim_ixs array across operands */
+                        for (i1 = 0, k=0; i1 < nop; i1++) {
+                            for (j1 = 0; j1 < ufunc->core_num_dims[i1]; j1++) {
+                                if (ufunc->core_dim_ixs[k++] == core_dim_index) {
+                                    op_core_num_dims[i1]--;
+                                }
+                            }
+                        }
+                        if (op_ndim == op_core_num_dims[i]) {
+                            break;
+                        }
+                    }
+                }
+                /* still short after consuming flexible dims: hard error */
+                if (op_ndim < op_core_num_dims[i]) {
+                    PyErr_Format(PyExc_ValueError,
+                         "%s: %s operand %d does not have enough "
+                         "dimensions (has %d, gufunc core with "
+                         "signature %s requires %d)",
+                         ufunc_get_name_cstr(ufunc),
+                         i < nin ? "Input" : "Output",
+                         i < nin ? i : i - nin, PyArray_NDIM(op[i]),
+                         ufunc->core_signature, op_core_num_dims[i]);
+                    return -1;
+                }
+            }
+        }
+    }
+    return 0;
+}
+
+/*
* Check whether any of the outputs of a gufunc has core dimensions.
*/
static int
@@ -2007,7 +2144,7 @@ _check_keepdims_support(PyUFuncObject *ufunc) {
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
+_parse_axes_arg(PyUFuncObject *ufunc, int op_core_num_dims[], PyObject *axes,
PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nin = ufunc->nin;
int nop = ufunc->nargs;
@@ -2037,7 +2174,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
PyObject *op_axes_tuple, *axis_item;
int axis, op_axis;
- op_ncore = core_num_dims[iop];
+ op_ncore = op_core_num_dims[iop];
if (op[iop] != NULL) {
op_ndim = PyArray_NDIM(op[iop]);
op_nbroadcast = op_ndim - op_ncore;
@@ -2137,7 +2274,7 @@ _parse_axes_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axes,
* Returns 0 on success, and -1 on failure
*/
static int
-_parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
+_parse_axis_arg(PyUFuncObject *ufunc, const int core_num_dims[], PyObject *axis,
PyArrayObject **op, int broadcast_ndim, int **remap_axis) {
int nop = ufunc->nargs;
int iop, axis_int;
@@ -2191,57 +2328,72 @@ _parse_axis_arg(PyUFuncObject *ufunc, int core_num_dims[], PyObject *axis,
*
* Returns 0 on success, and -1 on failure
*
- * The behavior has been changed in NumPy 1.10.0, and the following
+ * The behavior has been changed in NumPy 1.16.0, and the following
* requirements must be fulfilled or an error will be raised:
* * Arguments, both input and output, must have at least as many
* dimensions as the corresponding number of core dimensions. In
- * previous versions, 1's were prepended to the shape as needed.
+ * versions before 1.10, 1's were prepended to the shape as needed.
* * Core dimensions with same labels must have exactly matching sizes.
- * In previous versions, core dimensions of size 1 would broadcast
+ * In versions before 1.10, core dimensions of size 1 would broadcast
* against other core dimensions with the same label.
* * All core dimensions must have their size specified by a passed in
- * input or output argument. In previous versions, core dimensions in
+ * input or output argument. In versions before 1.10, core dimensions in
* an output argument that were not specified in an input argument,
* and whose size could not be inferred from a passed in output
* argument, would have their size set to 1.
+ * * Core dimensions may be fixed, new in NumPy 1.16
*/
static int
_get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
- npy_intp* core_dim_sizes, int **remap_axis) {
+ const int *op_core_num_dims, npy_uint32 *core_dim_flags,
+ npy_intp *core_dim_sizes, int **remap_axis) {
int i;
int nin = ufunc->nin;
int nout = ufunc->nout;
int nop = nin + nout;
- for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
- core_dim_sizes[i] = -1;
- }
for (i = 0; i < nop; ++i) {
if (op[i] != NULL) {
int idim;
int dim_offset = ufunc->core_offsets[i];
- int num_dims = ufunc->core_num_dims[i];
- int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
+ int core_start_dim = PyArray_NDIM(op[i]) - op_core_num_dims[i];
+ int dim_delta = 0;
+
+ /* checked before this routine gets called */
+ assert(core_start_dim >= 0);
+
/*
* Make sure every core dimension exactly matches all other core
- * dimensions with the same label.
+ * dimensions with the same label. Note that flexible dimensions
+ * may have been removed at this point, if so, they are marked
+ * with UFUNC_CORE_DIM_MISSING.
*/
- for (idim = 0; idim < num_dims; ++idim) {
- int core_dim_index = ufunc->core_dim_ixs[dim_offset+idim];
- npy_intp op_dim_size = PyArray_DIM(
- op[i], REMAP_AXIS(i, core_start_dim+idim));
-
- if (core_dim_sizes[core_dim_index] == -1) {
+ for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
+ int core_index = dim_offset + idim;
+ int core_dim_index = ufunc->core_dim_ixs[core_index];
+ npy_intp core_dim_size = core_dim_sizes[core_dim_index];
+ npy_intp op_dim_size;
+
+ /* can only happen if flexible; dimension missing altogether */
+ if (core_dim_flags[core_dim_index] & UFUNC_CORE_DIM_MISSING) {
+ op_dim_size = 1;
+ dim_delta++; /* for indexing in dimensions */
+ }
+ else {
+ op_dim_size = PyArray_DIM(op[i],
+ REMAP_AXIS(i, core_start_dim + idim - dim_delta));
+ }
+ if (core_dim_sizes[core_dim_index] < 0) {
core_dim_sizes[core_dim_index] = op_dim_size;
}
- else if (op_dim_size != core_dim_sizes[core_dim_index]) {
+ else if (op_dim_size != core_dim_size) {
PyErr_Format(PyExc_ValueError,
"%s: %s operand %d has a mismatch in its "
"core dimension %d, with gufunc "
"signature %s (size %zd is different "
"from %zd)",
ufunc_get_name_cstr(ufunc), i < nin ? "Input" : "Output",
- i < nin ? i : i - nin, idim,
+ i < nin ? i : i - nin, idim - dim_delta,
ufunc->core_signature, op_dim_size,
core_dim_sizes[core_dim_index]);
return -1;
@@ -2253,39 +2405,29 @@ _get_coredim_sizes(PyUFuncObject *ufunc, PyArrayObject **op,
/*
* Make sure no core dimension is unspecified.
*/
- for (i = 0; i < ufunc->core_num_dim_ix; ++i) {
- if (core_dim_sizes[i] == -1) {
- break;
- }
- }
- if (i != ufunc->core_num_dim_ix) {
- /*
- * There is at least one core dimension missing, find in which
- * operand it comes up first (it has to be an output operand).
- */
- const int missing_core_dim = i;
- int out_op;
- for (out_op = nin; out_op < nop; ++out_op) {
- int first_idx = ufunc->core_offsets[out_op];
- int last_idx = first_idx + ufunc->core_num_dims[out_op];
- for (i = first_idx; i < last_idx; ++i) {
- if (ufunc->core_dim_ixs[i] == missing_core_dim) {
- break;
- }
- }
- if (i < last_idx) {
- /* Change index offsets for error message */
- out_op -= nin;
- i -= first_idx;
- break;
+ for (i = nin; i < nop; ++i) {
+ int idim;
+ int dim_offset = ufunc->core_offsets[i];
+
+ for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
+ int core_dim_index = ufunc->core_dim_ixs[dim_offset + idim];
+
+ /* check all cases where the size has not yet been set */
+ if (core_dim_sizes[core_dim_index] < 0) {
+ /*
+ * Oops, this dimension was never specified
+ * (can only happen if output op not given)
+ */
+ PyErr_Format(PyExc_ValueError,
+ "%s: Output operand %d has core dimension %d "
+ "unspecified, with gufunc signature %s",
+ ufunc_get_name_cstr(ufunc), i - nin, idim,
+ ufunc->core_signature);
+ return -1;
}
}
- PyErr_Format(PyExc_ValueError,
- "%s: Output operand %d has core dimension %d "
- "unspecified, with gufunc signature %s",
- ufunc_get_name_cstr(ufunc), out_op, i, ufunc->core_signature);
- return -1;
}
+
return 0;
}
@@ -2317,6 +2459,11 @@ _get_identity(PyUFuncObject *ufunc, npy_bool *reorderable) {
*reorderable = 0;
Py_RETURN_NONE;
+ case PyUFunc_IdentityValue:
+ *reorderable = 1;
+ Py_INCREF(ufunc->identity_value);
+ return ufunc->identity_value;
+
default:
PyErr_Format(PyExc_ValueError,
"ufunc %s has an invalid identity", ufunc_get_name_cstr(ufunc));
@@ -2324,6 +2471,26 @@ _get_identity(PyUFuncObject *ufunc, npy_bool *reorderable) {
}
}
+/*
+ * Copy over parts of the ufunc structure that may need to be
+ * changed during execution. Returns 0 on success; -1 otherwise.
+ */
+static int
+_initialize_variable_parts(PyUFuncObject *ufunc,
+                           int op_core_num_dims[],
+                           npy_intp core_dim_sizes[],
+                           npy_uint32 core_dim_flags[]) {
+    int i;
+
+    /* per-operand core dimension counts */
+    for (i = 0; i < ufunc->nargs; i++) {
+        op_core_num_dims[i] = ufunc->core_num_dims[i];
+    }
+    /* per-dimension-label sizes and flags */
+    for (i = 0; i < ufunc->core_num_dim_ix; i++) {
+        core_dim_sizes[i] = ufunc->core_dim_sizes[i];
+        core_dim_flags[i] = ufunc->core_dim_flags[i];
+    }
+    /* cannot currently fail; returns an int to allow future error paths */
+    return 0;
+}
static int
PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
@@ -2340,10 +2507,10 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Use remapped axes for generalized ufunc */
int broadcast_ndim, iter_ndim;
- int core_num_dims_array[NPY_MAXARGS];
- int *core_num_dims;
+ int op_core_num_dims[NPY_MAXARGS];
int op_axes_arrays[NPY_MAXARGS][NPY_MAXDIMS];
int *op_axes[NPY_MAXARGS];
+ npy_uint32 core_dim_flags[NPY_MAXARGS];
npy_uint32 op_flags[NPY_MAXARGS];
npy_intp iter_shape[NPY_MAXARGS];
@@ -2398,6 +2565,12 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
dtypes[i] = NULL;
arr_prep[i] = NULL;
}
+ /* Initialize possibly variable parts to the values from the ufunc */
+ retval = _initialize_variable_parts(ufunc, op_core_num_dims,
+ core_dim_sizes, core_dim_flags);
+ if (retval < 0) {
+ goto fail;
+ }
NPY_UF_DBG_PRINT("Getting arguments\n");
@@ -2429,41 +2602,28 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
}
/*
- * If keepdims is set and true, signal all dimensions will be the same.
+ * If keepdims is set and true, which means all input dimensions are
+ * the same, signal that all output dimensions will be the same too.
*/
if (keepdims == 1) {
- int num_dims = ufunc->core_num_dims[0];
- for (i = 0; i < nop; ++i) {
- core_num_dims_array[i] = num_dims;
+ int num_dims = op_core_num_dims[0];
+ for (i = nin; i < nop; ++i) {
+ op_core_num_dims[i] = num_dims;
}
- core_num_dims = core_num_dims_array;
}
else {
/* keepdims was not set or was false; no adjustment necessary */
- core_num_dims = ufunc->core_num_dims;
keepdims = 0;
}
/*
* Check that operands have the minimum dimensions required.
* (Just checks core; broadcast dimensions are tested by the iterator.)
*/
- for (i = 0; i < nop; i++) {
- if (op[i] != NULL && PyArray_NDIM(op[i]) < core_num_dims[i]) {
- PyErr_Format(PyExc_ValueError,
- "%s: %s operand %d does not have enough "
- "dimensions (has %d, gufunc core with "
- "signature %s requires %d)",
- ufunc_name,
- i < nin ? "Input" : "Output",
- i < nin ? i : i - nin,
- PyArray_NDIM(op[i]),
- ufunc->core_signature,
- core_num_dims[i]);
- retval = -1;
- goto fail;
- }
+ retval = _validate_num_dims(ufunc, op, core_dim_flags,
+ op_core_num_dims);
+ if (retval < 0) {
+ goto fail;
}
-
/*
* Figure out the number of iteration dimensions, which
* is the broadcast result of all the input non-core
@@ -2471,30 +2631,12 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
*/
broadcast_ndim = 0;
for (i = 0; i < nin; ++i) {
- int n = PyArray_NDIM(op[i]) - core_num_dims[i];
+ int n = PyArray_NDIM(op[i]) - op_core_num_dims[i];
if (n > broadcast_ndim) {
broadcast_ndim = n;
}
}
- /*
- * Figure out the number of iterator creation dimensions,
- * which is the broadcast dimensions + all the core dimensions of
- * the outputs, so that the iterator can allocate those output
- * dimensions following the rules of order='F', for example.
- */
- iter_ndim = broadcast_ndim;
- for (i = nin; i < nop; ++i) {
- iter_ndim += core_num_dims[i];
- }
- if (iter_ndim > NPY_MAXDIMS) {
- PyErr_Format(PyExc_ValueError,
- "too many dimensions for generalized ufunc %s",
- ufunc_name);
- retval = -1;
- goto fail;
- }
-
/* Possibly remap axes. */
if (axes != NULL || axis != NULL) {
remap_axis = PyArray_malloc(sizeof(remap_axis[0]) * nop);
@@ -2508,11 +2650,11 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
remap_axis[i] = remap_axis_memory + i * NPY_MAXDIMS;
}
if (axis) {
- retval = _parse_axis_arg(ufunc, core_num_dims, axis, op,
+ retval = _parse_axis_arg(ufunc, op_core_num_dims, axis, op,
broadcast_ndim, remap_axis);
}
else {
- retval = _parse_axes_arg(ufunc, core_num_dims, axes, op,
+ retval = _parse_axes_arg(ufunc, op_core_num_dims, axes, op,
broadcast_ndim, remap_axis);
}
if(retval < 0) {
@@ -2521,10 +2663,28 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
/* Collect the lengths of the labelled core dimensions */
- retval = _get_coredim_sizes(ufunc, op, core_dim_sizes, remap_axis);
+ retval = _get_coredim_sizes(ufunc, op, op_core_num_dims, core_dim_flags,
+ core_dim_sizes, remap_axis);
if(retval < 0) {
goto fail;
}
+ /*
+ * Figure out the number of iterator creation dimensions,
+ * which is the broadcast dimensions + all the core dimensions of
+ * the outputs, so that the iterator can allocate those output
+ * dimensions following the rules of order='F', for example.
+ */
+ iter_ndim = broadcast_ndim;
+ for (i = nin; i < nop; ++i) {
+ iter_ndim += op_core_num_dims[i];
+ }
+ if (iter_ndim > NPY_MAXDIMS) {
+ PyErr_Format(PyExc_ValueError,
+ "too many dimensions for generalized ufunc %s",
+ ufunc_name);
+ retval = -1;
+ goto fail;
+ }
/* Fill in the initial part of 'iter_shape' */
for (idim = 0; idim < broadcast_ndim; ++idim) {
@@ -2537,11 +2697,7 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
int n;
if (op[i]) {
- /*
- * Note that n may be negative if broadcasting
- * extends into the core dimensions.
- */
- n = PyArray_NDIM(op[i]) - core_num_dims[i];
+ n = PyArray_NDIM(op[i]) - op_core_num_dims[i];
}
else {
n = broadcast_ndim;
@@ -2565,24 +2721,49 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Except for when it belongs to this output */
if (i >= nin) {
int dim_offset = ufunc->core_offsets[i];
- int num_dims = core_num_dims[i];
+ int num_removed = 0;
/*
* Fill in 'iter_shape' and 'op_axes' for the core dimensions
* of this output. Here, we have to be careful: if keepdims
- * was used, then this axis is not a real core dimension,
- * but is being added back for broadcasting, so its size is 1.
+ * was used, then the axes are not real core dimensions, but
+ * are being added back for broadcasting, so their size is 1.
+ * If the axis was removed, we should skip altogether.
*/
- for (idim = 0; idim < num_dims; ++idim) {
- iter_shape[j] = keepdims ? 1 : core_dim_sizes[
- ufunc->core_dim_ixs[dim_offset + idim]];
- op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
- ++j;
+ if (keepdims) {
+ for (idim = 0; idim < op_core_num_dims[i]; ++idim) {
+ iter_shape[j] = 1;
+ op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim);
+ ++j;
+ }
+ }
+ else {
+ for (idim = 0; idim < ufunc->core_num_dims[i]; ++idim) {
+ int core_index = dim_offset + idim;
+ int core_dim_index = ufunc->core_dim_ixs[core_index];
+ if ((core_dim_flags[core_dim_index] &
+ UFUNC_CORE_DIM_MISSING)) {
+ /* skip it */
+ num_removed++;
+ continue;
+ }
+ iter_shape[j] = core_dim_sizes[ufunc->core_dim_ixs[core_index]];
+ op_axes_arrays[i][j] = REMAP_AXIS(i, n + idim - num_removed);
+ ++j;
+ }
}
}
op_axes[i] = op_axes_arrays[i];
}
+#if NPY_UF_DBG_TRACING
+ printf("iter shapes:");
+ for (j=0; j < iter_ndim; j++) {
+ printf(" %ld", iter_shape[j]);
+ }
+ printf("\n");
+#endif
+
/* Get the buffersize and errormask */
if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) {
retval = -1;
@@ -2597,6 +2778,18 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
if (retval < 0) {
goto fail;
}
+ /*
+ * We don't write to all elements, and the iterator may make
+ * UPDATEIFCOPY temporary copies. The output arrays (unless they are
+ * allocated by the iterator itself) must be considered READWRITE by the
+ * iterator, so that the elements we don't write to are copied to the
+ * possible temporary array.
+ */
+ _ufunc_setup_flags(ufunc, NPY_ITER_COPY | NPY_UFUNC_DEFAULT_INPUT_FLAGS,
+ NPY_ITER_UPDATEIFCOPY |
+ NPY_ITER_READWRITE |
+ NPY_UFUNC_DEFAULT_OUTPUT_FLAGS,
+ op_flags);
/* For the generalized ufunc, we get the loop right away too */
retval = ufunc->legacy_inner_loop_selector(ufunc, dtypes,
&innerloop, &innerloopdata, &needs_api);
@@ -2639,28 +2832,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
* Set up the iterator per-op flags. For generalized ufuncs, we
* can't do buffering, so must COPY or UPDATEIFCOPY.
*/
- for (i = 0; i < nin; ++i) {
- op_flags[i] = NPY_ITER_READONLY |
- NPY_ITER_COPY |
- NPY_ITER_ALIGNED |
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- /*
- * If READWRITE flag has been set for this operand,
- * then clear default READONLY flag
- */
- op_flags[i] |= ufunc->op_flags[i];
- if (op_flags[i] & (NPY_ITER_READWRITE | NPY_ITER_WRITEONLY)) {
- op_flags[i] &= ~NPY_ITER_READONLY;
- }
- }
- for (i = nin; i < nop; ++i) {
- op_flags[i] = NPY_ITER_READWRITE|
- NPY_ITER_UPDATEIFCOPY|
- NPY_ITER_ALIGNED|
- NPY_ITER_ALLOCATE|
- NPY_ITER_NO_BROADCAST|
- NPY_ITER_OVERLAP_ASSUME_ELEMENTWISE;
- }
iter_flags = ufunc->iter_flags |
NPY_ITER_MULTI_INDEX |
@@ -2680,13 +2851,15 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
}
/* Fill in any allocated outputs */
- for (i = nin; i < nop; ++i) {
- if (op[i] == NULL) {
- op[i] = NpyIter_GetOperandArray(iter)[i];
- Py_INCREF(op[i]);
+ {
+ PyArrayObject **operands = NpyIter_GetOperandArray(iter);
+ for (i = 0; i < nop; ++i) {
+ if (op[i] == NULL) {
+ op[i] = operands[i];
+ Py_INCREF(op[i]);
+ }
}
}
-
/*
* Set up the inner strides array. Because we're not doing
* buffering, the strides are fixed throughout the looping.
@@ -2705,8 +2878,6 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
/* Copy the strides after the first nop */
idim = nop;
for (i = 0; i < nop; ++i) {
- int num_dims = ufunc->core_num_dims[i];
- int core_start_dim = PyArray_NDIM(op[i]) - num_dims;
/*
* Need to use the arrays in the iterator, not op, because
* a copy with a different-sized type may have been made.
@@ -2714,20 +2885,31 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
PyArrayObject *arr = NpyIter_GetOperandArray(iter)[i];
npy_intp *shape = PyArray_SHAPE(arr);
npy_intp *strides = PyArray_STRIDES(arr);
- for (j = 0; j < num_dims; ++j) {
- if (core_start_dim + j >= 0) {
- /*
- * Force the stride to zero when the shape is 1, so
- * that the broadcasting works right.
- */
- int remapped_axis = REMAP_AXIS(i, core_start_dim + j);
+ /*
+ * Could be negative if flexible dims are used, but not for
+ * keepdims, since those dimensions are allocated in arr.
+ */
+ int core_start_dim = PyArray_NDIM(arr) - op_core_num_dims[i];
+ int num_removed = 0;
+ int dim_offset = ufunc->core_offsets[i];
+
+ for (j = 0; j < ufunc->core_num_dims[i]; ++j) {
+ int core_dim_index = ufunc->core_dim_ixs[dim_offset + j];
+ /*
+ * Force zero stride when the shape is 1 (always the case for
+ * for missing dimensions), so that broadcasting works right.
+ */
+ if (core_dim_flags[core_dim_index] & UFUNC_CORE_DIM_MISSING) {
+ num_removed++;
+ inner_strides[idim++] = 0;
+ }
+ else {
+ int remapped_axis = REMAP_AXIS(i, core_start_dim + j - num_removed);
if (shape[remapped_axis] != 1) {
inner_strides[idim++] = strides[remapped_axis];
} else {
inner_strides[idim++] = 0;
}
- } else {
- inner_strides[idim++] = 0;
}
}
}
@@ -2858,8 +3040,10 @@ PyUFunc_GeneralizedFunction(PyUFuncObject *ufunc,
Py_XDECREF(axis);
Py_XDECREF(full_args.in);
Py_XDECREF(full_args.out);
+ PyArray_free(remap_axis_memory);
+ PyArray_free(remap_axis);
- NPY_UF_DBG_PRINT1("Returning code %d\n", reval);
+ NPY_UF_DBG_PRINT1("Returning code %d\n", retval);
return retval;
@@ -2900,7 +3084,8 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
int i, nop;
const char *ufunc_name;
int retval = -1, subok = 1;
- int need_fancy = 0;
+ npy_uint32 op_flags[NPY_MAXARGS];
+ npy_intp default_op_out_flags;
PyArray_Descr *dtypes[NPY_MAXARGS];
@@ -2959,13 +3144,6 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
return retval;
}
- /*
- * Use the masked loop if a wheremask was specified.
- */
- if (wheremask != NULL) {
- need_fancy = 1;
- }
-
/* Get the buffersize and errormask */
if (_get_bufsize_errmask(extobj, ufunc_name, &buffersize, &errormask) < 0) {
retval = -1;
@@ -2980,16 +3158,20 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
goto fail;
}
- /* Only do the trivial loop check for the unmasked version. */
- if (!need_fancy) {
- /*
- * This checks whether a trivial loop is ok, making copies of
- * scalar and one dimensional operands if that will help.
- */
- trivial_loop_ok = check_for_trivial_loop(ufunc, op, dtypes, buffersize);
- if (trivial_loop_ok < 0) {
- goto fail;
- }
+ if (wheremask != NULL) {
+ /* Set up the flags. */
+ default_op_out_flags = NPY_ITER_NO_SUBTYPE |
+ NPY_ITER_WRITEMASKED |
+ NPY_UFUNC_DEFAULT_OUTPUT_FLAGS;
+ _ufunc_setup_flags(ufunc, NPY_UFUNC_DEFAULT_INPUT_FLAGS,
+ default_op_out_flags, op_flags);
+ }
+ else {
+ /* Set up the flags. */
+ default_op_out_flags = NPY_ITER_WRITEONLY |
+ NPY_UFUNC_DEFAULT_OUTPUT_FLAGS;
+ _ufunc_setup_flags(ufunc, NPY_UFUNC_DEFAULT_INPUT_FLAGS,
+ default_op_out_flags, op_flags);
}
#if NPY_UF_DBG_TRACING
@@ -3017,23 +3199,46 @@ PyUFunc_GenericFunction(PyUFuncObject *ufunc,
_find_array_prepare(full_args, arr_prep, nin, nout);
}
- /* Start with the floating-point exception flags cleared */
- npy_clear_floatstatus_barrier((char*)&ufunc);
/* Do the ufunc loop */
- if (need_fancy) {
+ if (wheremask != NULL) {
NPY_UF_DBG_PRINT("Executing fancy inner loop\n");
+ if (nop + 1 > NPY_MAXARGS) {
+ PyErr_SetString(PyExc_ValueError,
+ "Too many operands when including where= parameter");
+ return -1;
+ }
+ op[nop] = wheremask;
+ dtypes[nop] = NULL;
+
+ /* Set up the flags */
+
+ npy_clear_floatstatus_barrier((char*)&ufunc);
retval = execute_fancy_ufunc_loop(ufunc, wheremask,
op, dtypes, order,
- buffersize, arr_prep, full_args);
+ buffersize, arr_prep, full_args, op_flags);
}
else {
NPY_UF_DBG_PRINT("Executing legacy inner loop\n");
+ /*
+ * This checks whether a trivial loop is ok, making copies of
+ * scalar and one dimensional operands if that will help.
+ * Since it requires dtypes, it can only be called after
+ * ufunc->type_resolver
+ */
+ trivial_loop_ok = check_for_trivial_loop(ufunc, op, dtypes, buffersize);
+ if (trivial_loop_ok < 0) {
+ goto fail;
+ }
+
+ /* check_for_trivial_loop on half-floats can overflow */
+ npy_clear_floatstatus_barrier((char*)&ufunc);
+
retval = execute_legacy_ufunc_loop(ufunc, trivial_loop_ok,
op, dtypes, order,
- buffersize, arr_prep, full_args);
+ buffersize, arr_prep, full_args, op_flags);
}
if (retval < 0) {
goto fail;
@@ -3240,12 +3445,15 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp *strides,
PyUFuncObject *ufunc = (PyUFuncObject *)data;
char *dataptrs_copy[3];
npy_intp strides_copy[3];
+ npy_bool masked;
/* The normal selected inner loop */
PyUFuncGenericFunction innerloop = NULL;
void *innerloopdata = NULL;
NPY_BEGIN_THREADS_DEF;
+ /* Get the number of operands, to determine whether "where" is used */
+ masked = (NpyIter_GetNOp(iter) == 3);
/* Get the inner loop */
iter_dtypes = NpyIter_GetDescrArray(iter);
@@ -3305,8 +3513,36 @@ reduce_loop(NpyIter *iter, char **dataptrs, npy_intp *strides,
strides_copy[0] = strides[0];
strides_copy[1] = strides[1];
strides_copy[2] = strides[0];
- innerloop(dataptrs_copy, countptr,
- strides_copy, innerloopdata);
+
+ if (!masked) {
+ innerloop(dataptrs_copy, countptr,
+ strides_copy, innerloopdata);
+ }
+ else {
+ npy_intp count = *countptr;
+ char *maskptr = dataptrs[2];
+ npy_intp mask_stride = strides[2];
+ /* Optimization for when the mask is broadcast */
+ npy_intp n = mask_stride == 0 ? count : 1;
+ while (count) {
+ char mask = *maskptr;
+ maskptr += mask_stride;
+ while (n < count && mask == *maskptr) {
+ n++;
+ maskptr += mask_stride;
+ }
+ /* If mask set, apply inner loop on this contiguous region */
+ if (mask) {
+ innerloop(dataptrs_copy, &n,
+ strides_copy, innerloopdata);
+ }
+ dataptrs_copy[0] += n * strides[0];
+ dataptrs_copy[1] += n * strides[1];
+ dataptrs_copy[2] = dataptrs_copy[0];
+ count -= n;
+ n = 1;
+ }
+ }
} while (iternext(iter));
finish_loop:
@@ -3335,7 +3571,7 @@ finish_loop:
static PyArrayObject *
PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
int naxes, int *axes, PyArray_Descr *odtype, int keepdims,
- PyObject *initial)
+ PyObject *initial, PyArrayObject *wheremask)
{
int iaxes, ndim;
npy_bool reorderable;
@@ -3401,7 +3637,7 @@ PyUFunc_Reduce(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *out,
return NULL;
}
- result = PyUFunc_ReduceWrapper(arr, out, NULL, dtype, dtype,
+ result = PyUFunc_ReduceWrapper(arr, out, wheremask, dtype, dtype,
NPY_UNSAFE_CASTING,
axis_flags, reorderable,
keepdims, 0,
@@ -3794,14 +4030,14 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
int *op_axes[3] = {op_axes_arrays[0], op_axes_arrays[1],
op_axes_arrays[2]};
npy_uint32 op_flags[3];
- int i, idim, ndim, otype_final;
+ int idim, ndim, otype_final;
int need_outer_iterator = 0;
NpyIter *iter = NULL;
/* The reduceat indices - ind must be validated outside this call */
npy_intp *reduceat_ind;
- npy_intp ind_size, red_axis_size;
+ npy_intp i, ind_size, red_axis_size;
/* The selected inner loop */
PyUFuncGenericFunction innerloop = NULL;
void *innerloopdata = NULL;
@@ -3887,7 +4123,7 @@ PyUFunc_Reduceat(PyUFuncObject *ufunc, PyArrayObject *arr, PyArrayObject *ind,
#endif
/* Set up the op_axes for the outer loop */
- for (i = 0, idim = 0; idim < ndim; ++idim) {
+ for (idim = 0; idim < ndim; ++idim) {
/* Use the i-th iteration dimension to match up ind */
if (idim == axis) {
op_axes_arrays[0][idim] = axis;
@@ -4158,7 +4394,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
int i, naxes=0, ndim;
int axes[NPY_MAXDIMS];
PyObject *axes_in = NULL;
- PyArrayObject *mp = NULL, *ret = NULL;
+ PyArrayObject *mp = NULL, *wheremask = NULL, *ret = NULL;
PyObject *op;
PyObject *obj_ind, *context;
PyArrayObject *indices = NULL;
@@ -4167,7 +4403,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
int keepdims = 0;
PyObject *initial = NULL;
static char *reduce_kwlist[] = {
- "array", "axis", "dtype", "out", "keepdims", "initial", NULL};
+ "array", "axis", "dtype", "out", "keepdims", "initial", "where", NULL};
static char *accumulate_kwlist[] = {
"array", "axis", "dtype", "out", NULL};
static char *reduceat_kwlist[] = {
@@ -4230,22 +4466,23 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
}
else if (operation == UFUNC_ACCUMULATE) {
if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&:accumulate",
- accumulate_kwlist,
- &op,
- &axes_in,
- PyArray_DescrConverter2, &otype,
- PyArray_OutputConverter, &out)) {
+ accumulate_kwlist,
+ &op,
+ &axes_in,
+ PyArray_DescrConverter2, &otype,
+ PyArray_OutputConverter, &out)) {
goto fail;
}
}
else {
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&iO:reduce",
- reduce_kwlist,
- &op,
- &axes_in,
- PyArray_DescrConverter2, &otype,
- PyArray_OutputConverter, &out,
- &keepdims, &initial)) {
+ if (!PyArg_ParseTupleAndKeywords(args, kwds, "O|OO&O&iOO&:reduce",
+ reduce_kwlist,
+ &op,
+ &axes_in,
+ PyArray_DescrConverter2, &otype,
+ PyArray_OutputConverter, &out,
+ &keepdims, &initial,
+ _wheremask_converter, &wheremask)) {
goto fail;
}
}
@@ -4275,11 +4512,17 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
/* Convert the 'axis' parameter into a list of axes */
if (axes_in == NULL) {
- naxes = 1;
- axes[0] = 0;
+ /* apply defaults */
+ if (ndim == 0) {
+ naxes = 0;
+ }
+ else {
+ naxes = 1;
+ axes[0] = 0;
+ }
}
- /* Convert 'None' into all the axes */
else if (axes_in == Py_None) {
+ /* Convert 'None' into all the axes */
naxes = ndim;
for (i = 0; i < naxes; ++i) {
axes[i] = i;
@@ -4304,40 +4547,28 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
axes[i] = (int)axis;
}
}
- /* Try to interpret axis as an integer */
else {
+ /* Try to interpret axis as an integer */
int axis = PyArray_PyIntAsInt(axes_in);
/* TODO: PyNumber_Index would be good to use here */
if (error_converting(axis)) {
goto fail;
}
- /* Special case letting axis={0 or -1} slip through for scalars */
- if (ndim == 0 && (axis == 0 || axis == -1)) {
- axis = 0;
- }
- else if (check_and_adjust_axis(&axis, ndim) < 0) {
- goto fail;
- }
- axes[0] = (int)axis;
- naxes = 1;
- }
-
- /* Check to see if input is zero-dimensional. */
- if (ndim == 0) {
/*
- * A reduction with no axes is still valid but trivial.
* As a special case for backwards compatibility in 'sum',
- * 'prod', et al, also allow a reduction where axis=0, even
+ * 'prod', et al, also allow a reduction for scalars even
* though this is technically incorrect.
*/
- naxes = 0;
-
- if (!(operation == UFUNC_REDUCE &&
- (naxes == 0 || (naxes == 1 && axes[0] == 0)))) {
- PyErr_Format(PyExc_TypeError, "cannot %s on a scalar",
- _reduce_type[operation]);
+ if (ndim == 0 && (axis == 0 || axis == -1)) {
+ naxes = 0;
+ }
+ else if (check_and_adjust_axis(&axis, ndim) < 0) {
goto fail;
}
+ else {
+ axes[0] = (int)axis;
+ naxes = 1;
+ }
}
/*
@@ -4376,9 +4607,14 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
switch(operation) {
case UFUNC_REDUCE:
ret = PyUFunc_Reduce(ufunc, mp, out, naxes, axes,
- otype, keepdims, initial);
+ otype, keepdims, initial, wheremask);
+ Py_XDECREF(wheremask);
break;
case UFUNC_ACCUMULATE:
+ if (ndim == 0) {
+ PyErr_SetString(PyExc_TypeError, "cannot accumulate on a scalar");
+ goto fail;
+ }
if (naxes != 1) {
PyErr_SetString(PyExc_ValueError,
"accumulate does not allow multiple axes");
@@ -4388,6 +4624,10 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
otype->type_num);
break;
case UFUNC_REDUCEAT:
+ if (ndim == 0) {
+ PyErr_SetString(PyExc_TypeError, "cannot reduceat on a scalar");
+ goto fail;
+ }
if (naxes != 1) {
PyErr_SetString(PyExc_ValueError,
"reduceat does not allow multiple axes");
@@ -4434,6 +4674,7 @@ PyUFunc_GenericReduction(PyUFuncObject *ufunc, PyObject *args,
fail:
Py_XDECREF(otype);
Py_XDECREF(mp);
+ Py_XDECREF(wheremask);
return NULL;
}
@@ -4502,6 +4743,9 @@ ufunc_generic_call(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
wrapped = _apply_array_wrap(wraparr[i], mps[j], &context);
mps[j] = NULL; /* Prevent fail double-freeing this */
if (wrapped == NULL) {
+ for (j = 0; j < i; j++) {
+ Py_DECREF(retobj[j]);
+ }
goto fail;
}
@@ -4599,7 +4843,7 @@ ufunc_seterr(PyObject *NPY_UNUSED(dummy), PyObject *args)
NPY_NO_EXPORT int
PyUFunc_ReplaceLoopBySignature(PyUFuncObject *func,
PyUFuncGenericFunction newfunc,
- int *signature,
+ const int *signature,
PyUFuncGenericFunction *oldfunc)
{
int i, j;
@@ -4643,8 +4887,21 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
const char *name, const char *doc,
int unused, const char *signature)
{
- PyUFuncObject *ufunc;
+ return PyUFunc_FromFuncAndDataAndSignatureAndIdentity(
+ func, data, types, ntypes, nin, nout, identity, name, doc,
+ unused, signature, NULL);
+}
+/*UFUNC_API*/
+NPY_NO_EXPORT PyObject *
+PyUFunc_FromFuncAndDataAndSignatureAndIdentity(PyUFuncGenericFunction *func, void **data,
+ char *types, int ntypes,
+ int nin, int nout, int identity,
+ const char *name, const char *doc,
+ const int unused, const char *signature,
+ PyObject *identity_value)
+{
+ PyUFuncObject *ufunc;
if (nin + nout > NPY_MAXARGS) {
PyErr_Format(PyExc_ValueError,
"Cannot construct a ufunc with more than %d operands "
@@ -4653,27 +4910,46 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
return NULL;
}
- ufunc = PyArray_malloc(sizeof(PyUFuncObject));
+ ufunc = PyObject_GC_New(PyUFuncObject, &PyUFunc_Type);
+ /*
+ * We use GC_New here for ufunc->obj, but do not use GC_Track since
+ * ufunc->obj is still NULL at the end of this function.
+ * See ufunc_frompyfunc where ufunc->obj is set and GC_Track is called.
+ */
if (ufunc == NULL) {
return NULL;
}
- PyObject_Init((PyObject *)ufunc, &PyUFunc_Type);
-
- ufunc->reserved1 = 0;
- ufunc->reserved2 = NULL;
ufunc->nin = nin;
ufunc->nout = nout;
ufunc->nargs = nin+nout;
ufunc->identity = identity;
+ if (ufunc->identity == PyUFunc_IdentityValue) {
+ Py_INCREF(identity_value);
+ ufunc->identity_value = identity_value;
+ }
+ else {
+ ufunc->identity_value = NULL;
+ }
ufunc->functions = func;
ufunc->data = data;
ufunc->types = types;
ufunc->ntypes = ntypes;
- ufunc->ptr = NULL;
+ ufunc->core_signature = NULL;
+ ufunc->core_enabled = 0;
ufunc->obj = NULL;
- ufunc->userloops=NULL;
+ ufunc->core_num_dims = NULL;
+ ufunc->core_num_dim_ix = 0;
+ ufunc->core_offsets = NULL;
+ ufunc->core_dim_ixs = NULL;
+ ufunc->core_dim_sizes = NULL;
+ ufunc->core_dim_flags = NULL;
+ ufunc->userloops = NULL;
+ ufunc->ptr = NULL;
+ ufunc->reserved2 = NULL;
+ ufunc->reserved1 = 0;
+ ufunc->iter_flags = 0;
/* Type resolution and inner loop selection functions */
ufunc->type_resolver = &PyUFunc_DefaultTypeResolver;
@@ -4690,19 +4966,11 @@ PyUFunc_FromFuncAndDataAndSignature(PyUFuncGenericFunction *func, void **data,
ufunc->op_flags = PyArray_malloc(sizeof(npy_uint32)*ufunc->nargs);
if (ufunc->op_flags == NULL) {
+ Py_DECREF(ufunc);
return PyErr_NoMemory();
}
memset(ufunc->op_flags, 0, sizeof(npy_uint32)*ufunc->nargs);
- ufunc->iter_flags = 0;
-
- /* generalized ufunc */
- ufunc->core_enabled = 0;
- ufunc->core_num_dim_ix = 0;
- ufunc->core_num_dims = NULL;
- ufunc->core_dim_ixs = NULL;
- ufunc->core_offsets = NULL;
- ufunc->core_signature = NULL;
if (signature != NULL) {
if (_parse_signature(ufunc, signature) != 0) {
Py_DECREF(ufunc);
@@ -4824,11 +5092,14 @@ _loop1d_list_free(void *ptr)
* instead of dtype type num values. This allows a 1-d loop to be registered
* for a structured array dtype or a custom dtype. The ufunc is called
* whenever any of it's input arguments match the user_dtype argument.
- * ufunc - ufunc object created from call to PyUFunc_FromFuncAndData
+ *
+ * ufunc - ufunc object created from call to PyUFunc_FromFuncAndData
* user_dtype - dtype that ufunc will be registered with
- * function - 1-d loop function pointer
+ * function - 1-d loop function pointer
* arg_dtypes - array of dtype objects describing the ufunc operands
- * data - arbitrary data pointer passed in to loop function
+ * data - arbitrary data pointer passed in to loop function
+ *
+ * returns 0 on success, -1 for failure
*/
/*UFUNC_API*/
NPY_NO_EXPORT int
@@ -4892,7 +5163,7 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
}
current = current->next;
}
- if (cmp == 0 && current->arg_dtypes == NULL) {
+ if (cmp == 0 && current != NULL && current->arg_dtypes == NULL) {
current->arg_dtypes = PyArray_malloc(ufunc->nargs *
sizeof(PyArray_Descr*));
if (arg_dtypes != NULL) {
@@ -4910,6 +5181,8 @@ PyUFunc_RegisterLoopForDescr(PyUFuncObject *ufunc,
current->nargs = ufunc->nargs;
}
else {
+ PyErr_SetString(PyExc_RuntimeError,
+ "loop already registered");
result = -1;
}
}
@@ -4927,7 +5200,7 @@ NPY_NO_EXPORT int
PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
int usertype,
PyUFuncGenericFunction function,
- int *arg_types,
+ const int *arg_types,
void *data)
{
PyArray_Descr *descr;
@@ -5047,15 +5320,23 @@ PyUFunc_RegisterLoopForType(PyUFuncObject *ufunc,
static void
ufunc_dealloc(PyUFuncObject *ufunc)
{
+ PyObject_GC_UnTrack((PyObject *)ufunc);
PyArray_free(ufunc->core_num_dims);
PyArray_free(ufunc->core_dim_ixs);
+ PyArray_free(ufunc->core_dim_sizes);
+ PyArray_free(ufunc->core_dim_flags);
PyArray_free(ufunc->core_offsets);
PyArray_free(ufunc->core_signature);
PyArray_free(ufunc->ptr);
PyArray_free(ufunc->op_flags);
Py_XDECREF(ufunc->userloops);
- Py_XDECREF(ufunc->obj);
- PyArray_free(ufunc);
+ if (ufunc->identity == PyUFunc_IdentityValue) {
+ Py_DECREF(ufunc->identity_value);
+ }
+ if (ufunc->obj != NULL) {
+ Py_DECREF(ufunc->obj);
+ }
+ PyObject_GC_Del(ufunc);
}
static PyObject *
@@ -5064,6 +5345,15 @@ ufunc_repr(PyUFuncObject *ufunc)
return PyUString_FromFormat("<ufunc '%s'>", ufunc->name);
}
+static int
+ufunc_traverse(PyUFuncObject *self, visitproc visit, void *arg)
+{
+ Py_VISIT(self->obj);
+ if (self->identity == PyUFunc_IdentityValue) {
+ Py_VISIT(self->identity_value);
+ }
+ return 0;
+}
/******************************************************************************
*** UFUNC METHODS ***
@@ -5086,6 +5376,8 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
PyArrayObject *ap1 = NULL, *ap2 = NULL, *ap_new = NULL;
PyObject *new_args, *tmp;
PyObject *shape1, *shape2, *newshape;
+ static PyObject *_numpy_matrix;
+
errval = PyUFunc_CheckOverride(ufunc, "outer", args, kwds, &override);
if (errval) {
@@ -5118,7 +5410,18 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
if (tmp == NULL) {
return NULL;
}
- ap1 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0);
+
+ npy_cache_import(
+ "numpy",
+ "matrix",
+ &_numpy_matrix);
+
+ if (PyObject_IsInstance(tmp, _numpy_matrix)) {
+ ap1 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0);
+ }
+ else {
+ ap1 = (PyArrayObject *) PyArray_FROM_O(tmp);
+ }
Py_DECREF(tmp);
if (ap1 == NULL) {
return NULL;
@@ -5127,7 +5430,12 @@ ufunc_outer(PyUFuncObject *ufunc, PyObject *args, PyObject *kwds)
if (tmp == NULL) {
return NULL;
}
- ap2 = (PyArrayObject *)PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0);
+ if (PyObject_IsInstance(tmp, _numpy_matrix)) {
+ ap2 = (PyArrayObject *) PyArray_FromObject(tmp, NPY_NOTYPE, 0, 0);
+ }
+ else {
+ ap2 = (PyArrayObject *) PyArray_FROM_O(tmp);
+ }
Py_DECREF(tmp);
if (ap2 == NULL) {
Py_DECREF(ap1);
@@ -5264,7 +5572,7 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
PyUFuncGenericFunction innerloop;
void *innerloopdata;
- int i;
+ npy_intp i;
int nop;
/* override vars */
@@ -5365,18 +5673,13 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
* Create dtypes array for either one or two input operands.
* The output operand is set to the first input operand
*/
- dtypes[0] = PyArray_DESCR(op1_array);
operands[0] = op1_array;
if (op2_array != NULL) {
- dtypes[1] = PyArray_DESCR(op2_array);
- dtypes[2] = dtypes[0];
operands[1] = op2_array;
operands[2] = op1_array;
nop = 3;
}
else {
- dtypes[1] = dtypes[0];
- dtypes[2] = NULL;
operands[1] = op1_array;
operands[2] = NULL;
nop = 2;
@@ -5533,9 +5836,10 @@ ufunc_at(PyUFuncObject *ufunc, PyObject *args)
Py_XDECREF(op2_array);
Py_XDECREF(iter);
Py_XDECREF(iter2);
- Py_XDECREF(array_operands[0]);
- Py_XDECREF(array_operands[1]);
- Py_XDECREF(array_operands[2]);
+ for (i = 0; i < 3; i++) {
+ Py_XDECREF(dtypes[i]);
+ Py_XDECREF(array_operands[i]);
+ }
if (needs_api && PyErr_Occurred()) {
return NULL;
@@ -5552,9 +5856,10 @@ fail:
Py_XDECREF(op2_array);
Py_XDECREF(iter);
Py_XDECREF(iter2);
- Py_XDECREF(array_operands[0]);
- Py_XDECREF(array_operands[1]);
- Py_XDECREF(array_operands[2]);
+ for (i = 0; i < 3; i++) {
+ Py_XDECREF(dtypes[i]);
+ Py_XDECREF(array_operands[i]);
+ }
return NULL;
}
@@ -5780,9 +6085,9 @@ NPY_NO_EXPORT PyTypeObject PyUFunc_Type = {
0, /* tp_getattro */
0, /* tp_setattro */
0, /* tp_as_buffer */
- Py_TPFLAGS_DEFAULT, /* tp_flags */
+ Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, /* tp_flags */
0, /* tp_doc */
- 0, /* tp_traverse */
+ (traverseproc)ufunc_traverse, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
diff --git a/numpy/core/src/umath/ufunc_type_resolution.c b/numpy/core/src/umath/ufunc_type_resolution.c
index 807b03512..9be7b63a0 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.c
+++ b/numpy/core/src/umath/ufunc_type_resolution.c
@@ -12,15 +12,44 @@
#define _MULTIARRAYMODULE
#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+#include <stdbool.h>
+
#include "Python.h"
#include "npy_config.h"
#include "npy_pycompat.h"
+#include "npy_import.h"
#include "numpy/ufuncobject.h"
#include "ufunc_type_resolution.h"
#include "ufunc_object.h"
#include "common.h"
+#include "convert_datatype.h"
+
+#include "mem_overlap.h"
+#if defined(HAVE_CBLAS)
+#include "cblasfuncs.h"
+#endif
+
+static PyObject *
+npy_casting_to_py_object(NPY_CASTING casting)
+{
+ switch (casting) {
+ case NPY_NO_CASTING:
+ return PyUString_FromString("no");
+ case NPY_EQUIV_CASTING:
+ return PyUString_FromString("equiv");
+ case NPY_SAFE_CASTING:
+ return PyUString_FromString("safe");
+ case NPY_SAME_KIND_CASTING:
+ return PyUString_FromString("same_kind");
+ case NPY_UNSAFE_CASTING:
+ return PyUString_FromString("unsafe");
+ default:
+ return PyInt_FromLong(casting);
+ }
+}
+
static const char *
npy_casting_to_string(NPY_CASTING casting)
@@ -40,6 +69,158 @@ npy_casting_to_string(NPY_CASTING casting)
return "<unknown>";
}
}
+
+/**
+ * Always returns -1 to indicate the exception was raised, for convenience
+ */
+static int
+raise_binary_type_reso_error(PyUFuncObject *ufunc, PyArrayObject **operands) {
+ static PyObject *exc_type = NULL;
+ PyObject *exc_value;
+
+ npy_cache_import(
+ "numpy.core._exceptions", "_UFuncBinaryResolutionError",
+ &exc_type);
+ if (exc_type == NULL) {
+ return -1;
+ }
+
+ /* produce an error object */
+ exc_value = Py_BuildValue(
+ "O(OO)", ufunc,
+ (PyObject *)PyArray_DESCR(operands[0]),
+ (PyObject *)PyArray_DESCR(operands[1])
+ );
+ if (exc_value == NULL){
+ return -1;
+ }
+ PyErr_SetObject(exc_type, exc_value);
+ Py_DECREF(exc_value);
+
+ return -1;
+}
+
+/** Helper function to raise UFuncNoLoopError
+ * Always returns -1 to indicate the exception was raised, for convenience
+ */
+static int
+raise_no_loop_found_error(
+ PyUFuncObject *ufunc, PyArray_Descr **dtypes)
+{
+ static PyObject *exc_type = NULL;
+ PyObject *exc_value;
+ PyObject *dtypes_tup;
+ npy_intp i;
+
+ npy_cache_import(
+ "numpy.core._exceptions", "_UFuncNoLoopError",
+ &exc_type);
+ if (exc_type == NULL) {
+ return -1;
+ }
+
+ /* convert dtypes to a tuple */
+ dtypes_tup = PyTuple_New(ufunc->nargs);
+ if (dtypes_tup == NULL) {
+ return -1;
+ }
+ for (i = 0; i < ufunc->nargs; ++i) {
+ Py_INCREF(dtypes[i]);
+ PyTuple_SET_ITEM(dtypes_tup, i, (PyObject *)dtypes[i]);
+ }
+
+ /* produce an error object */
+ exc_value = PyTuple_Pack(2, ufunc, dtypes_tup);
+ Py_DECREF(dtypes_tup);
+ if (exc_value == NULL){
+ return -1;
+ }
+ PyErr_SetObject(exc_type, exc_value);
+ Py_DECREF(exc_value);
+
+ return -1;
+}
+
+static int
+raise_casting_error(
+ PyObject *exc_type,
+ PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArray_Descr *from,
+ PyArray_Descr *to,
+ npy_intp i)
+{
+ PyObject *exc_value;
+ PyObject *casting_value;
+
+ casting_value = npy_casting_to_py_object(casting);
+ if (casting_value == NULL) {
+ return -1;
+ }
+
+ exc_value = Py_BuildValue(
+ "ONOOi",
+ ufunc,
+ casting_value,
+ (PyObject *)from,
+ (PyObject *)to,
+ i
+ );
+ if (exc_value == NULL){
+ return -1;
+ }
+ PyErr_SetObject(exc_type, exc_value);
+ Py_DECREF(exc_value);
+
+ return -1;
+}
+
+/** Helper function to raise UFuncInputCastingError
+ * Always returns -1 to indicate the exception was raised, for convenience
+ */
+static int
+raise_input_casting_error(
+ PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArray_Descr *from,
+ PyArray_Descr *to,
+ npy_intp i)
+{
+ static PyObject *exc_type = NULL;
+ npy_cache_import(
+ "numpy.core._exceptions", "_UFuncInputCastingError",
+ &exc_type);
+ if (exc_type == NULL) {
+ return -1;
+ }
+
+ return raise_casting_error(exc_type, ufunc, casting, from, to, i);
+}
+
+
+/** Helper function to raise UFuncOutputCastingError
+ * Always returns -1 to indicate the exception was raised, for convenience
+ */
+static int
+raise_output_casting_error(
+ PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArray_Descr *from,
+ PyArray_Descr *to,
+ npy_intp i)
+{
+ static PyObject *exc_type = NULL;
+ npy_cache_import(
+ "numpy.core._exceptions", "_UFuncOutputCastingError",
+ &exc_type);
+ if (exc_type == NULL) {
+ return -1;
+ }
+
+ return raise_casting_error(exc_type, ufunc, casting, from, to, i);
+}
+
+
/*UFUNC_API
*
* Validates that the input operands can be cast to
@@ -55,45 +236,18 @@ PyUFunc_ValidateCasting(PyUFuncObject *ufunc,
PyArray_Descr **dtypes)
{
int i, nin = ufunc->nin, nop = nin + ufunc->nout;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
for (i = 0; i < nop; ++i) {
if (i < nin) {
if (!PyArray_CanCastArrayTo(operands[i], dtypes[i], casting)) {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("Cannot cast ufunc %s "
- "input from ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[i])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)dtypes[i]));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" with casting rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
+ return raise_input_casting_error(
+ ufunc, casting, PyArray_DESCR(operands[i]), dtypes[i], i);
}
} else if (operands[i] != NULL) {
if (!PyArray_CanCastTypeTo(dtypes[i],
PyArray_DESCR(operands[i]), casting)) {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("Cannot cast ufunc %s "
- "output from ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)dtypes[i]));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" to "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[i])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromFormat(" with casting rule %s",
- npy_casting_to_string(casting)));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
+ return raise_output_casting_error(
+ ufunc, casting, dtypes[i], PyArray_DESCR(operands[i]), i);
}
}
}
@@ -266,99 +420,6 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
return 0;
}
-/*
- * This function applies special type resolution rules for the case
- * where all the functions have the pattern X->X, copying
- * the input descr directly so that metadata is maintained.
- *
- * Note that a simpler linear search through the functions loop
- * is still done, but switching to a simple array lookup for
- * built-in types would be better at some point.
- *
- * Returns 0 on success, -1 on error.
- */
-NPY_NO_EXPORT int
-PyUFunc_SimpleUnaryOperationTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes)
-{
- int i, type_num1;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
-
- if (ufunc->nin != 1 || ufunc->nout != 1) {
- PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
- "to use unary operation type resolution but has "
- "the wrong number of inputs or outputs",
- ufunc_name);
- return -1;
- }
-
- /*
- * Use the default type resolution if there's a custom data type
- * or object arrays.
- */
- type_num1 = PyArray_DESCR(operands[0])->type_num;
- if (type_num1 >= NPY_NTYPES || type_num1 == NPY_OBJECT) {
- return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
- type_tup, out_dtypes);
- }
-
- if (type_tup == NULL) {
- /* Input types are the result type */
- out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
- if (out_dtypes[0] == NULL) {
- return -1;
- }
- out_dtypes[1] = out_dtypes[0];
- Py_INCREF(out_dtypes[1]);
- }
- else {
- PyObject *item;
- PyArray_Descr *dtype = NULL;
-
- /*
- * If the type tuple isn't a single-element tuple, let the
- * default type resolution handle this one.
- */
- if (!PyTuple_Check(type_tup) || PyTuple_GET_SIZE(type_tup) != 1) {
- return PyUFunc_DefaultTypeResolver(ufunc, casting,
- operands, type_tup, out_dtypes);
- }
-
- item = PyTuple_GET_ITEM(type_tup, 0);
-
- if (item == Py_None) {
- PyErr_SetString(PyExc_ValueError,
- "require data type in the type tuple");
- return -1;
- }
- else if (!PyArray_DescrConverter(item, &dtype)) {
- return -1;
- }
-
- out_dtypes[0] = ensure_dtype_nbo(dtype);
- if (out_dtypes[0] == NULL) {
- return -1;
- }
- out_dtypes[1] = out_dtypes[0];
- Py_INCREF(out_dtypes[1]);
- }
-
- /* Check against the casting rules */
- if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
- for (i = 0; i < 2; ++i) {
- Py_DECREF(out_dtypes[i]);
- out_dtypes[i] = NULL;
- }
- return -1;
- }
-
- return 0;
-}
-
-
NPY_NO_EXPORT int
PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
@@ -367,7 +428,7 @@ PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes)
{
int ret;
- ret = PyUFunc_SimpleUnaryOperationTypeResolver(ufunc, casting, operands,
+ ret = PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting, operands,
type_tup, out_dtypes);
if (ret < 0) {
return ret;
@@ -397,16 +458,15 @@ PyUFunc_OnesLikeTypeResolver(PyUFuncObject *ufunc,
PyObject *type_tup,
PyArray_Descr **out_dtypes)
{
- return PyUFunc_SimpleUnaryOperationTypeResolver(ufunc,
+ return PyUFunc_SimpleUniformOperationTypeResolver(ufunc,
NPY_UNSAFE_CASTING,
operands, type_tup, out_dtypes);
}
-
/*
* This function applies special type resolution rules for the case
- * where all the functions have the pattern XX->X, using
- * PyArray_ResultType instead of a linear search to get the best
+ * where all of the types in the signature are the same, eg XX->X or XX->XX.
+ * It uses PyArray_ResultType instead of a linear search to get the best
* loop.
*
* Note that a simpler linear search through the functions loop
@@ -416,45 +476,52 @@ PyUFunc_OnesLikeTypeResolver(PyUFuncObject *ufunc,
* Returns 0 on success, -1 on error.
*/
NPY_NO_EXPORT int
-PyUFunc_SimpleBinaryOperationTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes)
+PyUFunc_SimpleUniformOperationTypeResolver(
+ PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
{
- int i, type_num1, type_num2;
const char *ufunc_name = ufunc_get_name_cstr(ufunc);
- if (ufunc->nin != 2 || ufunc->nout != 1) {
+ if (ufunc->nin < 1) {
PyErr_Format(PyExc_RuntimeError, "ufunc %s is configured "
- "to use binary operation type resolution but has "
- "the wrong number of inputs or outputs",
+ "to use uniform operation type resolution but has "
+ "no inputs",
ufunc_name);
return -1;
}
+ int nop = ufunc->nin + ufunc->nout;
/*
- * Use the default type resolution if there's a custom data type
- * or object arrays.
+ * There's a custom data type or an object array
*/
- type_num1 = PyArray_DESCR(operands[0])->type_num;
- type_num2 = PyArray_DESCR(operands[1])->type_num;
- if (type_num1 >= NPY_NTYPES || type_num2 >= NPY_NTYPES ||
- type_num1 == NPY_OBJECT || type_num2 == NPY_OBJECT) {
+ bool has_custom_or_object = false;
+ for (int iop = 0; iop < ufunc->nin; iop++) {
+ int type_num = PyArray_DESCR(operands[iop])->type_num;
+ if (type_num >= NPY_NTYPES || type_num == NPY_OBJECT) {
+ has_custom_or_object = true;
+ break;
+ }
+ }
+
+ if (has_custom_or_object) {
return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
type_tup, out_dtypes);
}
if (type_tup == NULL) {
- /* Input types are the result type */
- out_dtypes[0] = PyArray_ResultType(2, operands, 0, NULL);
+ /* PyArray_ResultType forgets to force a byte order when n == 1 */
+ if (ufunc->nin == 1){
+ out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ }
+ else {
+ out_dtypes[0] = PyArray_ResultType(ufunc->nin, operands, 0, NULL);
+ }
if (out_dtypes[0] == NULL) {
return -1;
}
- out_dtypes[1] = out_dtypes[0];
- Py_INCREF(out_dtypes[1]);
- out_dtypes[2] = out_dtypes[0];
- Py_INCREF(out_dtypes[2]);
}
else {
PyObject *item;
@@ -481,20 +548,23 @@ PyUFunc_SimpleBinaryOperationTypeResolver(PyUFuncObject *ufunc,
}
out_dtypes[0] = ensure_dtype_nbo(dtype);
+ Py_DECREF(dtype);
if (out_dtypes[0] == NULL) {
return -1;
}
- out_dtypes[1] = out_dtypes[0];
- Py_INCREF(out_dtypes[1]);
- out_dtypes[2] = out_dtypes[0];
- Py_INCREF(out_dtypes[2]);
+ }
+
+ /* All types are the same - copy the first one to the rest */
+ for (int iop = 1; iop < nop; iop++) {
+ out_dtypes[iop] = out_dtypes[0];
+ Py_INCREF(out_dtypes[iop]);
}
/* Check against the casting rules */
if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
- for (i = 0; i < 3; ++i) {
- Py_DECREF(out_dtypes[i]);
- out_dtypes[i] = NULL;
+ for (int iop = 0; iop < nop; iop++) {
+ Py_DECREF(out_dtypes[iop]);
+ out_dtypes[iop] = NULL;
}
return -1;
}
@@ -522,7 +592,7 @@ PyUFunc_AbsoluteTypeResolver(PyUFuncObject *ufunc,
type_tup, out_dtypes);
}
else {
- return PyUFunc_SimpleUnaryOperationTypeResolver(ufunc, casting,
+ return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting,
operands, type_tup, out_dtypes);
}
}
@@ -553,6 +623,26 @@ PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc,
return 0;
}
+
+NPY_NO_EXPORT int
+PyUFunc_IsFiniteTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
+{
+ if (!PyTypeNum_ISDATETIME(PyArray_DESCR(operands[0])->type_num)) {
+ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
+ }
+
+ out_dtypes[0] = ensure_dtype_nbo(PyArray_DESCR(operands[0]));
+ out_dtypes[1] = PyArray_DescrFromType(NPY_BOOL);
+
+ return 0;
+}
+
+
/*
* Creates a new NPY_TIMEDELTA dtype, copying the datetime metadata
* from the given dtype.
@@ -605,14 +695,13 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
/* Use the default when datetime and timedelta are not involved */
if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
- return PyUFunc_SimpleBinaryOperationTypeResolver(ufunc, casting,
+ return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting,
operands, type_tup, out_dtypes);
}
@@ -661,7 +750,7 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (type_num1 == NPY_DATETIME) {
@@ -703,7 +792,7 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
@@ -739,11 +828,11 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -756,21 +845,6 @@ PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
}
return 0;
-
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
- }
}
/*
@@ -793,7 +867,6 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -801,7 +874,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
/* Use the default when datetime and timedelta are not involved */
if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
int ret;
- ret = PyUFunc_SimpleBinaryOperationTypeResolver(ufunc, casting,
+ ret = PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting,
operands, type_tup, out_dtypes);
if (ret < 0) {
return ret;
@@ -846,7 +919,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (type_num1 == NPY_DATETIME) {
@@ -904,7 +977,7 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
Py_INCREF(out_dtypes[1]);
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
@@ -922,11 +995,11 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_TIMEDELTA;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -939,21 +1012,6 @@ PyUFunc_SubtractionTypeResolver(PyUFuncObject *ufunc,
}
return 0;
-
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
- }
}
/*
@@ -973,14 +1031,13 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
/* Use the default when datetime and timedelta are not involved */
if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
- return PyUFunc_SimpleBinaryOperationTypeResolver(ufunc, casting,
+ return PyUFunc_SimpleUniformOperationTypeResolver(ufunc, casting,
operands, type_tup, out_dtypes);
}
@@ -1020,7 +1077,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_DOUBLE;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISINTEGER(type_num1) || PyTypeNum_ISBOOL(type_num1)) {
@@ -1042,7 +1099,7 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_LONGLONG;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else if (PyTypeNum_ISFLOAT(type_num1)) {
@@ -1064,11 +1121,11 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
type_num1 = NPY_DOUBLE;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -1081,21 +1138,6 @@ PyUFunc_MultiplicationTypeResolver(PyUFuncObject *ufunc,
}
return 0;
-
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
- return -1;
- }
}
@@ -1115,7 +1157,6 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
{
int type_num1, type_num2;
int i;
- const char *ufunc_name = ufunc_get_name_cstr(ufunc);
type_num1 = PyArray_DESCR(operands[0])->type_num;
type_num2 = PyArray_DESCR(operands[1])->type_num;
@@ -1139,7 +1180,16 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
}
out_dtypes[1] = out_dtypes[0];
Py_INCREF(out_dtypes[1]);
+
+ /*
+ * TODO: split function into truediv and floordiv resolvers
+ */
+ if (strcmp(ufunc->name, "floor_divide") == 0) {
+ out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG);
+ }
+ else {
out_dtypes[2] = PyArray_DescrFromType(NPY_DOUBLE);
+ }
if (out_dtypes[2] == NULL) {
Py_DECREF(out_dtypes[0]);
out_dtypes[0] = NULL;
@@ -1183,11 +1233,11 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
type_num2 = NPY_DOUBLE;
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
}
else {
- goto type_reso_error;
+ return raise_binary_type_reso_error(ufunc, operands);
}
/* Check against the casting rules */
@@ -1200,21 +1250,57 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
}
return 0;
+}
+
+
+NPY_NO_EXPORT int
+PyUFunc_RemainderTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
+{
+ int type_num1, type_num2;
+ int i;
+
+ type_num1 = PyArray_DESCR(operands[0])->type_num;
+ type_num2 = PyArray_DESCR(operands[1])->type_num;
+
+ /* Use the default when datetime and timedelta are not involved */
+ if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
+ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
+ }
+ if (type_num1 == NPY_TIMEDELTA) {
+ if (type_num2 == NPY_TIMEDELTA) {
+ out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]),
+ PyArray_DESCR(operands[1]));
+ if (out_dtypes[0] == NULL) {
+ return -1;
+ }
+ out_dtypes[1] = out_dtypes[0];
+ Py_INCREF(out_dtypes[1]);
+ out_dtypes[2] = out_dtypes[0];
+ Py_INCREF(out_dtypes[2]);
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
-type_reso_error: {
- PyObject *errmsg;
- errmsg = PyUString_FromFormat("ufunc %s cannot use operands "
- "with types ", ufunc_name);
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[0])));
- PyUString_ConcatAndDel(&errmsg,
- PyUString_FromString(" and "));
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)PyArray_DESCR(operands[1])));
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
+ /* Check against the casting rules */
+ if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
+ for (i = 0; i < 3; ++i) {
+ Py_DECREF(out_dtypes[i]);
+ out_dtypes[i] = NULL;
+ }
return -1;
}
+
+ return 0;
}
@@ -1275,7 +1361,7 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
PyObject *type_tup,
PyArray_Descr **out_dtypes)
{
- /* Depreciation checks needed only on python 2 */
+ /* Deprecation checks needed only on python 2 */
#if !defined(NPY_PY3K)
int type_num1, type_num2;
@@ -1293,7 +1379,6 @@ PyUFunc_MixedDivisionTypeResolver(PyUFuncObject *ufunc,
type_tup, out_dtypes);
}
-
static int
find_userloop(PyUFuncObject *ufunc,
PyArray_Descr **dtypes,
@@ -1363,12 +1448,8 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc,
{
int nargs = ufunc->nargs;
char *types;
- const char *ufunc_name;
- PyObject *errmsg;
int i, j;
- ufunc_name = ufunc_get_name_cstr(ufunc);
-
/*
* If there are user-loops search them first.
* TODO: There needs to be a loop selection acceleration structure,
@@ -1403,19 +1484,7 @@ PyUFunc_DefaultLegacyInnerLoopSelector(PyUFuncObject *ufunc,
types += nargs;
}
- errmsg = PyUString_FromFormat("ufunc '%s' did not contain a loop "
- "with signature matching types ", ufunc_name);
- for (i = 0; i < nargs; ++i) {
- PyUString_ConcatAndDel(&errmsg,
- PyObject_Repr((PyObject *)dtypes[i]));
- if (i < nargs - 1) {
- PyUString_ConcatAndDel(&errmsg, PyUString_FromString(" "));
- }
- }
- PyErr_SetObject(PyExc_TypeError, errmsg);
- Py_DECREF(errmsg);
-
- return -1;
+ return raise_no_loop_found_error(ufunc, dtypes);
}
typedef struct {
@@ -1680,7 +1749,7 @@ set_ufunc_loop_data_types(PyUFuncObject *self, PyArrayObject **op,
}
/*
* For outputs, copy the dtype from op[0] if the type_num
- * matches, similarly to preserve metdata.
+ * matches, similarly to preserve metadata.
*/
else if (i >= nin && op[0] != NULL &&
PyArray_DESCR(op[0])->type_num == type_nums[i]) {
@@ -1871,73 +1940,6 @@ type_tuple_userloop_type_resolver(PyUFuncObject *self,
return 0;
}
-/*
- * Provides an ordering for the dtype 'kind' character codes, to help
- * determine when to use the min_scalar_type function. This groups
- * 'kind' into boolean, integer, floating point, and everything else.
- */
-
-static int
-dtype_kind_to_simplified_ordering(char kind)
-{
- switch (kind) {
- /* Boolean kind */
- case 'b':
- return 0;
- /* Unsigned int kind */
- case 'u':
- /* Signed int kind */
- case 'i':
- return 1;
- /* Float kind */
- case 'f':
- /* Complex kind */
- case 'c':
- return 2;
- /* Anything else */
- default:
- return 3;
- }
-}
-
-static int
-should_use_min_scalar(PyArrayObject **op, int nop)
-{
- int i, use_min_scalar, kind;
- int all_scalars = 1, max_scalar_kind = -1, max_array_kind = -1;
-
- /*
- * Determine if there are any scalars, and if so, whether
- * the maximum "kind" of the scalars surpasses the maximum
- * "kind" of the arrays
- */
- use_min_scalar = 0;
- if (nop > 1) {
- for(i = 0; i < nop; ++i) {
- kind = dtype_kind_to_simplified_ordering(
- PyArray_DESCR(op[i])->kind);
- if (PyArray_NDIM(op[i]) == 0) {
- if (kind > max_scalar_kind) {
- max_scalar_kind = kind;
- }
- }
- else {
- all_scalars = 0;
- if (kind > max_array_kind) {
- max_array_kind = kind;
- }
-
- }
- }
-
- /* Indicate whether to use the min_scalar_type function */
- if (!all_scalars && max_array_kind >= max_scalar_kind) {
- use_min_scalar = 1;
- }
- }
-
- return use_min_scalar;
-}
/*
* Does a linear search for the best inner loop of the ufunc.
@@ -1956,14 +1958,15 @@ linear_search_type_resolver(PyUFuncObject *self,
npy_intp i, j, nin = self->nin, nop = nin + self->nout;
int types[NPY_MAXARGS];
const char *ufunc_name;
- int no_castable_output, use_min_scalar;
+ int no_castable_output = 0;
+ int use_min_scalar;
/* For making a better error message on coercion error */
char err_dst_typecode = '-', err_src_typecode = '-';
ufunc_name = ufunc_get_name_cstr(self);
- use_min_scalar = should_use_min_scalar(op, nin);
+ use_min_scalar = should_use_min_scalar(nin, op, 0, NULL);
/* If the ufunc has userloops, search for them. */
if (self->userloops) {
@@ -2072,7 +2075,7 @@ type_tuple_type_resolver(PyUFuncObject *self,
ufunc_name = ufunc_get_name_cstr(self);
- use_min_scalar = should_use_min_scalar(op, nin);
+ use_min_scalar = should_use_min_scalar(nin, op, 0, NULL);
/* Fill in specified_types from the tuple or string */
if (PyTuple_Check(type_tup)) {
@@ -2232,8 +2235,56 @@ type_tuple_type_resolver(PyUFuncObject *self,
/* If no function was found, throw an error */
PyErr_Format(PyExc_TypeError,
- "No loop matching the specified signature and casting\n"
+ "No loop matching the specified signature and casting "
"was found for ufunc %s", ufunc_name);
return -1;
}
+
+NPY_NO_EXPORT int
+PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes)
+{
+ int type_num1, type_num2;
+ int i;
+
+ type_num1 = PyArray_DESCR(operands[0])->type_num;
+ type_num2 = PyArray_DESCR(operands[1])->type_num;
+
+ /* Use the default when datetime and timedelta are not involved */
+ if (!PyTypeNum_ISDATETIME(type_num1) && !PyTypeNum_ISDATETIME(type_num2)) {
+ return PyUFunc_DefaultTypeResolver(ufunc, casting, operands,
+ type_tup, out_dtypes);
+ }
+ if (type_num1 == NPY_TIMEDELTA) {
+ if (type_num2 == NPY_TIMEDELTA) {
+ out_dtypes[0] = PyArray_PromoteTypes(PyArray_DESCR(operands[0]),
+ PyArray_DESCR(operands[1]));
+ out_dtypes[1] = out_dtypes[0];
+ Py_INCREF(out_dtypes[1]);
+ out_dtypes[2] = PyArray_DescrFromType(NPY_LONGLONG);
+ out_dtypes[3] = out_dtypes[0];
+ Py_INCREF(out_dtypes[3]);
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
+ }
+ else {
+ return raise_binary_type_reso_error(ufunc, operands);
+ }
+
+ /* Check against the casting rules */
+ if (PyUFunc_ValidateCasting(ufunc, casting, operands, out_dtypes) < 0) {
+ for (i = 0; i < 4; ++i) {
+ Py_DECREF(out_dtypes[i]);
+ out_dtypes[i] = NULL;
+ }
+ return -1;
+ }
+
+ return 0;
+}
diff --git a/numpy/core/src/umath/ufunc_type_resolution.h b/numpy/core/src/umath/ufunc_type_resolution.h
index fa9f1dbfa..a4e670a8e 100644
--- a/numpy/core/src/umath/ufunc_type_resolution.h
+++ b/numpy/core/src/umath/ufunc_type_resolution.h
@@ -9,13 +9,6 @@ PyUFunc_SimpleBinaryComparisonTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes);
NPY_NO_EXPORT int
-PyUFunc_SimpleUnaryOperationTypeResolver(PyUFuncObject *ufunc,
- NPY_CASTING casting,
- PyArrayObject **operands,
- PyObject *type_tup,
- PyArray_Descr **out_dtypes);
-
-NPY_NO_EXPORT int
PyUFunc_NegativeTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
@@ -30,7 +23,7 @@ PyUFunc_OnesLikeTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes);
NPY_NO_EXPORT int
-PyUFunc_SimpleBinaryOperationTypeResolver(PyUFuncObject *ufunc,
+PyUFunc_SimpleUniformOperationTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
PyObject *type_tup,
@@ -51,6 +44,13 @@ PyUFunc_IsNaTTypeResolver(PyUFuncObject *ufunc,
PyArray_Descr **out_dtypes);
NPY_NO_EXPORT int
+PyUFunc_IsFiniteTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
+NPY_NO_EXPORT int
PyUFunc_AdditionTypeResolver(PyUFuncObject *ufunc,
NPY_CASTING casting,
PyArrayObject **operands,
@@ -92,6 +92,20 @@ PyUFunc_DivisionTypeResolver(PyUFuncObject *ufunc,
PyObject *type_tup,
PyArray_Descr **out_dtypes);
+NPY_NO_EXPORT int
+PyUFunc_RemainderTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
+NPY_NO_EXPORT int
+PyUFunc_DivmodTypeResolver(PyUFuncObject *ufunc,
+ NPY_CASTING casting,
+ PyArrayObject **operands,
+ PyObject *type_tup,
+ PyArray_Descr **out_dtypes);
+
/*
* Does a linear search for the best inner loop of the ufunc.
*
@@ -138,5 +152,4 @@ PyUFunc_DefaultMaskedInnerLoopSelector(PyUFuncObject *ufunc,
NpyAuxData **out_innerloopdata,
int *out_needs_api);
-
#endif
diff --git a/numpy/core/src/umath/umathmodule.c b/numpy/core/src/umath/umathmodule.c
index 20bd2b0a8..6ec474376 100644
--- a/numpy/core/src/umath/umathmodule.c
+++ b/numpy/core/src/umath/umathmodule.c
@@ -29,6 +29,7 @@
#include "abstract.h"
#include "numpy/npy_math.h"
+#include "number.h"
static PyUFuncGenericFunction pyfunc_functions[] = {PyUFunc_On_Om};
@@ -160,6 +161,7 @@ ufunc_frompyfunc(PyObject *NPY_UNUSED(dummy), PyObject *args, PyObject *NPY_UNUS
self->type_resolver = &object_ufunc_type_resolver;
self->legacy_inner_loop_selector = &object_ufunc_loop_selector;
+ PyObject_GC_Track(self);
return (PyObject *)self;
}
@@ -169,7 +171,7 @@ PyObject *
add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
{
PyUFuncObject *ufunc;
- PyObject *str;
+ PyObject *str, *tmp;
char *docstr, *newdocstr;
#if defined(NPY_PY3K)
@@ -177,11 +179,15 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
&PyUnicode_Type, &str)) {
return NULL;
}
- docstr = PyBytes_AS_STRING(PyUnicode_AsUTF8String(str));
+ tmp = PyUnicode_AsUTF8String(str);
+ if (tmp == NULL) {
+ return NULL;
+ }
+ docstr = PyBytes_AS_STRING(tmp);
#else
if (!PyArg_ParseTuple(args, "O!O!:_add_newdoc_ufunc", &PyUFunc_Type, &ufunc,
&PyString_Type, &str)) {
- return NULL;
+ return NULL;
}
docstr = PyString_AS_STRING(str);
#endif
@@ -189,6 +195,9 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
if (NULL != ufunc->doc) {
PyErr_SetString(PyExc_ValueError,
"Cannot change docstring of ufunc with non-NULL docstring");
+#if defined(NPY_PY3K)
+ Py_DECREF(tmp);
+#endif
return NULL;
}
@@ -202,6 +211,9 @@ add_newdoc_ufunc(PyObject *NPY_UNUSED(dummy), PyObject *args)
strcpy(newdocstr, docstr);
ufunc->doc = newdocstr;
+#if defined(NPY_PY3K)
+ Py_DECREF(tmp);
+#endif
Py_RETURN_NONE;
}
@@ -267,10 +279,6 @@ int initumath(PyObject *m)
UFUNC_FLOATING_POINT_SUPPORT = 0;
#endif
- /* Initialize the types */
- if (PyType_Ready(&PyUFunc_Type) < 0)
- return -1;
-
/* Add some symbolic constants to the module */
d = PyModule_GetDict(m);
@@ -325,7 +333,7 @@ int initumath(PyObject *m)
s2 = PyDict_GetItemString(d, "remainder");
/* Setup the array object's numerical structures with appropriate
ufuncs in d*/
- PyArray_SetNumericOps(d);
+ _PyArray_SetNumericOps(d);
PyDict_SetItemString(d, "conj", s);
PyDict_SetItemString(d, "mod", s2);
diff --git a/numpy/core/tests/data/umath-validation-set-README b/numpy/core/tests/data/umath-validation-set-README
new file mode 100644
index 000000000..6561ca3b5
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-README
@@ -0,0 +1,15 @@
+Steps to validate transcendental functions:
+1) Add a file 'umath-validation-set-<ufuncname>', where ufuncname is name of
+ the function in NumPy you want to validate
+2) The file should contain 4 columns: dtype,input,expected output,ulperror
+ a. dtype: one of np.float16, np.float32, np.float64
+ b. input: floating point input to ufunc in hex. Example: 0x414570a4
+ represents 12.340000152587890625
+ c. expected output: floating point output for the corresponding input in hex.
+ This should be computed using a high(er) precision library and then rounded to
+ same format as the input.
+ d. ulperror: expected maximum ulp error of the function. This
+ should be same across all rows of the same dtype. Otherwise, the function is
+ tested for the maximum ulp error among all entries of that dtype.
+3) Add file umath-validation-set-<ufuncname> to the test file test_umath_accuracy.py
+ which will then validate your ufunc.
diff --git a/numpy/core/tests/data/umath-validation-set-cos b/numpy/core/tests/data/umath-validation-set-cos
new file mode 100644
index 000000000..360ebcd6a
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-cos
@@ -0,0 +1,707 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,2
+np.float32,0x007b2490,0x3f800000,2
+np.float32,0x007c99fa,0x3f800000,2
+np.float32,0x00734a0c,0x3f800000,2
+np.float32,0x0070de24,0x3f800000,2
+np.float32,0x007fffff,0x3f800000,2
+np.float32,0x00000001,0x3f800000,2
+## -ve denormals ##
+np.float32,0x80495d65,0x3f800000,2
+np.float32,0x806894f6,0x3f800000,2
+np.float32,0x80555a76,0x3f800000,2
+np.float32,0x804e1fb8,0x3f800000,2
+np.float32,0x80687de9,0x3f800000,2
+np.float32,0x807fffff,0x3f800000,2
+np.float32,0x80000001,0x3f800000,2
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0x3f800000,2
+np.float32,0x80000000,0x3f800000,2
+np.float32,0x00800000,0x3f800000,2
+np.float32,0x7f7fffff,0x3f5a5f96,2
+np.float32,0x80800000,0x3f800000,2
+np.float32,0xff7fffff,0x3f5a5f96,2
+## 1.00f + 0x00000001 ##
+np.float32,0x3f800000,0x3f0a5140,2
+np.float32,0x3f800001,0x3f0a513f,2
+np.float32,0x3f800002,0x3f0a513d,2
+np.float32,0xc090a8b0,0xbe4332ce,2
+np.float32,0x41ce3184,0x3f4d1de1,2
+np.float32,0xc1d85848,0xbeaa8980,2
+np.float32,0x402b8820,0xbf653aa3,2
+np.float32,0x42b4e454,0xbf4a338b,2
+np.float32,0x42a67a60,0x3c58202e,2
+np.float32,0x41d92388,0xbed987c7,2
+np.float32,0x422dd66c,0x3f5dcab3,2
+np.float32,0xc28f5be6,0xbf5688d8,2
+np.float32,0x41ab2674,0xbf53aa3b,2
+np.float32,0xd0102756,0x3f45d12d,2
+np.float32,0xcf99405e,0xbe9cf281,2
+np.float32,0xcfd83a12,0x3eaae4ca,2
+np.float32,0x4fb54db0,0xbf7b2894,2
+np.float32,0xcfcca29d,0x3f752e4e,2
+np.float32,0xceec2ac0,0xbf745303,2
+np.float32,0xcfdca97f,0x3ef554a7,2
+np.float32,0xcfe92b0a,0x3f4618f2,2
+np.float32,0x5014b0eb,0x3ee933e6,2
+np.float32,0xcfa7ee96,0xbeedeeb2,2
+np.float32,0x754c09a0,0xbef298de,2
+np.float32,0x77a731fb,0x3f24599f,2
+np.float32,0x76de2494,0x3f79576c,2
+np.float32,0xf74920dc,0xbf4d196e,2
+np.float32,0x7707a312,0xbeb5cb8e,2
+np.float32,0x75bf9790,0xbf7fd7fe,2
+np.float32,0xf4ca7c40,0xbe15107d,2
+np.float32,0x77e91899,0xbe8a968b,2
+np.float32,0xf74c9820,0xbf7f9677,2
+np.float32,0x7785ca29,0xbe6ef93b,2
+np.float32,0x3f490fdb,0x3f3504f3,2
+np.float32,0xbf490fdb,0x3f3504f3,2
+np.float32,0x3fc90fdb,0xb33bbd2e,2
+np.float32,0xbfc90fdb,0xb33bbd2e,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x3fc90fdb,0xb33bbd2e,2
+np.float32,0xbfc90fdb,0xb33bbd2e,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x4016cbe4,0xbf3504f3,2
+np.float32,0xc016cbe4,0xbf3504f3,2
+np.float32,0x4096cbe4,0x324cde2e,2
+np.float32,0xc096cbe4,0x324cde2e,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x40490fdb,0xbf800000,2
+np.float32,0xc0490fdb,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x407b53d2,0xbf3504f1,2
+np.float32,0xc07b53d2,0xbf3504f1,2
+np.float32,0x40fb53d2,0xb4b5563d,2
+np.float32,0xc0fb53d2,0xb4b5563d,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x4096cbe4,0x324cde2e,2
+np.float32,0xc096cbe4,0x324cde2e,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x40afede0,0x3f3504f7,2
+np.float32,0xc0afede0,0x3f3504f7,2
+np.float32,0x412fede0,0x353222c4,2
+np.float32,0xc12fede0,0x353222c4,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x40c90fdb,0x3f800000,2
+np.float32,0xc0c90fdb,0x3f800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x40e231d6,0x3f3504f3,2
+np.float32,0xc0e231d6,0x3f3504f3,2
+np.float32,0x416231d6,0xb319a6a2,2
+np.float32,0xc16231d6,0xb319a6a2,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x40fb53d2,0xb4b5563d,2
+np.float32,0xc0fb53d2,0xb4b5563d,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x410a3ae7,0xbf3504fb,2
+np.float32,0xc10a3ae7,0xbf3504fb,2
+np.float32,0x418a3ae7,0x35b08908,2
+np.float32,0xc18a3ae7,0x35b08908,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x4116cbe4,0xbf800000,2
+np.float32,0xc116cbe4,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x41235ce2,0xbf3504ef,2
+np.float32,0xc1235ce2,0xbf3504ef,2
+np.float32,0x41a35ce2,0xb53889b6,2
+np.float32,0xc1a35ce2,0xb53889b6,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x412fede0,0x353222c4,2
+np.float32,0xc12fede0,0x353222c4,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x413c7edd,0x3f3504f4,2
+np.float32,0xc13c7edd,0x3f3504f4,2
+np.float32,0x41bc7edd,0x33800add,2
+np.float32,0xc1bc7edd,0x33800add,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x41490fdb,0x3f800000,2
+np.float32,0xc1490fdb,0x3f800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x4155a0d9,0x3f3504eb,2
+np.float32,0xc155a0d9,0x3f3504eb,2
+np.float32,0x41d5a0d9,0xb5b3bc81,2
+np.float32,0xc1d5a0d9,0xb5b3bc81,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x416231d6,0xb319a6a2,2
+np.float32,0xc16231d6,0xb319a6a2,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x416ec2d4,0xbf3504f7,2
+np.float32,0xc16ec2d4,0xbf3504f7,2
+np.float32,0x41eec2d4,0x353ef0a7,2
+np.float32,0xc1eec2d4,0x353ef0a7,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x417b53d2,0xbf800000,2
+np.float32,0xc17b53d2,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x4183f268,0xbf3504e7,2
+np.float32,0xc183f268,0xbf3504e7,2
+np.float32,0x4203f268,0xb6059a13,2
+np.float32,0xc203f268,0xb6059a13,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x418a3ae7,0x35b08908,2
+np.float32,0xc18a3ae7,0x35b08908,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x41908365,0x3f3504f0,2
+np.float32,0xc1908365,0x3f3504f0,2
+np.float32,0x42108365,0xb512200d,2
+np.float32,0xc2108365,0xb512200d,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x4196cbe4,0x3f800000,2
+np.float32,0xc196cbe4,0x3f800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x419d1463,0x3f3504ef,2
+np.float32,0xc19d1463,0x3f3504ef,2
+np.float32,0x421d1463,0xb5455799,2
+np.float32,0xc21d1463,0xb5455799,2
+np.float32,0x429d1463,0xbf800000,2
+np.float32,0xc29d1463,0xbf800000,2
+np.float32,0x41a35ce2,0xb53889b6,2
+np.float32,0xc1a35ce2,0xb53889b6,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x42a35ce2,0x3f800000,2
+np.float32,0xc2a35ce2,0x3f800000,2
+np.float32,0x41a9a561,0xbf3504ff,2
+np.float32,0xc1a9a561,0xbf3504ff,2
+np.float32,0x4229a561,0x360733d0,2
+np.float32,0xc229a561,0x360733d0,2
+np.float32,0x42a9a561,0xbf800000,2
+np.float32,0xc2a9a561,0xbf800000,2
+np.float32,0x41afede0,0xbf800000,2
+np.float32,0xc1afede0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x42afede0,0x3f800000,2
+np.float32,0xc2afede0,0x3f800000,2
+np.float32,0x41b6365e,0xbf3504f6,2
+np.float32,0xc1b6365e,0xbf3504f6,2
+np.float32,0x4236365e,0x350bb91c,2
+np.float32,0xc236365e,0x350bb91c,2
+np.float32,0x42b6365e,0xbf800000,2
+np.float32,0xc2b6365e,0xbf800000,2
+np.float32,0x41bc7edd,0x33800add,2
+np.float32,0xc1bc7edd,0x33800add,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x42bc7edd,0x3f800000,2
+np.float32,0xc2bc7edd,0x3f800000,2
+np.float32,0x41c2c75c,0x3f3504f8,2
+np.float32,0xc1c2c75c,0x3f3504f8,2
+np.float32,0x4242c75c,0x354bbe8a,2
+np.float32,0xc242c75c,0x354bbe8a,2
+np.float32,0x42c2c75c,0xbf800000,2
+np.float32,0xc2c2c75c,0xbf800000,2
+np.float32,0x41c90fdb,0x3f800000,2
+np.float32,0xc1c90fdb,0x3f800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x42c90fdb,0x3f800000,2
+np.float32,0xc2c90fdb,0x3f800000,2
+np.float32,0x41cf585a,0x3f3504e7,2
+np.float32,0xc1cf585a,0x3f3504e7,2
+np.float32,0x424f585a,0xb608cd8c,2
+np.float32,0xc24f585a,0xb608cd8c,2
+np.float32,0x42cf585a,0xbf800000,2
+np.float32,0xc2cf585a,0xbf800000,2
+np.float32,0x41d5a0d9,0xb5b3bc81,2
+np.float32,0xc1d5a0d9,0xb5b3bc81,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x42d5a0d9,0x3f800000,2
+np.float32,0xc2d5a0d9,0x3f800000,2
+np.float32,0x41dbe958,0xbf350507,2
+np.float32,0xc1dbe958,0xbf350507,2
+np.float32,0x425be958,0x365eab75,2
+np.float32,0xc25be958,0x365eab75,2
+np.float32,0x42dbe958,0xbf800000,2
+np.float32,0xc2dbe958,0xbf800000,2
+np.float32,0x41e231d6,0xbf800000,2
+np.float32,0xc1e231d6,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x42e231d6,0x3f800000,2
+np.float32,0xc2e231d6,0x3f800000,2
+np.float32,0x41e87a55,0xbf3504ef,2
+np.float32,0xc1e87a55,0xbf3504ef,2
+np.float32,0x42687a55,0xb552257b,2
+np.float32,0xc2687a55,0xb552257b,2
+np.float32,0x42e87a55,0xbf800000,2
+np.float32,0xc2e87a55,0xbf800000,2
+np.float32,0x41eec2d4,0x353ef0a7,2
+np.float32,0xc1eec2d4,0x353ef0a7,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x42eec2d4,0x3f800000,2
+np.float32,0xc2eec2d4,0x3f800000,2
+np.float32,0x41f50b53,0x3f3504ff,2
+np.float32,0xc1f50b53,0x3f3504ff,2
+np.float32,0x42750b53,0x360a6748,2
+np.float32,0xc2750b53,0x360a6748,2
+np.float32,0x42f50b53,0xbf800000,2
+np.float32,0xc2f50b53,0xbf800000,2
+np.float32,0x41fb53d2,0x3f800000,2
+np.float32,0xc1fb53d2,0x3f800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x42fb53d2,0x3f800000,2
+np.float32,0xc2fb53d2,0x3f800000,2
+np.float32,0x4200ce28,0x3f3504f6,2
+np.float32,0xc200ce28,0x3f3504f6,2
+np.float32,0x4280ce28,0x34fdd672,2
+np.float32,0xc280ce28,0x34fdd672,2
+np.float32,0x4300ce28,0xbf800000,2
+np.float32,0xc300ce28,0xbf800000,2
+np.float32,0x4203f268,0xb6059a13,2
+np.float32,0xc203f268,0xb6059a13,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x4303f268,0x3f800000,2
+np.float32,0xc303f268,0x3f800000,2
+np.float32,0x420716a7,0xbf3504f8,2
+np.float32,0xc20716a7,0xbf3504f8,2
+np.float32,0x428716a7,0x35588c6d,2
+np.float32,0xc28716a7,0x35588c6d,2
+np.float32,0x430716a7,0xbf800000,2
+np.float32,0xc30716a7,0xbf800000,2
+np.float32,0x420a3ae7,0xbf800000,2
+np.float32,0xc20a3ae7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x430a3ae7,0x3f800000,2
+np.float32,0xc30a3ae7,0x3f800000,2
+np.float32,0x420d5f26,0xbf3504e7,2
+np.float32,0xc20d5f26,0xbf3504e7,2
+np.float32,0x428d5f26,0xb60c0105,2
+np.float32,0xc28d5f26,0xb60c0105,2
+np.float32,0x430d5f26,0xbf800000,2
+np.float32,0xc30d5f26,0xbf800000,2
+np.float32,0x42108365,0xb512200d,2
+np.float32,0xc2108365,0xb512200d,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x43108365,0x3f800000,2
+np.float32,0xc3108365,0x3f800000,2
+np.float32,0x4213a7a5,0x3f350507,2
+np.float32,0xc213a7a5,0x3f350507,2
+np.float32,0x4293a7a5,0x3661deee,2
+np.float32,0xc293a7a5,0x3661deee,2
+np.float32,0x4313a7a5,0xbf800000,2
+np.float32,0xc313a7a5,0xbf800000,2
+np.float32,0x4216cbe4,0x3f800000,2
+np.float32,0xc216cbe4,0x3f800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x4316cbe4,0x3f800000,2
+np.float32,0xc316cbe4,0x3f800000,2
+np.float32,0x4219f024,0x3f3504d8,2
+np.float32,0xc219f024,0x3f3504d8,2
+np.float32,0x4299f024,0xb69bde6c,2
+np.float32,0xc299f024,0xb69bde6c,2
+np.float32,0x4319f024,0xbf800000,2
+np.float32,0xc319f024,0xbf800000,2
+np.float32,0x421d1463,0xb5455799,2
+np.float32,0xc21d1463,0xb5455799,2
+np.float32,0x429d1463,0xbf800000,2
+np.float32,0xc29d1463,0xbf800000,2
+np.float32,0x431d1463,0x3f800000,2
+np.float32,0xc31d1463,0x3f800000,2
+np.float32,0x422038a3,0xbf350516,2
+np.float32,0xc22038a3,0xbf350516,2
+np.float32,0x42a038a3,0x36c6cd61,2
+np.float32,0xc2a038a3,0x36c6cd61,2
+np.float32,0x432038a3,0xbf800000,2
+np.float32,0xc32038a3,0xbf800000,2
+np.float32,0x42235ce2,0xbf800000,2
+np.float32,0xc2235ce2,0xbf800000,2
+np.float32,0x42a35ce2,0x3f800000,2
+np.float32,0xc2a35ce2,0x3f800000,2
+np.float32,0x43235ce2,0x3f800000,2
+np.float32,0xc3235ce2,0x3f800000,2
+np.float32,0x42268121,0xbf3504f6,2
+np.float32,0xc2268121,0xbf3504f6,2
+np.float32,0x42a68121,0x34e43aac,2
+np.float32,0xc2a68121,0x34e43aac,2
+np.float32,0x43268121,0xbf800000,2
+np.float32,0xc3268121,0xbf800000,2
+np.float32,0x4229a561,0x360733d0,2
+np.float32,0xc229a561,0x360733d0,2
+np.float32,0x42a9a561,0xbf800000,2
+np.float32,0xc2a9a561,0xbf800000,2
+np.float32,0x4329a561,0x3f800000,2
+np.float32,0xc329a561,0x3f800000,2
+np.float32,0x422cc9a0,0x3f3504f8,2
+np.float32,0xc22cc9a0,0x3f3504f8,2
+np.float32,0x42acc9a0,0x35655a50,2
+np.float32,0xc2acc9a0,0x35655a50,2
+np.float32,0x432cc9a0,0xbf800000,2
+np.float32,0xc32cc9a0,0xbf800000,2
+np.float32,0x422fede0,0x3f800000,2
+np.float32,0xc22fede0,0x3f800000,2
+np.float32,0x42afede0,0x3f800000,2
+np.float32,0xc2afede0,0x3f800000,2
+np.float32,0x432fede0,0x3f800000,2
+np.float32,0xc32fede0,0x3f800000,2
+np.float32,0x4233121f,0x3f3504e7,2
+np.float32,0xc233121f,0x3f3504e7,2
+np.float32,0x42b3121f,0xb60f347d,2
+np.float32,0xc2b3121f,0xb60f347d,2
+np.float32,0x4333121f,0xbf800000,2
+np.float32,0xc333121f,0xbf800000,2
+np.float32,0x4236365e,0x350bb91c,2
+np.float32,0xc236365e,0x350bb91c,2
+np.float32,0x42b6365e,0xbf800000,2
+np.float32,0xc2b6365e,0xbf800000,2
+np.float32,0x4336365e,0x3f800000,2
+np.float32,0xc336365e,0x3f800000,2
+np.float32,0x42395a9e,0xbf350507,2
+np.float32,0xc2395a9e,0xbf350507,2
+np.float32,0x42b95a9e,0x36651267,2
+np.float32,0xc2b95a9e,0x36651267,2
+np.float32,0x43395a9e,0xbf800000,2
+np.float32,0xc3395a9e,0xbf800000,2
+np.float32,0x423c7edd,0xbf800000,2
+np.float32,0xc23c7edd,0xbf800000,2
+np.float32,0x42bc7edd,0x3f800000,2
+np.float32,0xc2bc7edd,0x3f800000,2
+np.float32,0x433c7edd,0x3f800000,2
+np.float32,0xc33c7edd,0x3f800000,2
+np.float32,0x423fa31d,0xbf3504d7,2
+np.float32,0xc23fa31d,0xbf3504d7,2
+np.float32,0x42bfa31d,0xb69d7828,2
+np.float32,0xc2bfa31d,0xb69d7828,2
+np.float32,0x433fa31d,0xbf800000,2
+np.float32,0xc33fa31d,0xbf800000,2
+np.float32,0x4242c75c,0x354bbe8a,2
+np.float32,0xc242c75c,0x354bbe8a,2
+np.float32,0x42c2c75c,0xbf800000,2
+np.float32,0xc2c2c75c,0xbf800000,2
+np.float32,0x4342c75c,0x3f800000,2
+np.float32,0xc342c75c,0x3f800000,2
+np.float32,0x4245eb9c,0x3f350517,2
+np.float32,0xc245eb9c,0x3f350517,2
+np.float32,0x42c5eb9c,0x36c8671d,2
+np.float32,0xc2c5eb9c,0x36c8671d,2
+np.float32,0x4345eb9c,0xbf800000,2
+np.float32,0xc345eb9c,0xbf800000,2
+np.float32,0x42490fdb,0x3f800000,2
+np.float32,0xc2490fdb,0x3f800000,2
+np.float32,0x42c90fdb,0x3f800000,2
+np.float32,0xc2c90fdb,0x3f800000,2
+np.float32,0x43490fdb,0x3f800000,2
+np.float32,0xc3490fdb,0x3f800000,2
+np.float32,0x424c341a,0x3f3504f5,2
+np.float32,0xc24c341a,0x3f3504f5,2
+np.float32,0x42cc341a,0x34ca9ee6,2
+np.float32,0xc2cc341a,0x34ca9ee6,2
+np.float32,0x434c341a,0xbf800000,2
+np.float32,0xc34c341a,0xbf800000,2
+np.float32,0x424f585a,0xb608cd8c,2
+np.float32,0xc24f585a,0xb608cd8c,2
+np.float32,0x42cf585a,0xbf800000,2
+np.float32,0xc2cf585a,0xbf800000,2
+np.float32,0x434f585a,0x3f800000,2
+np.float32,0xc34f585a,0x3f800000,2
+np.float32,0x42527c99,0xbf3504f9,2
+np.float32,0xc2527c99,0xbf3504f9,2
+np.float32,0x42d27c99,0x35722833,2
+np.float32,0xc2d27c99,0x35722833,2
+np.float32,0x43527c99,0xbf800000,2
+np.float32,0xc3527c99,0xbf800000,2
+np.float32,0x4255a0d9,0xbf800000,2
+np.float32,0xc255a0d9,0xbf800000,2
+np.float32,0x42d5a0d9,0x3f800000,2
+np.float32,0xc2d5a0d9,0x3f800000,2
+np.float32,0x4355a0d9,0x3f800000,2
+np.float32,0xc355a0d9,0x3f800000,2
+np.float32,0x4258c518,0xbf3504e6,2
+np.float32,0xc258c518,0xbf3504e6,2
+np.float32,0x42d8c518,0xb61267f6,2
+np.float32,0xc2d8c518,0xb61267f6,2
+np.float32,0x4358c518,0xbf800000,2
+np.float32,0xc358c518,0xbf800000,2
+np.float32,0x425be958,0x365eab75,2
+np.float32,0xc25be958,0x365eab75,2
+np.float32,0x42dbe958,0xbf800000,2
+np.float32,0xc2dbe958,0xbf800000,2
+np.float32,0x435be958,0x3f800000,2
+np.float32,0xc35be958,0x3f800000,2
+np.float32,0x425f0d97,0x3f350508,2
+np.float32,0xc25f0d97,0x3f350508,2
+np.float32,0x42df0d97,0x366845e0,2
+np.float32,0xc2df0d97,0x366845e0,2
+np.float32,0x435f0d97,0xbf800000,2
+np.float32,0xc35f0d97,0xbf800000,2
+np.float32,0x426231d6,0x3f800000,2
+np.float32,0xc26231d6,0x3f800000,2
+np.float32,0x42e231d6,0x3f800000,2
+np.float32,0xc2e231d6,0x3f800000,2
+np.float32,0x436231d6,0x3f800000,2
+np.float32,0xc36231d6,0x3f800000,2
+np.float32,0x42655616,0x3f3504d7,2
+np.float32,0xc2655616,0x3f3504d7,2
+np.float32,0x42e55616,0xb69f11e5,2
+np.float32,0xc2e55616,0xb69f11e5,2
+np.float32,0x43655616,0xbf800000,2
+np.float32,0xc3655616,0xbf800000,2
+np.float32,0x42687a55,0xb552257b,2
+np.float32,0xc2687a55,0xb552257b,2
+np.float32,0x42e87a55,0xbf800000,2
+np.float32,0xc2e87a55,0xbf800000,2
+np.float32,0x43687a55,0x3f800000,2
+np.float32,0xc3687a55,0x3f800000,2
+np.float32,0x426b9e95,0xbf350517,2
+np.float32,0xc26b9e95,0xbf350517,2
+np.float32,0x42eb9e95,0x36ca00d9,2
+np.float32,0xc2eb9e95,0x36ca00d9,2
+np.float32,0x436b9e95,0xbf800000,2
+np.float32,0xc36b9e95,0xbf800000,2
+np.float32,0x426ec2d4,0xbf800000,2
+np.float32,0xc26ec2d4,0xbf800000,2
+np.float32,0x42eec2d4,0x3f800000,2
+np.float32,0xc2eec2d4,0x3f800000,2
+np.float32,0x436ec2d4,0x3f800000,2
+np.float32,0xc36ec2d4,0x3f800000,2
+np.float32,0x4271e713,0xbf3504f5,2
+np.float32,0xc271e713,0xbf3504f5,2
+np.float32,0x42f1e713,0x34b10321,2
+np.float32,0xc2f1e713,0x34b10321,2
+np.float32,0x4371e713,0xbf800000,2
+np.float32,0xc371e713,0xbf800000,2
+np.float32,0x42750b53,0x360a6748,2
+np.float32,0xc2750b53,0x360a6748,2
+np.float32,0x42f50b53,0xbf800000,2
+np.float32,0xc2f50b53,0xbf800000,2
+np.float32,0x43750b53,0x3f800000,2
+np.float32,0xc3750b53,0x3f800000,2
+np.float32,0x42782f92,0x3f3504f9,2
+np.float32,0xc2782f92,0x3f3504f9,2
+np.float32,0x42f82f92,0x357ef616,2
+np.float32,0xc2f82f92,0x357ef616,2
+np.float32,0x43782f92,0xbf800000,2
+np.float32,0xc3782f92,0xbf800000,2
+np.float32,0x427b53d2,0x3f800000,2
+np.float32,0xc27b53d2,0x3f800000,2
+np.float32,0x42fb53d2,0x3f800000,2
+np.float32,0xc2fb53d2,0x3f800000,2
+np.float32,0x437b53d2,0x3f800000,2
+np.float32,0xc37b53d2,0x3f800000,2
+np.float32,0x427e7811,0x3f3504e6,2
+np.float32,0xc27e7811,0x3f3504e6,2
+np.float32,0x42fe7811,0xb6159b6f,2
+np.float32,0xc2fe7811,0xb6159b6f,2
+np.float32,0x437e7811,0xbf800000,2
+np.float32,0xc37e7811,0xbf800000,2
+np.float32,0x4280ce28,0x34fdd672,2
+np.float32,0xc280ce28,0x34fdd672,2
+np.float32,0x4300ce28,0xbf800000,2
+np.float32,0xc300ce28,0xbf800000,2
+np.float32,0x4380ce28,0x3f800000,2
+np.float32,0xc380ce28,0x3f800000,2
+np.float32,0x42826048,0xbf350508,2
+np.float32,0xc2826048,0xbf350508,2
+np.float32,0x43026048,0x366b7958,2
+np.float32,0xc3026048,0x366b7958,2
+np.float32,0x43826048,0xbf800000,2
+np.float32,0xc3826048,0xbf800000,2
+np.float32,0x4283f268,0xbf800000,2
+np.float32,0xc283f268,0xbf800000,2
+np.float32,0x4303f268,0x3f800000,2
+np.float32,0xc303f268,0x3f800000,2
+np.float32,0x4383f268,0x3f800000,2
+np.float32,0xc383f268,0x3f800000,2
+np.float32,0x42858487,0xbf350504,2
+np.float32,0xc2858487,0xbf350504,2
+np.float32,0x43058487,0x363ea8be,2
+np.float32,0xc3058487,0x363ea8be,2
+np.float32,0x43858487,0xbf800000,2
+np.float32,0xc3858487,0xbf800000,2
+np.float32,0x428716a7,0x35588c6d,2
+np.float32,0xc28716a7,0x35588c6d,2
+np.float32,0x430716a7,0xbf800000,2
+np.float32,0xc30716a7,0xbf800000,2
+np.float32,0x438716a7,0x3f800000,2
+np.float32,0xc38716a7,0x3f800000,2
+np.float32,0x4288a8c7,0x3f350517,2
+np.float32,0xc288a8c7,0x3f350517,2
+np.float32,0x4308a8c7,0x36cb9a96,2
+np.float32,0xc308a8c7,0x36cb9a96,2
+np.float32,0x4388a8c7,0xbf800000,2
+np.float32,0xc388a8c7,0xbf800000,2
+np.float32,0x428a3ae7,0x3f800000,2
+np.float32,0xc28a3ae7,0x3f800000,2
+np.float32,0x430a3ae7,0x3f800000,2
+np.float32,0xc30a3ae7,0x3f800000,2
+np.float32,0x438a3ae7,0x3f800000,2
+np.float32,0xc38a3ae7,0x3f800000,2
+np.float32,0x428bcd06,0x3f3504f5,2
+np.float32,0xc28bcd06,0x3f3504f5,2
+np.float32,0x430bcd06,0x3497675b,2
+np.float32,0xc30bcd06,0x3497675b,2
+np.float32,0x438bcd06,0xbf800000,2
+np.float32,0xc38bcd06,0xbf800000,2
+np.float32,0x428d5f26,0xb60c0105,2
+np.float32,0xc28d5f26,0xb60c0105,2
+np.float32,0x430d5f26,0xbf800000,2
+np.float32,0xc30d5f26,0xbf800000,2
+np.float32,0x438d5f26,0x3f800000,2
+np.float32,0xc38d5f26,0x3f800000,2
+np.float32,0x428ef146,0xbf350526,2
+np.float32,0xc28ef146,0xbf350526,2
+np.float32,0x430ef146,0x3710bc40,2
+np.float32,0xc30ef146,0x3710bc40,2
+np.float32,0x438ef146,0xbf800000,2
+np.float32,0xc38ef146,0xbf800000,2
+np.float32,0x42908365,0xbf800000,2
+np.float32,0xc2908365,0xbf800000,2
+np.float32,0x43108365,0x3f800000,2
+np.float32,0xc3108365,0x3f800000,2
+np.float32,0x43908365,0x3f800000,2
+np.float32,0xc3908365,0x3f800000,2
+np.float32,0x42921585,0xbf3504e6,2
+np.float32,0xc2921585,0xbf3504e6,2
+np.float32,0x43121585,0xb618cee8,2
+np.float32,0xc3121585,0xb618cee8,2
+np.float32,0x43921585,0xbf800000,2
+np.float32,0xc3921585,0xbf800000,2
+np.float32,0x4293a7a5,0x3661deee,2
+np.float32,0xc293a7a5,0x3661deee,2
+np.float32,0x4313a7a5,0xbf800000,2
+np.float32,0xc313a7a5,0xbf800000,2
+np.float32,0x4393a7a5,0x3f800000,2
+np.float32,0xc393a7a5,0x3f800000,2
+np.float32,0x429539c5,0x3f350536,2
+np.float32,0xc29539c5,0x3f350536,2
+np.float32,0x431539c5,0x373bab34,2
+np.float32,0xc31539c5,0x373bab34,2
+np.float32,0x439539c5,0xbf800000,2
+np.float32,0xc39539c5,0xbf800000,2
+np.float32,0x4296cbe4,0x3f800000,2
+np.float32,0xc296cbe4,0x3f800000,2
+np.float32,0x4316cbe4,0x3f800000,2
+np.float32,0xc316cbe4,0x3f800000,2
+np.float32,0x4396cbe4,0x3f800000,2
+np.float32,0xc396cbe4,0x3f800000,2
+np.float32,0x42985e04,0x3f3504d7,2
+np.float32,0xc2985e04,0x3f3504d7,2
+np.float32,0x43185e04,0xb6a2455d,2
+np.float32,0xc3185e04,0xb6a2455d,2
+np.float32,0x43985e04,0xbf800000,2
+np.float32,0xc3985e04,0xbf800000,2
+np.float32,0x4299f024,0xb69bde6c,2
+np.float32,0xc299f024,0xb69bde6c,2
+np.float32,0x4319f024,0xbf800000,2
+np.float32,0xc319f024,0xbf800000,2
+np.float32,0x4399f024,0x3f800000,2
+np.float32,0xc399f024,0x3f800000,2
+np.float32,0x429b8243,0xbf3504ea,2
+np.float32,0xc29b8243,0xbf3504ea,2
+np.float32,0x431b8243,0xb5cb2eb8,2
+np.float32,0xc31b8243,0xb5cb2eb8,2
+np.float32,0x439b8243,0xbf800000,2
+np.float32,0xc39b8243,0xbf800000,2
+np.float32,0x435b2047,0x3f3504c1,2
+np.float32,0x42a038a2,0xb5e4ca7e,2
+np.float32,0x432038a2,0xbf800000,2
+np.float32,0x4345eb9b,0xbf800000,2
+np.float32,0x42c5eb9b,0xb5de638c,2
+np.float32,0x42eb9e94,0xb5d7fc9b,2
+np.float32,0x4350ea79,0x3631dadb,2
+np.float32,0x42dbe957,0xbf800000,2
+np.float32,0x425be957,0xb505522a,2
+np.float32,0x435be957,0x3f800000,2
+np.float32,0x487fe5ab,0xba140185,2
+np.float32,0x497fe5ab,0x3f7fffd5,2
+np.float32,0x49ffe5ab,0x3f7fff55,2
+np.float32,0x49ffeb37,0x3b9382f5,2
+np.float32,0x497ff0c3,0x3b13049f,2
+np.float32,0x49fff0c3,0xbf7fff57,2
+np.float32,0x49fff64f,0xbb928618,2
+np.float32,0x497ffbdb,0xbf7fffd6,2
+np.float32,0x49fffbdb,0x3f7fff59,2
+np.float32,0x48fffbdb,0xba9207c6,2
+np.float32,0x4e736e56,0xbf800000,2
+np.float32,0x4d4da377,0xbf800000,2
+np.float32,0x4ece58c3,0xbf800000,2
+np.float32,0x4ee0db9c,0xbf800000,2
+np.float32,0x4dee7002,0x3f800000,2
+np.float32,0x4ee86afc,0x38857a23,2
+np.float32,0x4dca4f3f,0xbf800000,2
+np.float32,0x4ecb48af,0xb95d1e10,2
+np.float32,0x4e51e33f,0xbf800000,2
+np.float32,0x4ef5f421,0xbf800000,2
+np.float32,0x46027eb2,0x3e7d94c9,2
+np.float32,0x4477baed,0xbe7f1824,2
+np.float32,0x454b8024,0x3e7f5268,2
+np.float32,0x455d2c09,0x3e7f40cb,2
+np.float32,0x4768d3de,0xba14b4af,2
+np.float32,0x46c1e7cd,0x3e7fb102,2
+np.float32,0x44a52949,0xbe7dc9d5,2
+np.float32,0x4454633a,0x3e7dbc7d,2
+np.float32,0x4689810b,0x3e7eb02b,2
+np.float32,0x473473cd,0xbe7eef6f,2
+np.float32,0x44a5193f,0x3e7e1b1f,2
+np.float32,0x46004b36,0x3e7dac59,2
+np.float32,0x467f604b,0x3d7ffd3a,2
+np.float32,0x45ea1805,0x3dffd2e0,2
+np.float32,0x457b6af3,0x3dff7831,2
+np.float32,0x44996159,0xbe7d85f4,2
+np.float32,0x47883553,0xbb80584e,2
+np.float32,0x44e19f0c,0xbdffcfe6,2
+np.float32,0x472b3bf6,0xbe7f7a82,2
+np.float32,0x4600bb4e,0x3a135e33,2
+np.float32,0x449f4556,0x3e7e42e5,2
+np.float32,0x474e9420,0x3dff77b2,2
+np.float32,0x45cbdb23,0x3dff7240,2
+np.float32,0x44222747,0x3dffb039,2
+np.float32,0x4772e419,0xbdff74b8,2
diff --git a/numpy/core/tests/data/umath-validation-set-exp b/numpy/core/tests/data/umath-validation-set-exp
new file mode 100644
index 000000000..1b2cc9ce4
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-exp
@@ -0,0 +1,135 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x3f800000,3
+np.float32,0x007b2490,0x3f800000,3
+np.float32,0x007c99fa,0x3f800000,3
+np.float32,0x00734a0c,0x3f800000,3
+np.float32,0x0070de24,0x3f800000,3
+np.float32,0x00495d65,0x3f800000,3
+np.float32,0x006894f6,0x3f800000,3
+np.float32,0x00555a76,0x3f800000,3
+np.float32,0x004e1fb8,0x3f800000,3
+np.float32,0x00687de9,0x3f800000,3
+## -ve denormals ##
+np.float32,0x805b59af,0x3f800000,3
+np.float32,0x807ed8ed,0x3f800000,3
+np.float32,0x807142ad,0x3f800000,3
+np.float32,0x80772002,0x3f800000,3
+np.float32,0x8062abcb,0x3f800000,3
+np.float32,0x8045e31c,0x3f800000,3
+np.float32,0x805f01c2,0x3f800000,3
+np.float32,0x80506432,0x3f800000,3
+np.float32,0x8060089d,0x3f800000,3
+np.float32,0x8071292f,0x3f800000,3
+## floats that output a denormal ##
+np.float32,0xc2cf3fc1,0x00000001,3
+np.float32,0xc2c79726,0x00000021,3
+np.float32,0xc2cb295d,0x00000005,3
+np.float32,0xc2b49e6b,0x00068c4c,3
+np.float32,0xc2ca8116,0x00000008,3
+np.float32,0xc2c23f82,0x000001d7,3
+np.float32,0xc2cb69c0,0x00000005,3
+np.float32,0xc2cc1f4d,0x00000003,3
+np.float32,0xc2ae094e,0x00affc4c,3
+np.float32,0xc2c86c44,0x00000015,3
+## random floats between -87.0f and 88.0f ##
+np.float32,0x4030d7e0,0x417d9a05,3
+np.float32,0x426f60e8,0x6aa1be2c,3
+np.float32,0x41a1b220,0x4e0efc11,3
+np.float32,0xc20cc722,0x26159da7,3
+np.float32,0x41c492bc,0x512ec79d,3
+np.float32,0x40980210,0x42e73a0e,3
+np.float32,0xbf1f7b80,0x3f094de3,3
+np.float32,0x42a678a4,0x7b87a383,3
+np.float32,0xc20f3cfd,0x25a1c304,3
+np.float32,0x423ff34c,0x6216467f,3
+np.float32,0x00000000,0x3f800000,3
+## floats that cause an overflow ##
+np.float32,0x7f06d8c1,0x7f800000,3
+np.float32,0x7f451912,0x7f800000,3
+np.float32,0x7ecceac3,0x7f800000,3
+np.float32,0x7f643b45,0x7f800000,3
+np.float32,0x7e910ea0,0x7f800000,3
+np.float32,0x7eb4756b,0x7f800000,3
+np.float32,0x7f4ec708,0x7f800000,3
+np.float32,0x7f6b4551,0x7f800000,3
+np.float32,0x7d8edbda,0x7f800000,3
+np.float32,0x7f730718,0x7f800000,3
+np.float32,0x42b17217,0x7f7fff84,3
+np.float32,0x42b17218,0x7f800000,3
+np.float32,0x42b17219,0x7f800000,3
+np.float32,0xfef2b0bc,0x00000000,3
+np.float32,0xff69f83e,0x00000000,3
+np.float32,0xff4ecb12,0x00000000,3
+np.float32,0xfeac6d86,0x00000000,3
+np.float32,0xfde0cdb8,0x00000000,3
+np.float32,0xff26aef4,0x00000000,3
+np.float32,0xff6f9277,0x00000000,3
+np.float32,0xff7adfc4,0x00000000,3
+np.float32,0xff0ad40e,0x00000000,3
+np.float32,0xff6fd8f3,0x00000000,3
+np.float32,0xc2cff1b4,0x00000001,3
+np.float32,0xc2cff1b5,0x00000000,3
+np.float32,0xc2cff1b6,0x00000000,3
+np.float32,0x7f800000,0x7f800000,3
+np.float32,0xff800000,0x00000000,3
+np.float32,0x4292f27c,0x7480000a,3
+np.float32,0x42a920be,0x7c7fff94,3
+np.float32,0x41c214c9,0x50ffffd9,3
+np.float32,0x41abe686,0x4effffd9,3
+np.float32,0x4287db5a,0x707fffd3,3
+np.float32,0x41902cbb,0x4c800078,3
+np.float32,0x42609466,0x67ffffeb,3
+np.float32,0x41a65af5,0x4e7fffd1,3
+np.float32,0x417f13ff,0x4affffc9,3
+np.float32,0x426d0e6c,0x6a3504f2,3
+np.float32,0x41bc8934,0x507fff51,3
+np.float32,0x42a7bdde,0x7c0000d6,3
+np.float32,0x4120cf66,0x46b504f6,3
+np.float32,0x4244da8f,0x62ffff1a,3
+np.float32,0x41a0cf69,0x4e000034,3
+np.float32,0x41cd2bec,0x52000005,3
+np.float32,0x42893e41,0x7100009e,3
+np.float32,0x41b437e1,0x4fb50502,3
+np.float32,0x41d8430f,0x5300001d,3
+np.float32,0x4244da92,0x62ffffda,3
+np.float32,0x41a0cf63,0x4dffffa9,3
+np.float32,0x3eb17218,0x3fb504f3,3
+np.float32,0x428729e8,0x703504dc,3
+np.float32,0x41a0cf67,0x4e000014,3
+np.float32,0x4252b77d,0x65800011,3
+np.float32,0x41902cb9,0x4c800058,3
+np.float32,0x42a0cf67,0x79800052,3
+np.float32,0x4152b77b,0x48ffffe9,3
+np.float32,0x41265af3,0x46ffffc8,3
+np.float32,0x42187e0b,0x5affff9a,3
+np.float32,0xc0d2b77c,0x3ab504f6,3
+np.float32,0xc283b2ac,0x10000072,3
+np.float32,0xc1cff1b4,0x2cb504f5,3
+np.float32,0xc05dce9e,0x3d000000,3
+np.float32,0xc28ec9d2,0x0bfffea5,3
+np.float32,0xc23c893a,0x1d7fffde,3
+np.float32,0xc2a920c0,0x027fff6c,3
+np.float32,0xc1f9886f,0x2900002b,3
+np.float32,0xc2c42920,0x000000b5,3
+np.float32,0xc2893e41,0x0dfffec5,3
+np.float32,0xc2c4da93,0x00000080,3
+np.float32,0xc17f1401,0x3400000c,3
+np.float32,0xc1902cb6,0x327fffaf,3
+np.float32,0xc27c4e3b,0x11ffffc5,3
+np.float32,0xc268e5c5,0x157ffe9d,3
+np.float32,0xc2b4e953,0x0005a826,3
+np.float32,0xc287db5a,0x0e800016,3
+np.float32,0xc207db5a,0x2700000b,3
+np.float32,0xc2b2d4fe,0x000ffff1,3
+np.float32,0xc268e5c0,0x157fffdd,3
+np.float32,0xc22920bd,0x2100003b,3
+np.float32,0xc2902caf,0x0b80011e,3
+np.float32,0xc1902cba,0x327fff2f,3
+np.float32,0xc2ca6625,0x00000008,3
+np.float32,0xc280ece8,0x10fffeb5,3
+np.float32,0xc2918f94,0x0b0000ea,3
+np.float32,0xc29b43d5,0x077ffffc,3
+np.float32,0xc1e61ff7,0x2ab504f5,3
+np.float32,0xc2867878,0x0effff15,3
+np.float32,0xc2a2324a,0x04fffff4,3
diff --git a/numpy/core/tests/data/umath-validation-set-log b/numpy/core/tests/data/umath-validation-set-log
new file mode 100644
index 000000000..a7bd98481
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-log
@@ -0,0 +1,118 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0xc2afbc1b,4
+np.float32,0x007b2490,0xc2aec01e,4
+np.float32,0x007c99fa,0xc2aeba17,4
+np.float32,0x00734a0c,0xc2aee1dc,4
+np.float32,0x0070de24,0xc2aeecba,4
+np.float32,0x007fffff,0xc2aeac50,4
+np.float32,0x00000001,0xc2ce8ed0,4
+## -ve denormals ##
+np.float32,0x80495d65,0xffc00000,4
+np.float32,0x806894f6,0xffc00000,4
+np.float32,0x80555a76,0xffc00000,4
+np.float32,0x804e1fb8,0xffc00000,4
+np.float32,0x80687de9,0xffc00000,4
+np.float32,0x807fffff,0xffc00000,4
+np.float32,0x80000001,0xffc00000,4
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0xff800000,4
+np.float32,0x80000000,0xff800000,4
+np.float32,0x7f7fffff,0x42b17218,4
+np.float32,0x80800000,0xffc00000,4
+np.float32,0xff7fffff,0xffc00000,4
+## 1.00f + 0x00000001 ##
+np.float32,0x3f800000,0x00000000,4
+np.float32,0x3f800001,0x33ffffff,4
+np.float32,0x3f800002,0x347ffffe,4
+np.float32,0x3f7fffff,0xb3800000,4
+np.float32,0x3f7ffffe,0xb4000000,4
+np.float32,0x3f7ffffd,0xb4400001,4
+np.float32,0x402df853,0x3f7ffffe,4
+np.float32,0x402df854,0x3f7fffff,4
+np.float32,0x402df855,0x3f800000,4
+np.float32,0x402df856,0x3f800001,4
+np.float32,0x3ebc5ab0,0xbf800001,4
+np.float32,0x3ebc5ab1,0xbf800000,4
+np.float32,0x3ebc5ab2,0xbf800000,4
+np.float32,0x3ebc5ab3,0xbf7ffffe,4
+np.float32,0x423ef575,0x407768ab,4
+np.float32,0x427b8c61,0x408485dd,4
+np.float32,0x4211e9ee,0x406630b0,4
+np.float32,0x424d5c41,0x407c0fed,4
+np.float32,0x42be722a,0x4091cc91,4
+np.float32,0x42b73d30,0x4090908b,4
+np.float32,0x427e48e2,0x4084de7f,4
+np.float32,0x428f759b,0x4088bba3,4
+np.float32,0x41629069,0x4029a0cc,4
+np.float32,0x4272c99d,0x40836379,4
+np.float32,0x4d1b7458,0x4197463d,4
+np.float32,0x4f10c594,0x41ace2b2,4
+np.float32,0x4ea397c2,0x41a85171,4
+np.float32,0x4fefa9d1,0x41b6769c,4
+np.float32,0x4ebac6ab,0x41a960dc,4
+np.float32,0x4f6efb42,0x41b0e535,4
+np.float32,0x4e9ab8e7,0x41a7df44,4
+np.float32,0x4e81b5d1,0x41a67625,4
+np.float32,0x5014d9f2,0x41b832bd,4
+np.float32,0x4f02175c,0x41ac07b8,4
+np.float32,0x7f034f89,0x42b01c47,4
+np.float32,0x7f56d00e,0x42b11849,4
+np.float32,0x7f1cd5f6,0x42b0773a,4
+np.float32,0x7e979174,0x42af02d7,4
+np.float32,0x7f23369f,0x42b08ba2,4
+np.float32,0x7f0637ae,0x42b0277d,4
+np.float32,0x7efcb6e8,0x42b00897,4
+np.float32,0x7f7907c8,0x42b163f6,4
+np.float32,0x7e95c4c2,0x42aefcba,4
+np.float32,0x7f4577b2,0x42b0ed2d,4
+np.float32,0x3f49c92e,0xbe73ae84,4
+np.float32,0x3f4a23d1,0xbe71e2f8,4
+np.float32,0x3f4abb67,0xbe6ee430,4
+np.float32,0x3f48169a,0xbe7c5532,4
+np.float32,0x3f47f5fa,0xbe7cfc37,4
+np.float32,0x3f488309,0xbe7a2ad8,4
+np.float32,0x3f479df4,0xbe7ebf5f,4
+np.float32,0x3f47cfff,0xbe7dbec9,4
+np.float32,0x3f496704,0xbe75a125,4
+np.float32,0x3f478ee8,0xbe7f0c92,4
+np.float32,0x3f4a763b,0xbe7041ce,4
+np.float32,0x3f47a108,0xbe7eaf94,4
+np.float32,0x3f48136c,0xbe7c6578,4
+np.float32,0x3f481c17,0xbe7c391c,4
+np.float32,0x3f47cd28,0xbe7dcd56,4
+np.float32,0x3f478be8,0xbe7f1bf7,4
+np.float32,0x3f4c1f8e,0xbe67e367,4
+np.float32,0x3f489b0c,0xbe79b03f,4
+np.float32,0x3f4934cf,0xbe76a08a,4
+np.float32,0x3f4954df,0xbe75fd6a,4
+np.float32,0x3f47a3f5,0xbe7ea093,4
+np.float32,0x3f4ba4fc,0xbe6a4b02,4
+np.float32,0x3f47a0e1,0xbe7eb05c,4
+np.float32,0x3f48c30a,0xbe78e42f,4
+np.float32,0x3f48cab8,0xbe78bd05,4
+np.float32,0x3f4b0569,0xbe6d6ea4,4
+np.float32,0x3f47de32,0xbe7d7607,4
+np.float32,0x3f477328,0xbe7f9b00,4
+np.float32,0x3f496dab,0xbe757f52,4
+np.float32,0x3f47662c,0xbe7fddac,4
+np.float32,0x3f48ddd8,0xbe785b80,4
+np.float32,0x3f481866,0xbe7c4bff,4
+np.float32,0x3f48b119,0xbe793fb6,4
+np.float32,0x3f48c7e8,0xbe78cb5c,4
+np.float32,0x3f4985f6,0xbe7503da,4
+np.float32,0x3f483fdf,0xbe7b8212,4
+np.float32,0x3f4b1c76,0xbe6cfa67,4
+np.float32,0x3f480b2e,0xbe7c8fa8,4
+np.float32,0x3f48745f,0xbe7a75bf,4
+np.float32,0x3f485bda,0xbe7af308,4
+np.float32,0x3f47a660,0xbe7e942c,4
+np.float32,0x3f47d4d5,0xbe7da600,4
+np.float32,0x3f4b0a26,0xbe6d56be,4
+np.float32,0x3f4a4883,0xbe712924,4
+np.float32,0x3f4769e7,0xbe7fca84,4
+np.float32,0x3f499702,0xbe74ad3f,4
+np.float32,0x3f494ab1,0xbe763131,4
+np.float32,0x3f476b69,0xbe7fc2c6,4
+np.float32,0x3f4884e8,0xbe7a214a,4
+np.float32,0x3f486945,0xbe7aae76,4
diff --git a/numpy/core/tests/data/umath-validation-set-sin b/numpy/core/tests/data/umath-validation-set-sin
new file mode 100644
index 000000000..a56273195
--- /dev/null
+++ b/numpy/core/tests/data/umath-validation-set-sin
@@ -0,0 +1,707 @@
+dtype,input,output,ulperrortol
+## +ve denormals ##
+np.float32,0x004b4716,0x004b4716,2
+np.float32,0x007b2490,0x007b2490,2
+np.float32,0x007c99fa,0x007c99fa,2
+np.float32,0x00734a0c,0x00734a0c,2
+np.float32,0x0070de24,0x0070de24,2
+np.float32,0x007fffff,0x007fffff,2
+np.float32,0x00000001,0x00000001,2
+## -ve denormals ##
+np.float32,0x80495d65,0x80495d65,2
+np.float32,0x806894f6,0x806894f6,2
+np.float32,0x80555a76,0x80555a76,2
+np.float32,0x804e1fb8,0x804e1fb8,2
+np.float32,0x80687de9,0x80687de9,2
+np.float32,0x807fffff,0x807fffff,2
+np.float32,0x80000001,0x80000001,2
+## +/-0.0f, +/-FLT_MIN +/-FLT_MAX ##
+np.float32,0x00000000,0x00000000,2
+np.float32,0x80000000,0x80000000,2
+np.float32,0x00800000,0x00800000,2
+np.float32,0x7f7fffff,0xbf0599b3,2
+np.float32,0x80800000,0x80800000,2
+np.float32,0xff7fffff,0x3f0599b3,2
+## 1.00f ##
+np.float32,0x3f800000,0x3f576aa4,2
+np.float32,0x3f800001,0x3f576aa6,2
+np.float32,0x3f800002,0x3f576aa7,2
+np.float32,0xc090a8b0,0x3f7b4e48,2
+np.float32,0x41ce3184,0x3f192d43,2
+np.float32,0xc1d85848,0xbf7161cb,2
+np.float32,0x402b8820,0x3ee3f29f,2
+np.float32,0x42b4e454,0x3f1d0151,2
+np.float32,0x42a67a60,0x3f7ffa4c,2
+np.float32,0x41d92388,0x3f67beef,2
+np.float32,0x422dd66c,0xbeffb0c1,2
+np.float32,0xc28f5be6,0xbf0bae79,2
+np.float32,0x41ab2674,0x3f0ffe2b,2
+np.float32,0xd0102756,0x3f227e8a,2
+np.float32,0xcf99405e,0x3f73ad00,2
+np.float32,0xcfd83a12,0xbf7151a7,2
+np.float32,0x4fb54db0,0xbe46354b,2
+np.float32,0xcfcca29d,0xbe9345e6,2
+np.float32,0xceec2ac0,0x3e98dc89,2
+np.float32,0xcfdca97f,0xbf60b2b4,2
+np.float32,0xcfe92b0a,0xbf222705,2
+np.float32,0x5014b0eb,0x3f63e75c,2
+np.float32,0xcfa7ee96,0x3f62ada4,2
+np.float32,0x754c09a0,0xbf617056,2
+np.float32,0x77a731fb,0x3f44472b,2
+np.float32,0x76de2494,0xbe680739,2
+np.float32,0xf74920dc,0xbf193338,2
+np.float32,0x7707a312,0xbf6f51b1,2
+np.float32,0x75bf9790,0xbd0f1a47,2
+np.float32,0xf4ca7c40,0xbf7d45e7,2
+np.float32,0x77e91899,0x3f767181,2
+np.float32,0xf74c9820,0xbd685b75,2
+np.float32,0x7785ca29,0x3f78ee61,2
+np.float32,0x3f490fdb,0x3f3504f3,2
+np.float32,0xbf490fdb,0xbf3504f3,2
+np.float32,0x3fc90fdb,0x3f800000,2
+np.float32,0xbfc90fdb,0xbf800000,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x3fc90fdb,0x3f800000,2
+np.float32,0xbfc90fdb,0xbf800000,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x4016cbe4,0x3f3504f3,2
+np.float32,0xc016cbe4,0xbf3504f3,2
+np.float32,0x4096cbe4,0xbf800000,2
+np.float32,0xc096cbe4,0x3f800000,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x40490fdb,0xb3bbbd2e,2
+np.float32,0xc0490fdb,0x33bbbd2e,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x407b53d2,0xbf3504f5,2
+np.float32,0xc07b53d2,0x3f3504f5,2
+np.float32,0x40fb53d2,0x3f800000,2
+np.float32,0xc0fb53d2,0xbf800000,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x4096cbe4,0xbf800000,2
+np.float32,0xc096cbe4,0x3f800000,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x40afede0,0xbf3504ef,2
+np.float32,0xc0afede0,0x3f3504ef,2
+np.float32,0x412fede0,0xbf800000,2
+np.float32,0xc12fede0,0x3f800000,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x40c90fdb,0x343bbd2e,2
+np.float32,0xc0c90fdb,0xb43bbd2e,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x40e231d6,0x3f3504f3,2
+np.float32,0xc0e231d6,0xbf3504f3,2
+np.float32,0x416231d6,0x3f800000,2
+np.float32,0xc16231d6,0xbf800000,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x40fb53d2,0x3f800000,2
+np.float32,0xc0fb53d2,0xbf800000,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x410a3ae7,0x3f3504eb,2
+np.float32,0xc10a3ae7,0xbf3504eb,2
+np.float32,0x418a3ae7,0xbf800000,2
+np.float32,0xc18a3ae7,0x3f800000,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x4116cbe4,0xb2ccde2e,2
+np.float32,0xc116cbe4,0x32ccde2e,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x41235ce2,0xbf3504f7,2
+np.float32,0xc1235ce2,0x3f3504f7,2
+np.float32,0x41a35ce2,0x3f800000,2
+np.float32,0xc1a35ce2,0xbf800000,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x412fede0,0xbf800000,2
+np.float32,0xc12fede0,0x3f800000,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x413c7edd,0xbf3504f3,2
+np.float32,0xc13c7edd,0x3f3504f3,2
+np.float32,0x41bc7edd,0xbf800000,2
+np.float32,0xc1bc7edd,0x3f800000,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x41490fdb,0x34bbbd2e,2
+np.float32,0xc1490fdb,0xb4bbbd2e,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x4155a0d9,0x3f3504fb,2
+np.float32,0xc155a0d9,0xbf3504fb,2
+np.float32,0x41d5a0d9,0x3f800000,2
+np.float32,0xc1d5a0d9,0xbf800000,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x416231d6,0x3f800000,2
+np.float32,0xc16231d6,0xbf800000,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x416ec2d4,0x3f3504ef,2
+np.float32,0xc16ec2d4,0xbf3504ef,2
+np.float32,0x41eec2d4,0xbf800000,2
+np.float32,0xc1eec2d4,0x3f800000,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x417b53d2,0xb535563d,2
+np.float32,0xc17b53d2,0x3535563d,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x4183f268,0xbf3504ff,2
+np.float32,0xc183f268,0x3f3504ff,2
+np.float32,0x4203f268,0x3f800000,2
+np.float32,0xc203f268,0xbf800000,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x418a3ae7,0xbf800000,2
+np.float32,0xc18a3ae7,0x3f800000,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x41908365,0xbf3504f6,2
+np.float32,0xc1908365,0x3f3504f6,2
+np.float32,0x42108365,0xbf800000,2
+np.float32,0xc2108365,0x3f800000,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x4196cbe4,0x334cde2e,2
+np.float32,0xc196cbe4,0xb34cde2e,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x419d1463,0x3f3504f8,2
+np.float32,0xc19d1463,0xbf3504f8,2
+np.float32,0x421d1463,0x3f800000,2
+np.float32,0xc21d1463,0xbf800000,2
+np.float32,0x429d1463,0xb5c55799,2
+np.float32,0xc29d1463,0x35c55799,2
+np.float32,0x41a35ce2,0x3f800000,2
+np.float32,0xc1a35ce2,0xbf800000,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x42a35ce2,0x363889b6,2
+np.float32,0xc2a35ce2,0xb63889b6,2
+np.float32,0x41a9a561,0x3f3504e7,2
+np.float32,0xc1a9a561,0xbf3504e7,2
+np.float32,0x4229a561,0xbf800000,2
+np.float32,0xc229a561,0x3f800000,2
+np.float32,0x42a9a561,0xb68733d0,2
+np.float32,0xc2a9a561,0x368733d0,2
+np.float32,0x41afede0,0xb5b222c4,2
+np.float32,0xc1afede0,0x35b222c4,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x42afede0,0x36b222c4,2
+np.float32,0xc2afede0,0xb6b222c4,2
+np.float32,0x41b6365e,0xbf3504f0,2
+np.float32,0xc1b6365e,0x3f3504f0,2
+np.float32,0x4236365e,0x3f800000,2
+np.float32,0xc236365e,0xbf800000,2
+np.float32,0x42b6365e,0x358bb91c,2
+np.float32,0xc2b6365e,0xb58bb91c,2
+np.float32,0x41bc7edd,0xbf800000,2
+np.float32,0xc1bc7edd,0x3f800000,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x42bc7edd,0x34800add,2
+np.float32,0xc2bc7edd,0xb4800add,2
+np.float32,0x41c2c75c,0xbf3504ef,2
+np.float32,0xc1c2c75c,0x3f3504ef,2
+np.float32,0x4242c75c,0xbf800000,2
+np.float32,0xc242c75c,0x3f800000,2
+np.float32,0x42c2c75c,0xb5cbbe8a,2
+np.float32,0xc2c2c75c,0x35cbbe8a,2
+np.float32,0x41c90fdb,0x353bbd2e,2
+np.float32,0xc1c90fdb,0xb53bbd2e,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x42c90fdb,0x363bbd2e,2
+np.float32,0xc2c90fdb,0xb63bbd2e,2
+np.float32,0x41cf585a,0x3f3504ff,2
+np.float32,0xc1cf585a,0xbf3504ff,2
+np.float32,0x424f585a,0x3f800000,2
+np.float32,0xc24f585a,0xbf800000,2
+np.float32,0x42cf585a,0xb688cd8c,2
+np.float32,0xc2cf585a,0x3688cd8c,2
+np.float32,0x41d5a0d9,0x3f800000,2
+np.float32,0xc1d5a0d9,0xbf800000,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x42d5a0d9,0x36b3bc81,2
+np.float32,0xc2d5a0d9,0xb6b3bc81,2
+np.float32,0x41dbe958,0x3f3504e0,2
+np.float32,0xc1dbe958,0xbf3504e0,2
+np.float32,0x425be958,0xbf800000,2
+np.float32,0xc25be958,0x3f800000,2
+np.float32,0x42dbe958,0xb6deab75,2
+np.float32,0xc2dbe958,0x36deab75,2
+np.float32,0x41e231d6,0xb399a6a2,2
+np.float32,0xc1e231d6,0x3399a6a2,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x42e231d6,0x3499a6a2,2
+np.float32,0xc2e231d6,0xb499a6a2,2
+np.float32,0x41e87a55,0xbf3504f8,2
+np.float32,0xc1e87a55,0x3f3504f8,2
+np.float32,0x42687a55,0x3f800000,2
+np.float32,0xc2687a55,0xbf800000,2
+np.float32,0x42e87a55,0xb5d2257b,2
+np.float32,0xc2e87a55,0x35d2257b,2
+np.float32,0x41eec2d4,0xbf800000,2
+np.float32,0xc1eec2d4,0x3f800000,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x42eec2d4,0x363ef0a7,2
+np.float32,0xc2eec2d4,0xb63ef0a7,2
+np.float32,0x41f50b53,0xbf3504e7,2
+np.float32,0xc1f50b53,0x3f3504e7,2
+np.float32,0x42750b53,0xbf800000,2
+np.float32,0xc2750b53,0x3f800000,2
+np.float32,0x42f50b53,0xb68a6748,2
+np.float32,0xc2f50b53,0x368a6748,2
+np.float32,0x41fb53d2,0x35b5563d,2
+np.float32,0xc1fb53d2,0xb5b5563d,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x42fb53d2,0x36b5563d,2
+np.float32,0xc2fb53d2,0xb6b5563d,2
+np.float32,0x4200ce28,0x3f3504f0,2
+np.float32,0xc200ce28,0xbf3504f0,2
+np.float32,0x4280ce28,0x3f800000,2
+np.float32,0xc280ce28,0xbf800000,2
+np.float32,0x4300ce28,0x357dd672,2
+np.float32,0xc300ce28,0xb57dd672,2
+np.float32,0x4203f268,0x3f800000,2
+np.float32,0xc203f268,0xbf800000,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x4303f268,0x37059a13,2
+np.float32,0xc303f268,0xb7059a13,2
+np.float32,0x420716a7,0x3f3504ee,2
+np.float32,0xc20716a7,0xbf3504ee,2
+np.float32,0x428716a7,0xbf800000,2
+np.float32,0xc28716a7,0x3f800000,2
+np.float32,0x430716a7,0xb5d88c6d,2
+np.float32,0xc30716a7,0x35d88c6d,2
+np.float32,0x420a3ae7,0xb6308908,2
+np.float32,0xc20a3ae7,0x36308908,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x430a3ae7,0x37308908,2
+np.float32,0xc30a3ae7,0xb7308908,2
+np.float32,0x420d5f26,0xbf350500,2
+np.float32,0xc20d5f26,0x3f350500,2
+np.float32,0x428d5f26,0x3f800000,2
+np.float32,0xc28d5f26,0xbf800000,2
+np.float32,0x430d5f26,0xb68c0105,2
+np.float32,0xc30d5f26,0x368c0105,2
+np.float32,0x42108365,0xbf800000,2
+np.float32,0xc2108365,0x3f800000,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x43108365,0xb612200d,2
+np.float32,0xc3108365,0x3612200d,2
+np.float32,0x4213a7a5,0xbf3504df,2
+np.float32,0xc213a7a5,0x3f3504df,2
+np.float32,0x4293a7a5,0xbf800000,2
+np.float32,0xc293a7a5,0x3f800000,2
+np.float32,0x4313a7a5,0xb6e1deee,2
+np.float32,0xc313a7a5,0x36e1deee,2
+np.float32,0x4216cbe4,0x33ccde2e,2
+np.float32,0xc216cbe4,0xb3ccde2e,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x4316cbe4,0x34ccde2e,2
+np.float32,0xc316cbe4,0xb4ccde2e,2
+np.float32,0x4219f024,0x3f35050f,2
+np.float32,0xc219f024,0xbf35050f,2
+np.float32,0x4299f024,0x3f800000,2
+np.float32,0xc299f024,0xbf800000,2
+np.float32,0x4319f024,0xb71bde6c,2
+np.float32,0xc319f024,0x371bde6c,2
+np.float32,0x421d1463,0x3f800000,2
+np.float32,0xc21d1463,0xbf800000,2
+np.float32,0x429d1463,0xb5c55799,2
+np.float32,0xc29d1463,0x35c55799,2
+np.float32,0x431d1463,0x36455799,2
+np.float32,0xc31d1463,0xb6455799,2
+np.float32,0x422038a3,0x3f3504d0,2
+np.float32,0xc22038a3,0xbf3504d0,2
+np.float32,0x42a038a3,0xbf800000,2
+np.float32,0xc2a038a3,0x3f800000,2
+np.float32,0x432038a3,0xb746cd61,2
+np.float32,0xc32038a3,0x3746cd61,2
+np.float32,0x42235ce2,0xb5b889b6,2
+np.float32,0xc2235ce2,0x35b889b6,2
+np.float32,0x42a35ce2,0x363889b6,2
+np.float32,0xc2a35ce2,0xb63889b6,2
+np.float32,0x43235ce2,0x36b889b6,2
+np.float32,0xc3235ce2,0xb6b889b6,2
+np.float32,0x42268121,0xbf3504f1,2
+np.float32,0xc2268121,0x3f3504f1,2
+np.float32,0x42a68121,0x3f800000,2
+np.float32,0xc2a68121,0xbf800000,2
+np.float32,0x43268121,0x35643aac,2
+np.float32,0xc3268121,0xb5643aac,2
+np.float32,0x4229a561,0xbf800000,2
+np.float32,0xc229a561,0x3f800000,2
+np.float32,0x42a9a561,0xb68733d0,2
+np.float32,0xc2a9a561,0x368733d0,2
+np.float32,0x4329a561,0x370733d0,2
+np.float32,0xc329a561,0xb70733d0,2
+np.float32,0x422cc9a0,0xbf3504ee,2
+np.float32,0xc22cc9a0,0x3f3504ee,2
+np.float32,0x42acc9a0,0xbf800000,2
+np.float32,0xc2acc9a0,0x3f800000,2
+np.float32,0x432cc9a0,0xb5e55a50,2
+np.float32,0xc32cc9a0,0x35e55a50,2
+np.float32,0x422fede0,0x363222c4,2
+np.float32,0xc22fede0,0xb63222c4,2
+np.float32,0x42afede0,0x36b222c4,2
+np.float32,0xc2afede0,0xb6b222c4,2
+np.float32,0x432fede0,0x373222c4,2
+np.float32,0xc32fede0,0xb73222c4,2
+np.float32,0x4233121f,0x3f350500,2
+np.float32,0xc233121f,0xbf350500,2
+np.float32,0x42b3121f,0x3f800000,2
+np.float32,0xc2b3121f,0xbf800000,2
+np.float32,0x4333121f,0xb68f347d,2
+np.float32,0xc333121f,0x368f347d,2
+np.float32,0x4236365e,0x3f800000,2
+np.float32,0xc236365e,0xbf800000,2
+np.float32,0x42b6365e,0x358bb91c,2
+np.float32,0xc2b6365e,0xb58bb91c,2
+np.float32,0x4336365e,0xb60bb91c,2
+np.float32,0xc336365e,0x360bb91c,2
+np.float32,0x42395a9e,0x3f3504df,2
+np.float32,0xc2395a9e,0xbf3504df,2
+np.float32,0x42b95a9e,0xbf800000,2
+np.float32,0xc2b95a9e,0x3f800000,2
+np.float32,0x43395a9e,0xb6e51267,2
+np.float32,0xc3395a9e,0x36e51267,2
+np.float32,0x423c7edd,0xb4000add,2
+np.float32,0xc23c7edd,0x34000add,2
+np.float32,0x42bc7edd,0x34800add,2
+np.float32,0xc2bc7edd,0xb4800add,2
+np.float32,0x433c7edd,0x35000add,2
+np.float32,0xc33c7edd,0xb5000add,2
+np.float32,0x423fa31d,0xbf35050f,2
+np.float32,0xc23fa31d,0x3f35050f,2
+np.float32,0x42bfa31d,0x3f800000,2
+np.float32,0xc2bfa31d,0xbf800000,2
+np.float32,0x433fa31d,0xb71d7828,2
+np.float32,0xc33fa31d,0x371d7828,2
+np.float32,0x4242c75c,0xbf800000,2
+np.float32,0xc242c75c,0x3f800000,2
+np.float32,0x42c2c75c,0xb5cbbe8a,2
+np.float32,0xc2c2c75c,0x35cbbe8a,2
+np.float32,0x4342c75c,0x364bbe8a,2
+np.float32,0xc342c75c,0xb64bbe8a,2
+np.float32,0x4245eb9c,0xbf3504d0,2
+np.float32,0xc245eb9c,0x3f3504d0,2
+np.float32,0x42c5eb9c,0xbf800000,2
+np.float32,0xc2c5eb9c,0x3f800000,2
+np.float32,0x4345eb9c,0xb748671d,2
+np.float32,0xc345eb9c,0x3748671d,2
+np.float32,0x42490fdb,0x35bbbd2e,2
+np.float32,0xc2490fdb,0xb5bbbd2e,2
+np.float32,0x42c90fdb,0x363bbd2e,2
+np.float32,0xc2c90fdb,0xb63bbd2e,2
+np.float32,0x43490fdb,0x36bbbd2e,2
+np.float32,0xc3490fdb,0xb6bbbd2e,2
+np.float32,0x424c341a,0x3f3504f1,2
+np.float32,0xc24c341a,0xbf3504f1,2
+np.float32,0x42cc341a,0x3f800000,2
+np.float32,0xc2cc341a,0xbf800000,2
+np.float32,0x434c341a,0x354a9ee6,2
+np.float32,0xc34c341a,0xb54a9ee6,2
+np.float32,0x424f585a,0x3f800000,2
+np.float32,0xc24f585a,0xbf800000,2
+np.float32,0x42cf585a,0xb688cd8c,2
+np.float32,0xc2cf585a,0x3688cd8c,2
+np.float32,0x434f585a,0x3708cd8c,2
+np.float32,0xc34f585a,0xb708cd8c,2
+np.float32,0x42527c99,0x3f3504ee,2
+np.float32,0xc2527c99,0xbf3504ee,2
+np.float32,0x42d27c99,0xbf800000,2
+np.float32,0xc2d27c99,0x3f800000,2
+np.float32,0x43527c99,0xb5f22833,2
+np.float32,0xc3527c99,0x35f22833,2
+np.float32,0x4255a0d9,0xb633bc81,2
+np.float32,0xc255a0d9,0x3633bc81,2
+np.float32,0x42d5a0d9,0x36b3bc81,2
+np.float32,0xc2d5a0d9,0xb6b3bc81,2
+np.float32,0x4355a0d9,0x3733bc81,2
+np.float32,0xc355a0d9,0xb733bc81,2
+np.float32,0x4258c518,0xbf350500,2
+np.float32,0xc258c518,0x3f350500,2
+np.float32,0x42d8c518,0x3f800000,2
+np.float32,0xc2d8c518,0xbf800000,2
+np.float32,0x4358c518,0xb69267f6,2
+np.float32,0xc358c518,0x369267f6,2
+np.float32,0x425be958,0xbf800000,2
+np.float32,0xc25be958,0x3f800000,2
+np.float32,0x42dbe958,0xb6deab75,2
+np.float32,0xc2dbe958,0x36deab75,2
+np.float32,0x435be958,0x375eab75,2
+np.float32,0xc35be958,0xb75eab75,2
+np.float32,0x425f0d97,0xbf3504df,2
+np.float32,0xc25f0d97,0x3f3504df,2
+np.float32,0x42df0d97,0xbf800000,2
+np.float32,0xc2df0d97,0x3f800000,2
+np.float32,0x435f0d97,0xb6e845e0,2
+np.float32,0xc35f0d97,0x36e845e0,2
+np.float32,0x426231d6,0x3419a6a2,2
+np.float32,0xc26231d6,0xb419a6a2,2
+np.float32,0x42e231d6,0x3499a6a2,2
+np.float32,0xc2e231d6,0xb499a6a2,2
+np.float32,0x436231d6,0x3519a6a2,2
+np.float32,0xc36231d6,0xb519a6a2,2
+np.float32,0x42655616,0x3f35050f,2
+np.float32,0xc2655616,0xbf35050f,2
+np.float32,0x42e55616,0x3f800000,2
+np.float32,0xc2e55616,0xbf800000,2
+np.float32,0x43655616,0xb71f11e5,2
+np.float32,0xc3655616,0x371f11e5,2
+np.float32,0x42687a55,0x3f800000,2
+np.float32,0xc2687a55,0xbf800000,2
+np.float32,0x42e87a55,0xb5d2257b,2
+np.float32,0xc2e87a55,0x35d2257b,2
+np.float32,0x43687a55,0x3652257b,2
+np.float32,0xc3687a55,0xb652257b,2
+np.float32,0x426b9e95,0x3f3504cf,2
+np.float32,0xc26b9e95,0xbf3504cf,2
+np.float32,0x42eb9e95,0xbf800000,2
+np.float32,0xc2eb9e95,0x3f800000,2
+np.float32,0x436b9e95,0xb74a00d9,2
+np.float32,0xc36b9e95,0x374a00d9,2
+np.float32,0x426ec2d4,0xb5bef0a7,2
+np.float32,0xc26ec2d4,0x35bef0a7,2
+np.float32,0x42eec2d4,0x363ef0a7,2
+np.float32,0xc2eec2d4,0xb63ef0a7,2
+np.float32,0x436ec2d4,0x36bef0a7,2
+np.float32,0xc36ec2d4,0xb6bef0a7,2
+np.float32,0x4271e713,0xbf3504f1,2
+np.float32,0xc271e713,0x3f3504f1,2
+np.float32,0x42f1e713,0x3f800000,2
+np.float32,0xc2f1e713,0xbf800000,2
+np.float32,0x4371e713,0x35310321,2
+np.float32,0xc371e713,0xb5310321,2
+np.float32,0x42750b53,0xbf800000,2
+np.float32,0xc2750b53,0x3f800000,2
+np.float32,0x42f50b53,0xb68a6748,2
+np.float32,0xc2f50b53,0x368a6748,2
+np.float32,0x43750b53,0x370a6748,2
+np.float32,0xc3750b53,0xb70a6748,2
+np.float32,0x42782f92,0xbf3504ee,2
+np.float32,0xc2782f92,0x3f3504ee,2
+np.float32,0x42f82f92,0xbf800000,2
+np.float32,0xc2f82f92,0x3f800000,2
+np.float32,0x43782f92,0xb5fef616,2
+np.float32,0xc3782f92,0x35fef616,2
+np.float32,0x427b53d2,0x3635563d,2
+np.float32,0xc27b53d2,0xb635563d,2
+np.float32,0x42fb53d2,0x36b5563d,2
+np.float32,0xc2fb53d2,0xb6b5563d,2
+np.float32,0x437b53d2,0x3735563d,2
+np.float32,0xc37b53d2,0xb735563d,2
+np.float32,0x427e7811,0x3f350500,2
+np.float32,0xc27e7811,0xbf350500,2
+np.float32,0x42fe7811,0x3f800000,2
+np.float32,0xc2fe7811,0xbf800000,2
+np.float32,0x437e7811,0xb6959b6f,2
+np.float32,0xc37e7811,0x36959b6f,2
+np.float32,0x4280ce28,0x3f800000,2
+np.float32,0xc280ce28,0xbf800000,2
+np.float32,0x4300ce28,0x357dd672,2
+np.float32,0xc300ce28,0xb57dd672,2
+np.float32,0x4380ce28,0xb5fdd672,2
+np.float32,0xc380ce28,0x35fdd672,2
+np.float32,0x42826048,0x3f3504de,2
+np.float32,0xc2826048,0xbf3504de,2
+np.float32,0x43026048,0xbf800000,2
+np.float32,0xc3026048,0x3f800000,2
+np.float32,0x43826048,0xb6eb7958,2
+np.float32,0xc3826048,0x36eb7958,2
+np.float32,0x4283f268,0xb6859a13,2
+np.float32,0xc283f268,0x36859a13,2
+np.float32,0x4303f268,0x37059a13,2
+np.float32,0xc303f268,0xb7059a13,2
+np.float32,0x4383f268,0x37859a13,2
+np.float32,0xc383f268,0xb7859a13,2
+np.float32,0x42858487,0xbf3504e2,2
+np.float32,0xc2858487,0x3f3504e2,2
+np.float32,0x43058487,0x3f800000,2
+np.float32,0xc3058487,0xbf800000,2
+np.float32,0x43858487,0x36bea8be,2
+np.float32,0xc3858487,0xb6bea8be,2
+np.float32,0x428716a7,0xbf800000,2
+np.float32,0xc28716a7,0x3f800000,2
+np.float32,0x430716a7,0xb5d88c6d,2
+np.float32,0xc30716a7,0x35d88c6d,2
+np.float32,0x438716a7,0x36588c6d,2
+np.float32,0xc38716a7,0xb6588c6d,2
+np.float32,0x4288a8c7,0xbf3504cf,2
+np.float32,0xc288a8c7,0x3f3504cf,2
+np.float32,0x4308a8c7,0xbf800000,2
+np.float32,0xc308a8c7,0x3f800000,2
+np.float32,0x4388a8c7,0xb74b9a96,2
+np.float32,0xc388a8c7,0x374b9a96,2
+np.float32,0x428a3ae7,0x36b08908,2
+np.float32,0xc28a3ae7,0xb6b08908,2
+np.float32,0x430a3ae7,0x37308908,2
+np.float32,0xc30a3ae7,0xb7308908,2
+np.float32,0x438a3ae7,0x37b08908,2
+np.float32,0xc38a3ae7,0xb7b08908,2
+np.float32,0x428bcd06,0x3f3504f2,2
+np.float32,0xc28bcd06,0xbf3504f2,2
+np.float32,0x430bcd06,0x3f800000,2
+np.float32,0xc30bcd06,0xbf800000,2
+np.float32,0x438bcd06,0x3517675b,2
+np.float32,0xc38bcd06,0xb517675b,2
+np.float32,0x428d5f26,0x3f800000,2
+np.float32,0xc28d5f26,0xbf800000,2
+np.float32,0x430d5f26,0xb68c0105,2
+np.float32,0xc30d5f26,0x368c0105,2
+np.float32,0x438d5f26,0x370c0105,2
+np.float32,0xc38d5f26,0xb70c0105,2
+np.float32,0x428ef146,0x3f3504c0,2
+np.float32,0xc28ef146,0xbf3504c0,2
+np.float32,0x430ef146,0xbf800000,2
+np.float32,0xc30ef146,0x3f800000,2
+np.float32,0x438ef146,0xb790bc40,2
+np.float32,0xc38ef146,0x3790bc40,2
+np.float32,0x42908365,0x3592200d,2
+np.float32,0xc2908365,0xb592200d,2
+np.float32,0x43108365,0xb612200d,2
+np.float32,0xc3108365,0x3612200d,2
+np.float32,0x43908365,0xb692200d,2
+np.float32,0xc3908365,0x3692200d,2
+np.float32,0x42921585,0xbf350501,2
+np.float32,0xc2921585,0x3f350501,2
+np.float32,0x43121585,0x3f800000,2
+np.float32,0xc3121585,0xbf800000,2
+np.float32,0x43921585,0xb698cee8,2
+np.float32,0xc3921585,0x3698cee8,2
+np.float32,0x4293a7a5,0xbf800000,2
+np.float32,0xc293a7a5,0x3f800000,2
+np.float32,0x4313a7a5,0xb6e1deee,2
+np.float32,0xc313a7a5,0x36e1deee,2
+np.float32,0x4393a7a5,0x3761deee,2
+np.float32,0xc393a7a5,0xb761deee,2
+np.float32,0x429539c5,0xbf3504b1,2
+np.float32,0xc29539c5,0x3f3504b1,2
+np.float32,0x431539c5,0xbf800000,2
+np.float32,0xc31539c5,0x3f800000,2
+np.float32,0x439539c5,0xb7bbab34,2
+np.float32,0xc39539c5,0x37bbab34,2
+np.float32,0x4296cbe4,0x344cde2e,2
+np.float32,0xc296cbe4,0xb44cde2e,2
+np.float32,0x4316cbe4,0x34ccde2e,2
+np.float32,0xc316cbe4,0xb4ccde2e,2
+np.float32,0x4396cbe4,0x354cde2e,2
+np.float32,0xc396cbe4,0xb54cde2e,2
+np.float32,0x42985e04,0x3f350510,2
+np.float32,0xc2985e04,0xbf350510,2
+np.float32,0x43185e04,0x3f800000,2
+np.float32,0xc3185e04,0xbf800000,2
+np.float32,0x43985e04,0xb722455d,2
+np.float32,0xc3985e04,0x3722455d,2
+np.float32,0x4299f024,0x3f800000,2
+np.float32,0xc299f024,0xbf800000,2
+np.float32,0x4319f024,0xb71bde6c,2
+np.float32,0xc319f024,0x371bde6c,2
+np.float32,0x4399f024,0x379bde6c,2
+np.float32,0xc399f024,0xb79bde6c,2
+np.float32,0x429b8243,0x3f3504fc,2
+np.float32,0xc29b8243,0xbf3504fc,2
+np.float32,0x431b8243,0xbf800000,2
+np.float32,0xc31b8243,0x3f800000,2
+np.float32,0x439b8243,0x364b2eb8,2
+np.float32,0xc39b8243,0xb64b2eb8,2
+np.float32,0x435b2047,0xbf350525,2
+np.float32,0x42a038a2,0xbf800000,2
+np.float32,0x432038a2,0x3664ca7e,2
+np.float32,0x4345eb9b,0x365e638c,2
+np.float32,0x42c5eb9b,0xbf800000,2
+np.float32,0x42eb9e94,0xbf800000,2
+np.float32,0x4350ea79,0x3f800000,2
+np.float32,0x42dbe957,0x3585522a,2
+np.float32,0x425be957,0xbf800000,2
+np.float32,0x435be957,0xb605522a,2
+np.float32,0x487fe5ab,0xbf7ffffd,2
+np.float32,0x497fe5ab,0xbb14017d,2
+np.float32,0x49ffe5ab,0xbb940164,2
+np.float32,0x49ffeb37,0x3f7fff56,2
+np.float32,0x497ff0c3,0x3f7fffd6,2
+np.float32,0x49fff0c3,0x3b930487,2
+np.float32,0x49fff64f,0xbf7fff58,2
+np.float32,0x497ffbdb,0x3b1207c0,2
+np.float32,0x49fffbdb,0xbb9207a9,2
+np.float32,0x48fffbdb,0xbf7ffff6,2
+np.float32,0x4e736e56,0x397fa7f2,2
+np.float32,0x4d4da377,0xb57c64bc,2
+np.float32,0x4ece58c3,0xb80846c8,2
+np.float32,0x4ee0db9c,0x394c4786,2
+np.float32,0x4dee7002,0x381bce96,2
+np.float32,0x4ee86afc,0x3f800000,2
+np.float32,0x4dca4f3f,0xb8e25111,2
+np.float32,0x4ecb48af,0xbf800000,2
+np.float32,0x4e51e33f,0xb8a4fa6f,2
+np.float32,0x4ef5f421,0x387ca7df,2
+np.float32,0x476362a2,0xbd7ff911,2
+np.float32,0x464c99a4,0x3e7f4d41,2
+np.float32,0x4471f73d,0x3e7fe1b0,2
+np.float32,0x445a6752,0x3e7ef367,2
+np.float32,0x474fa400,0x3e7f9fcd,2
+np.float32,0x47c9e70e,0xbb4bba09,2
+np.float32,0x45c1e72f,0xbe7fc7af,2
+np.float32,0x4558c91d,0x3e7e9f31,2
+np.float32,0x43784f94,0xbdff6654,2
+np.float32,0x466e8500,0xbe7ea0a3,2
+np.float32,0x468e1c25,0x3e7e22fb,2
+np.float32,0x47d28adc,0xbe7d5e6b,2
+np.float32,0x44ea6cfc,0x3dff70c3,2
+np.float32,0x4605126c,0x3e7f89ef,2
+np.float32,0x4788b3c6,0xbb87d853,2
+np.float32,0x4531b042,0x3dffd163,2
+np.float32,0x47e46c29,0xbe7def2b,2
+np.float32,0x47c10e07,0xbdff63d4,2
+np.float32,0x43f1f71d,0x3dfff387,2
+np.float32,0x47c3e38c,0x3e7f0b2f,2
+np.float32,0x462c3fa5,0xbd7fe13d,2
+np.float32,0x441c5354,0xbdff76b4,2
+np.float32,0x44908b69,0x3e7dcf0d,2
+np.float32,0x478813ad,0xbe7e9d80,2
+np.float32,0x441c4351,0x3dff937b,2
diff --git a/numpy/core/tests/test__exceptions.py b/numpy/core/tests/test__exceptions.py
new file mode 100644
index 000000000..494b51f34
--- /dev/null
+++ b/numpy/core/tests/test__exceptions.py
@@ -0,0 +1,42 @@
+"""
+Tests of the ._exceptions module. Primarily for exercising the __str__ methods.
+"""
+import numpy as np
+
+_ArrayMemoryError = np.core._exceptions._ArrayMemoryError
+
+class TestArrayMemoryError:
+ def test_str(self):
+ e = _ArrayMemoryError((1023,), np.dtype(np.uint8))
+ str(e) # not crashing is enough
+
+ # testing these properties is easier than testing the full string repr
+ def test__size_to_string(self):
+ """ Test e._size_to_string """
+ f = _ArrayMemoryError._size_to_string
+ Ki = 1024
+ assert f(0) == '0 bytes'
+ assert f(1) == '1 bytes'
+ assert f(1023) == '1023 bytes'
+ assert f(Ki) == '1.00 KiB'
+ assert f(Ki+1) == '1.00 KiB'
+ assert f(10*Ki) == '10.0 KiB'
+ assert f(int(999.4*Ki)) == '999. KiB'
+ assert f(int(1023.4*Ki)) == '1023. KiB'
+ assert f(int(1023.5*Ki)) == '1.00 MiB'
+ assert f(Ki*Ki) == '1.00 MiB'
+
+ # 1023.9999 Mib should round to 1 GiB
+ assert f(int(Ki*Ki*Ki*0.9999)) == '1.00 GiB'
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki) == '1.00 EiB'
+ # larger than sys.maxsize, adding larger prefices isn't going to help
+ # anyway.
+ assert f(Ki*Ki*Ki*Ki*Ki*Ki*123456) == '123456. EiB'
+
+ def test__total_size(self):
+ """ Test e._total_size """
+ e = _ArrayMemoryError((1,), np.dtype(np.uint8))
+ assert e._total_size == 1
+
+ e = _ArrayMemoryError((2, 4), np.dtype((np.uint64, 16)))
+ assert e._total_size == 1024
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 9755e7b36..32e2ea537 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -3,8 +3,10 @@ from __future__ import division, absolute_import, print_function
import sys
import numpy as np
+import pytest
from numpy.testing import (
- assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT
+ assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
+ HAS_REFCOUNT
)
# Switch between new behaviour when NPY_RELAXED_STRIDES_CHECKING is set.
@@ -289,6 +291,14 @@ def test_array_astype():
a = np.array(1000, dtype='i4')
assert_raises(TypeError, a.astype, 'U1', casting='safe')
+@pytest.mark.parametrize("t",
+ np.sctypes['uint'] + np.sctypes['int'] + np.sctypes['float']
+)
+def test_array_astype_warning(t):
+ # test ComplexWarning when casting from complex to float or int
+ a = np.array(10, dtype=np.complex)
+ assert_warns(np.ComplexWarning, a.astype, t)
+
def test_copyto_fromscalar():
a = np.arange(6, dtype='f4').reshape(2, 3)
diff --git a/numpy/core/tests/test_arrayprint.py b/numpy/core/tests/test_arrayprint.py
index 6522c6e8a..702e68e76 100644
--- a/numpy/core/tests/test_arrayprint.py
+++ b/numpy/core/tests/test_arrayprint.py
@@ -90,6 +90,7 @@ class TestArrayRepr(object):
assert_equal(repr(x),
'sub(sub(sub(..., dtype=object), dtype=object), dtype=object)')
assert_equal(str(x), '...')
+ x[()] = 0 # resolve circular references for garbage collector
# nested 0d-subclass-object
x = sub(None)
@@ -124,11 +125,13 @@ class TestArrayRepr(object):
arr0d[()] = arr0d
assert_equal(repr(arr0d),
'array(array(..., dtype=object), dtype=object)')
+ arr0d[()] = 0 # resolve recursion for garbage collector
arr1d = np.array([None, None])
arr1d[1] = arr1d
assert_equal(repr(arr1d),
'array([None, array(..., dtype=object)], dtype=object)')
+ arr1d[1] = 0 # resolve recursion for garbage collector
first = np.array(None)
second = np.array(None)
@@ -136,6 +139,7 @@ class TestArrayRepr(object):
second[()] = first
assert_equal(repr(first),
'array(array(array(..., dtype=object), dtype=object), dtype=object)')
+ first[()] = 0 # resolve circular references for garbage collector
def test_containing_list(self):
# printing square brackets directly would be ambiguuous
@@ -258,11 +262,6 @@ class TestArray2String(object):
assert_(np.array2string(s, formatter={'numpystr':lambda s: s*2}) ==
'[abcabc defdef]')
- # check for backcompat that using FloatFormat works and emits warning
- with assert_warns(DeprecationWarning):
- fmt = np.core.arrayprint.FloatFormat(x, 9, 'maxprec', False)
- assert_equal(np.array2string(x, formatter={'float_kind': fmt}),
- '[0. 1. 2.]')
def test_structure_format(self):
dt = np.dtype([('name', np.str_, 16), ('grades', np.float64, (2,))])
@@ -842,6 +841,10 @@ class TestPrintOptions(object):
[[ 0.]]]])""")
)
+ def test_bad_args(self):
+ assert_raises(ValueError, np.set_printoptions, threshold=float('nan'))
+ assert_raises(TypeError, np.set_printoptions, threshold='1')
+ assert_raises(TypeError, np.set_printoptions, threshold=b'1')
def test_unicode_object_array():
import sys
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index 6303c043a..f99c0f72b 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pickle
import numpy
import numpy as np
@@ -8,7 +7,9 @@ import datetime
import pytest
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_warns, suppress_warnings,
+ assert_raises_regex,
)
+from numpy.compat import pickle
# Use pytz to test out various time zones if available
try:
@@ -257,6 +258,21 @@ class TestDateTime(object):
arr = np.array([dt, dt]).astype('datetime64')
assert_equal(arr.dtype, np.dtype('M8[us]'))
+ @pytest.mark.parametrize("unit", [
+ # test all date / time units and use
+ # "generic" to select generic unit
+ ("Y"), ("M"), ("W"), ("D"), ("h"), ("m"),
+ ("s"), ("ms"), ("us"), ("ns"), ("ps"),
+ ("fs"), ("as"), ("generic") ])
+ def test_timedelta_np_int_construction(self, unit):
+ # regression test for gh-7617
+ if unit != "generic":
+ assert_equal(np.timedelta64(np.int64(123), unit),
+ np.timedelta64(123, unit))
+ else:
+ assert_equal(np.timedelta64(np.int64(123)),
+ np.timedelta64(123))
+
def test_timedelta_scalar_construction(self):
# Construct with different units
assert_equal(np.timedelta64(7, 'D'),
@@ -1065,6 +1081,133 @@ class TestDateTime(object):
check(np.timedelta64(0), f, nat)
check(nat, f, nat)
+ @pytest.mark.parametrize("op1, op2, exp", [
+ # m8 same units round down
+ (np.timedelta64(7, 's'),
+ np.timedelta64(4, 's'),
+ 1),
+ # m8 same units round down with negative
+ (np.timedelta64(7, 's'),
+ np.timedelta64(-4, 's'),
+ -2),
+ # m8 same units negative no round down
+ (np.timedelta64(8, 's'),
+ np.timedelta64(-4, 's'),
+ -2),
+ # m8 different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(31, 's'),
+ 1),
+ # m8 generic units
+ (np.timedelta64(1890),
+ np.timedelta64(31),
+ 60),
+ # Y // M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64('13', 'M'),
+ 1),
+ # handle 1D arrays
+ (np.array([1, 2, 3], dtype='m8'),
+ np.array([2], dtype='m8'),
+ np.array([0, 1, 1], dtype=np.int64)),
+ ])
+ def test_timedelta_floor_divide(self, op1, op2, exp):
+ assert_equal(op1 // op2, exp)
+
+ @pytest.mark.parametrize("op1, op2", [
+ # div by 0
+ (np.timedelta64(10, 'us'),
+ np.timedelta64(0, 'us')),
+ # div with NaT
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'us')),
+ # special case for int64 min
+ # in integer floor division
+ (np.timedelta64(np.iinfo(np.int64).min),
+ np.timedelta64(-1)),
+ ])
+ def test_timedelta_floor_div_warnings(self, op1, op2):
+ with assert_warns(RuntimeWarning):
+ actual = op1 // op2
+ assert_equal(actual, 0)
+ assert_equal(actual.dtype, np.int64)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # the smallest integer that can't be represented
+ # exactly in a double should be preserved if we avoid
+ # casting to double in floordiv operation
+ (9007199254740993, 1),
+ # stress the alternate floordiv code path where
+ # operand signs don't match and remainder isn't 0
+ (9007199254740999, -2),
+ ])
+ def test_timedelta_floor_div_precision(self, val1, val2):
+ op1 = np.timedelta64(val1)
+ op2 = np.timedelta64(val2)
+ actual = op1 // op2
+ # Python reference integer floor
+ expected = val1 // val2
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # years and months sometimes can't be unambiguously
+ # divided for floor division operation
+ (np.timedelta64(7, 'Y'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(7, 'M'),
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_floor_div_error(self, val1, val2):
+ with assert_raises_regex(TypeError, "common metadata divisor"):
+ val1 // val2
+
+ @pytest.mark.parametrize("op1, op2", [
+ # reuse the test cases from floordiv
+ (np.timedelta64(7, 's'),
+ np.timedelta64(4, 's')),
+ # m8 same units round down with negative
+ (np.timedelta64(7, 's'),
+ np.timedelta64(-4, 's')),
+ # m8 same units negative no round down
+ (np.timedelta64(8, 's'),
+ np.timedelta64(-4, 's')),
+ # m8 different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(31, 's')),
+ # m8 generic units
+ (np.timedelta64(1890),
+ np.timedelta64(31)),
+ # Y // M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64('13', 'M')),
+ # handle 1D arrays
+ (np.array([1, 2, 3], dtype='m8'),
+ np.array([2], dtype='m8')),
+ ])
+ def test_timedelta_divmod(self, op1, op2):
+ expected = (op1 // op2, op1 % op2)
+ assert_equal(divmod(op1, op2), expected)
+
+ @pytest.mark.parametrize("op1, op2", [
+ # reuse cases from floordiv
+ # div by 0
+ (np.timedelta64(10, 'us'),
+ np.timedelta64(0, 'us')),
+ # div with NaT
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'us')),
+ # special case for int64 min
+ # in integer floor division
+ (np.timedelta64(np.iinfo(np.int64).min),
+ np.timedelta64(-1)),
+ ])
+ def test_timedelta_divmod_warnings(self, op1, op2):
+ with assert_warns(RuntimeWarning):
+ expected = (op1 // op2, op1 % op2)
+ with assert_warns(RuntimeWarning):
+ actual = divmod(op1, op2)
+ assert_equal(actual, expected)
+
def test_datetime_divide(self):
for dta, tda, tdb, tdc, tdd in \
[
@@ -1095,8 +1238,6 @@ class TestDateTime(object):
assert_equal(tda / tdd, 60.0)
assert_equal(tdd / tda, 1.0 / 60.0)
- # m8 // m8
- assert_raises(TypeError, np.floor_divide, tda, tdb)
# int / m8
assert_raises(TypeError, np.divide, 2, tdb)
# float / m8
@@ -1402,6 +1543,12 @@ class TestDateTime(object):
assert_equal(x[0].astype(np.int64), 322689600000000000)
+ # gh-13062
+ with pytest.raises(OverflowError):
+ np.datetime64(2**64, 'D')
+ with pytest.raises(OverflowError):
+ np.timedelta64(2**64, 'D')
+
def test_datetime_as_string(self):
# Check all the units with default string conversion
date = '1959-10-13'
@@ -1611,6 +1758,76 @@ class TestDateTime(object):
assert_raises(TypeError, np.arange, np.timedelta64(0, 'Y'),
np.timedelta64(5, 'D'))
+ @pytest.mark.parametrize("val1, val2, expected", [
+ # case from gh-12092
+ (np.timedelta64(7, 's'),
+ np.timedelta64(3, 's'),
+ np.timedelta64(1, 's')),
+ # negative value cases
+ (np.timedelta64(3, 's'),
+ np.timedelta64(-2, 's'),
+ np.timedelta64(-1, 's')),
+ (np.timedelta64(-3, 's'),
+ np.timedelta64(2, 's'),
+ np.timedelta64(1, 's')),
+ # larger value cases
+ (np.timedelta64(17, 's'),
+ np.timedelta64(22, 's'),
+ np.timedelta64(17, 's')),
+ (np.timedelta64(22, 's'),
+ np.timedelta64(17, 's'),
+ np.timedelta64(5, 's')),
+ # different units
+ (np.timedelta64(1, 'm'),
+ np.timedelta64(57, 's'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(1, 'us'),
+ np.timedelta64(727, 'ns'),
+ np.timedelta64(273, 'ns')),
+ # NaT is propagated
+ (np.timedelta64('NaT'),
+ np.timedelta64(50, 'ns'),
+ np.timedelta64('NaT')),
+ # Y % M works
+ (np.timedelta64(2, 'Y'),
+ np.timedelta64(22, 'M'),
+ np.timedelta64(2, 'M')),
+ ])
+ def test_timedelta_modulus(self, val1, val2, expected):
+ assert_equal(val1 % val2, expected)
+
+ @pytest.mark.parametrize("val1, val2", [
+ # years and months sometimes can't be unambiguously
+ # divided for modulus operation
+ (np.timedelta64(7, 'Y'),
+ np.timedelta64(3, 's')),
+ (np.timedelta64(7, 'M'),
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_error(self, val1, val2):
+ with assert_raises_regex(TypeError, "common metadata divisor"):
+ val1 % val2
+
+ def test_timedelta_modulus_div_by_zero(self):
+ with assert_warns(RuntimeWarning):
+ actual = np.timedelta64(10, 's') % np.timedelta64(0, 's')
+ assert_equal(actual, np.timedelta64('NaT'))
+
+ @pytest.mark.parametrize("val1, val2", [
+ # cases where one operand is not
+ # timedelta64
+ (np.timedelta64(7, 'Y'),
+ 15,),
+ (7.5,
+ np.timedelta64(1, 'D')),
+ ])
+ def test_timedelta_modulus_type_resolution(self, val1, val2):
+ # NOTE: some of the operations may be supported
+ # in the future
+ with assert_raises_regex(TypeError,
+ "'remainder' cannot use operands with types"):
+ val1 % val2
+
def test_timedelta_arange_no_dtype(self):
d = np.array(5, dtype="m8[D]")
assert_equal(np.arange(d, d + 1), d)
@@ -1991,6 +2208,27 @@ class TestDateTime(object):
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
+ def test_isfinite(self):
+ assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
+ assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
+ assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
+
+ assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
+ assert_(np.isfinite(np.timedelta64(34, "ms")))
+
+ res = np.array([True, True, False])
+ for unit in ['Y', 'M', 'W', 'D',
+ 'h', 'm', 's', 'ms', 'us',
+ 'ns', 'ps', 'fs', 'as']:
+ arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
+ assert_equal(np.isfinite(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
+ assert_equal(np.isfinite(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
+ assert_equal(np.isfinite(arr), res)
+ arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
+ assert_equal(np.isfinite(arr), res)
+
def test_corecursive_input(self):
# construct a co-recursive list
a, b = [], []
@@ -2003,6 +2241,44 @@ class TestDateTime(object):
assert_raises(RecursionError, obj_arr.astype, 'M8')
assert_raises(RecursionError, obj_arr.astype, 'm8')
+ @pytest.mark.parametrize("time_unit", [
+ "Y", "M", "W", "D", "h", "m", "s", "ms", "us", "ns", "ps", "fs", "as",
+ # compound units
+ "10D", "2M",
+ ])
+ def test_limit_symmetry(self, time_unit):
+ """
+ Dates should have symmetric limits around the unix epoch at +/-np.int64
+ """
+ epoch = np.datetime64(0, time_unit)
+ latest = np.datetime64(np.iinfo(np.int64).max, time_unit)
+ earliest = np.datetime64(-np.iinfo(np.int64).max, time_unit)
+
+ # above should not have overflowed
+ assert earliest < epoch < latest
+
+ @pytest.mark.parametrize("time_unit", [
+ "Y", "M",
+ pytest.param("W", marks=pytest.mark.xfail(reason="gh-13197")),
+ "D", "h", "m",
+ "s", "ms", "us", "ns", "ps", "fs", "as",
+ pytest.param("10D", marks=pytest.mark.xfail(reason="similar to gh-13197")),
+ ])
+ @pytest.mark.parametrize("sign", [-1, 1])
+ def test_limit_str_roundtrip(self, time_unit, sign):
+ """
+ Limits should roundtrip when converted to strings.
+
+ This tests the conversion to and from npy_datetimestruct.
+ """
+ # TODO: add absolute (gold standard) time span limit strings
+ limit = np.datetime64(np.iinfo(np.int64).max * sign, time_unit)
+
+ # Convert to string and back. Explicit unit needed since the day and
+ # week reprs are not distinguishable.
+ limit_via_str = np.datetime64(str(limit), time_unit)
+ assert limit_via_str == limit
+
class TestDateTimeData(object):
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 10ef16800..8bffaa9af 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -10,13 +10,16 @@ import sys
import operator
import warnings
import pytest
+import shutil
+import tempfile
import numpy as np
from numpy.testing import (
- assert_raises, assert_warns, assert_no_warnings, assert_array_equal,
- assert_
+ assert_raises, assert_warns, assert_, assert_array_equal
)
+from numpy.core._multiarray_tests import fromstring_null_term_c_api
+
try:
import pytz
_has_pytz = True
@@ -102,7 +105,7 @@ class _DeprecationTestCase(object):
(self.warning_cls.__name__, warning.category))
if num is not None and num_found != num:
msg = "%i warnings found but %i expected." % (len(self.log), num)
- lst = [str(w.category) for w in self.log]
+ lst = [str(w) for w in self.log]
raise AssertionError("\n".join([msg] + lst))
with warnings.catch_warnings():
@@ -150,16 +153,6 @@ class TestNonTupleNDIndexDeprecation(object):
a[[0, 1]]
-class TestRankDeprecation(_DeprecationTestCase):
- """Test that np.rank is deprecated. The function should simply be
- removed. The VisibleDeprecationWarning may become unnecessary.
- """
-
- def test(self):
- a = np.arange(10)
- assert_warns(np.VisibleDeprecationWarning, np.rank, a)
-
-
class TestComparisonDeprecations(_DeprecationTestCase):
"""This tests the deprecation, for non-element-wise comparison logic.
This used to mean that when an error occurred during element-wise comparison
@@ -282,36 +275,6 @@ class TestNonCContiguousViewDeprecation(_DeprecationTestCase):
self.assert_deprecated(np.ones((2,2)).T.view, args=(np.int8,))
-class TestInvalidOrderParameterInputForFlattenArrayDeprecation(_DeprecationTestCase):
- """Invalid arguments to the ORDER parameter in array.flatten() should not be
- allowed and should raise an error. However, in the interests of not breaking
- code that may inadvertently pass invalid arguments to this parameter, a
- DeprecationWarning will be issued instead for the time being to give developers
- time to refactor relevant code.
- """
-
- def test_flatten_array_non_string_arg(self):
- x = np.zeros((3, 5))
- self.message = ("Non-string object detected for "
- "the array ordering. Please pass "
- "in 'C', 'F', 'A', or 'K' instead")
- self.assert_deprecated(x.flatten, args=(np.pi,))
-
- def test_flatten_array_invalid_string_arg(self):
- # Tests that a DeprecationWarning is raised
- # when a string of length greater than one
- # starting with "C", "F", "A", or "K" (case-
- # and unicode-insensitive) is passed in for
- # the ORDER parameter. Otherwise, a TypeError
- # will be raised!
-
- x = np.zeros((3, 5))
- self.message = ("Non length-one string passed "
- "in for the array ordering. Please "
- "pass in 'C', 'F', 'A', or 'K' instead")
- self.assert_deprecated(x.flatten, args=("FACK",))
-
-
class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
"""Assigning the 'data' attribute of an ndarray is unsafe as pointed
out in gh-7093. Eventually, such assignment should NOT be allowed, but
@@ -330,22 +293,6 @@ class TestArrayDataAttributeAssignmentDeprecation(_DeprecationTestCase):
self.assert_deprecated(a.__setattr__, args=('data', b.data))
-class TestLinspaceInvalidNumParameter(_DeprecationTestCase):
- """Argument to the num parameter in linspace that cannot be
- safely interpreted as an integer is deprecated in 1.12.0.
-
- Argument to the num parameter in linspace that cannot be
- safely interpreted as an integer should not be allowed.
- In the interest of not breaking code that passes
- an argument that could still be interpreted as an integer, a
- DeprecationWarning will be issued for the time being to give
- developers time to refactor relevant code.
- """
- def test_float_arg(self):
- # 2016-02-25, PR#7328
- self.assert_deprecated(np.linspace, args=(0, 10, 2.5))
-
-
class TestBinaryReprInsufficientWidthParameterForRepresentation(_DeprecationTestCase):
"""
If a 'width' parameter is passed into ``binary_repr`` that is insufficient to
@@ -453,6 +400,18 @@ class TestNPY_CHAR(_DeprecationTestCase):
assert_(npy_char_deprecation() == 'S1')
+class TestPyArray_AS1D(_DeprecationTestCase):
+ def test_npy_pyarrayas1d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas1d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas1d_deprecation)
+
+
+class TestPyArray_AS2D(_DeprecationTestCase):
+ def test_npy_pyarrayas2d_deprecation(self):
+ from numpy.core._multiarray_tests import npy_pyarrayas2d_deprecation
+ assert_raises(NotImplementedError, npy_pyarrayas2d_deprecation)
+
+
class Test_UPDATEIFCOPY(_DeprecationTestCase):
"""
v1.14 deprecates creating an array with the UPDATEIFCOPY flag, use
@@ -500,6 +459,12 @@ class TestBincount(_DeprecationTestCase):
self.assert_deprecated(lambda: np.bincount([1, 2, 3], minlength=None))
+class TestAlen(_DeprecationTestCase):
+ # 2019-08-02, 1.18.0
+ def test_alen(self):
+ self.assert_deprecated(lambda: np.alen(np.array([1, 2, 3])))
+
+
class TestGeneratorSum(_DeprecationTestCase):
# 2018-02-25, 1.15.0
def test_generator_sum(self):
@@ -519,7 +484,87 @@ class TestPositiveOnNonNumerical(_DeprecationTestCase):
def test_positive_on_non_number(self):
self.assert_deprecated(operator.pos, args=(np.array('foo'),))
+
class TestFromstring(_DeprecationTestCase):
# 2017-10-19, 1.14
def test_fromstring(self):
self.assert_deprecated(np.fromstring, args=('\x00'*80,))
+
+
+class TestFromStringAndFileInvalidData(_DeprecationTestCase):
+ # 2019-06-08, 1.17.0
+ # Tests should be moved to real tests when deprecation is done.
+ message = "string or file could not be read to its end"
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_data_file(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+
+ with tempfile.TemporaryFile(mode="w") as f:
+ x.tofile(f, sep=',', format='%.2f')
+ f.write(invalid_str)
+
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=","))
+ f.seek(0)
+ self.assert_deprecated(lambda: np.fromfile(f, sep=",", count=5))
+ # Should not raise:
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ f.seek(0)
+ res = np.fromfile(f, sep=",", count=4)
+ assert_array_equal(res, x)
+
+ @pytest.mark.parametrize("invalid_str", [",invalid_data", "invalid_sep"])
+ def test_deprecate_unparsable_string(self, invalid_str):
+ x = np.array([1.51, 2, 3.51, 4], dtype=float)
+ x_str = "1.51,2,3.51,4{}".format(invalid_str)
+
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=","))
+ self.assert_deprecated(lambda: np.fromstring(x_str, sep=",", count=5))
+
+ # The C-level API can use not fixed size, but 0 terminated strings,
+ # so test that as well:
+ bytestr = x_str.encode("ascii")
+ self.assert_deprecated(lambda: fromstring_null_term_c_api(bytestr))
+
+ with assert_warns(DeprecationWarning):
+ # this is slightly strange, in that fromstring leaves data
+ # potentially uninitialized (would be good to error when all is
+ # read, but count is larger then actual data maybe).
+ res = np.fromstring(x_str, sep=",", count=5)
+ assert_array_equal(res[:-1], x)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # Should not raise:
+ res = np.fromstring(x_str, sep=",", count=4)
+ assert_array_equal(res, x)
+
+
+class Test_GetSet_NumericOps(_DeprecationTestCase):
+ # 2018-09-20, 1.16.0
+ def test_get_numeric_ops(self):
+ from numpy.core._multiarray_tests import getset_numericops
+ self.assert_deprecated(getset_numericops, num=2)
+
+ # empty kwargs prevents any state actually changing which would break
+ # other tests.
+ self.assert_deprecated(np.set_numeric_ops, kwargs={})
+ assert_raises(ValueError, np.set_numeric_ops, add='abc')
+
+
+class TestShape1Fields(_DeprecationTestCase):
+ warning_cls = FutureWarning
+
+ # 2019-05-20, 1.17.0
+ def test_shape_1_fields(self):
+ self.assert_deprecated(np.dtype, args=([('a', int, 1)],))
+
+
+class TestNonZero(_DeprecationTestCase):
+ # 2019-05-26, 1.17.0
+ def test_zerod(self):
+ self.assert_deprecated(lambda: np.nonzero(np.array(0)))
+ self.assert_deprecated(lambda: np.nonzero(np.array(1)))
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index fc4dc952a..d2fbbae5b 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -1,14 +1,17 @@
from __future__ import division, absolute_import, print_function
-import pickle
import sys
import operator
import pytest
import ctypes
+import gc
import numpy as np
from numpy.core._rational_tests import rational
-from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.testing import (
+ assert_, assert_equal, assert_array_equal, assert_raises, HAS_REFCOUNT)
+from numpy.compat import pickle
+from itertools import permutations
def assert_dtype_equal(a, b):
assert_equal(a, b)
@@ -85,6 +88,36 @@ class TestBuiltin(object):
assert_raises(TypeError, np.dtype, 'q8')
assert_raises(TypeError, np.dtype, 'Q8')
+ @pytest.mark.parametrize(
+ 'value',
+ ['m8', 'M8', 'datetime64', 'timedelta64',
+ 'i4, (2,3)f8, f4', 'a3, 3u8, (3,4)a10',
+ '>f', '<f', '=f', '|f',
+ ])
+ def test_dtype_bytes_str_equivalence(self, value):
+ bytes_value = value.encode('ascii')
+ from_bytes = np.dtype(bytes_value)
+ from_str = np.dtype(value)
+ assert_dtype_equal(from_bytes, from_str)
+
+ def test_dtype_from_bytes(self):
+ # Empty bytes object
+ assert_raises(TypeError, np.dtype, b'')
+ # Byte order indicator, but no type
+ assert_raises(TypeError, np.dtype, b'|')
+
+ # Single character with ordinal < NPY_NTYPES returns
+ # type by index into _builtin_descrs
+ assert_dtype_equal(np.dtype(bytes([0])), np.dtype('bool'))
+ assert_dtype_equal(np.dtype(bytes([17])), np.dtype(object))
+
+ # Single character where value is a valid type code
+ assert_dtype_equal(np.dtype(b'f'), np.dtype('float32'))
+
+ # Bytes with non-ascii values raise errors
+ assert_raises(TypeError, np.dtype, b'\xff')
+ assert_raises(TypeError, np.dtype, b's\xff')
+
def test_bad_param(self):
# Can't give a size that's too small
assert_raises(ValueError, np.dtype,
@@ -136,6 +169,18 @@ class TestRecord(object):
'titles': ['RRed pixel', 'Blue pixel']})
assert_dtype_not_equal(a, b)
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount_dictionary_setting(self):
+ names = ["name1"]
+ formats = ["f8"]
+ titles = ["t1"]
+ offsets = [0]
+ d = dict(names=names, formats=formats, titles=titles, offsets=offsets)
+ refcounts = {k: sys.getrefcount(i) for k, i in d.items()}
+ np.dtype(d)
+ refcounts_new = {k: sys.getrefcount(i) for k, i in d.items()}
+ assert refcounts == refcounts_new
+
def test_mutate(self):
# Mutating a dtype should reset the cached hash value
a = np.dtype([('yo', int)])
@@ -156,9 +201,9 @@ class TestRecord(object):
the dtype constructor.
"""
assert_raises(TypeError, np.dtype,
- dict(names=set(['A', 'B']), formats=['f8', 'i4']))
+ dict(names={'A', 'B'}, formats=['f8', 'i4']))
assert_raises(TypeError, np.dtype,
- dict(names=['A', 'B'], formats=set(['f8', 'i4'])))
+ dict(names=['A', 'B'], formats={'f8', 'i4'}))
def test_aligned_size(self):
# Check that structured dtypes get padded to an aligned size
@@ -213,7 +258,6 @@ class TestRecord(object):
assert_equal(dt1.descr, [('a', '|i1'), ('', '|V3'),
('b', [('f0', '<i2'), ('', '|V2'),
('f1', '<f4')], (2,))])
-
def test_union_struct(self):
# Should be able to create union dtypes
@@ -315,10 +359,91 @@ class TestRecord(object):
assert_raises(IndexError, lambda: dt[-3])
assert_raises(TypeError, operator.getitem, dt, 3.0)
- assert_raises(TypeError, operator.getitem, dt, [])
assert_equal(dt[1], dt[np.int8(1)])
+ @pytest.mark.parametrize('align_flag',[False, True])
+ def test_multifield_index(self, align_flag):
+ # indexing with a list produces subfields
+ # the align flag should be preserved
+ dt = np.dtype([
+ (('title', 'col1'), '<U20'), ('A', '<f8'), ('B', '<f8')
+ ], align=align_flag)
+
+ dt_sub = dt[['B', 'col1']]
+ assert_equal(
+ dt_sub,
+ np.dtype({
+ 'names': ['B', 'col1'],
+ 'formats': ['<f8', '<U20'],
+ 'offsets': [88, 0],
+ 'titles': [None, 'title'],
+ 'itemsize': 96
+ })
+ )
+ assert_equal(dt_sub.isalignedstruct, align_flag)
+
+ dt_sub = dt[['B']]
+ assert_equal(
+ dt_sub,
+ np.dtype({
+ 'names': ['B'],
+ 'formats': ['<f8'],
+ 'offsets': [88],
+ 'itemsize': 96
+ })
+ )
+ assert_equal(dt_sub.isalignedstruct, align_flag)
+
+ dt_sub = dt[[]]
+ assert_equal(
+ dt_sub,
+ np.dtype({
+ 'names': [],
+ 'formats': [],
+ 'offsets': [],
+ 'itemsize': 96
+ })
+ )
+ assert_equal(dt_sub.isalignedstruct, align_flag)
+
+ assert_raises(TypeError, operator.getitem, dt, ())
+ assert_raises(TypeError, operator.getitem, dt, [1, 2, 3])
+ assert_raises(TypeError, operator.getitem, dt, ['col1', 2])
+ assert_raises(KeyError, operator.getitem, dt, ['fake'])
+ assert_raises(KeyError, operator.getitem, dt, ['title'])
+ assert_raises(ValueError, operator.getitem, dt, ['col1', 'col1'])
+
+ def test_partial_dict(self):
+ # 'names' is missing
+ assert_raises(ValueError, np.dtype,
+ {'formats': ['i4', 'i4'], 'f0': ('i4', 0), 'f1':('i4', 4)})
+
+ def test_fieldless_views(self):
+ a = np.zeros(2, dtype={'names':[], 'formats':[], 'offsets':[],
+ 'itemsize':8})
+ assert_raises(ValueError, a.view, np.dtype([]))
+
+ d = np.dtype((np.dtype([]), 10))
+ assert_equal(d.shape, (10,))
+ assert_equal(d.itemsize, 0)
+ assert_equal(d.base, np.dtype([]))
+
+ arr = np.fromiter((() for i in range(10)), [])
+ assert_equal(arr.dtype, np.dtype([]))
+ assert_raises(ValueError, np.frombuffer, b'', dtype=[])
+ assert_equal(np.frombuffer(b'', dtype=[], count=2),
+ np.empty(2, dtype=[]))
+
+ assert_raises(ValueError, np.dtype, ([], 'f8'))
+ assert_raises(ValueError, np.zeros(1, dtype='i4').view, [])
+
+ assert_equal(np.zeros(2, dtype=[]) == np.zeros(2, dtype=[]),
+ np.ones(2, dtype=bool))
+
+ assert_equal(np.zeros((1, 2), dtype=[]) == a,
+ np.ones((1, 2), dtype=bool))
+
class TestSubarray(object):
def test_single_subarray(self):
@@ -352,7 +477,10 @@ class TestSubarray(object):
def test_shape_equal(self):
"""Test some data types that are equal"""
assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', tuple())))
- assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
+ # FutureWarning during deprecation period; after it is passed this
+ # should instead check that "(1)f8" == "1f8" == ("f8", 1).
+ with pytest.warns(FutureWarning):
+ assert_dtype_equal(np.dtype('f8'), np.dtype(('f8', 1)))
assert_dtype_equal(np.dtype((int, 2)), np.dtype((int, (2,))))
assert_dtype_equal(np.dtype(('<f4', (3, 2))), np.dtype(('<f4', (3, 2))))
d = ([('a', 'f4', (1, 2)), ('b', 'f8', (3, 1))], (3, 2))
@@ -441,11 +569,178 @@ class TestSubarray(object):
def test_alignment(self):
#Check that subarrays are aligned
- t1 = np.dtype('1i4', align=True)
+ t1 = np.dtype('(1,)i4', align=True)
t2 = np.dtype('2i4', align=True)
assert_equal(t1.alignment, t2.alignment)
+def iter_struct_object_dtypes():
+ """
+ Iterates over a few complex dtypes and object pattern which
+ fill the array with a given object (defaults to a singleton).
+
+ Yields
+ ------
+ dtype : dtype
+ pattern : tuple
+ Structured tuple for use with `np.array`.
+ count : int
+ Number of objects stored in the dtype.
+ singleton : object
+ A singleton object. The returned pattern is constructed so that
+ all objects inside the datatype are set to the singleton.
+ """
+ obj = object()
+
+ dt = np.dtype([('b', 'O', (2, 3))])
+ p = ([[obj] * 3] * 2,)
+ yield pytest.param(dt, p, 6, obj, id="<subarray>")
+
+ dt = np.dtype([('a', 'i4'), ('b', 'O', (2, 3))])
+ p = (0, [[obj] * 3] * 2)
+ yield pytest.param(dt, p, 6, obj, id="<subarray in field>")
+
+ dt = np.dtype([('a', 'i4'),
+ ('b', [('ba', 'O'), ('bb', 'i1')], (2, 3))])
+ p = (0, [[(obj, 0)] * 3] * 2)
+ yield pytest.param(dt, p, 6, obj, id="<structured subarray 1>")
+
+ dt = np.dtype([('a', 'i4'),
+ ('b', [('ba', 'O'), ('bb', 'O')], (2, 3))])
+ p = (0, [[(obj, obj)] * 3] * 2)
+ yield pytest.param(dt, p, 12, obj, id="<structured subarray 2>")
+
+
+@pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+class TestStructuredObjectRefcounting:
+ """These tests cover various uses of complicated structured types which
+ include objects and thus require reference counting.
+ """
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ @pytest.mark.parametrize(["creation_func", "creation_obj"], [
+ pytest.param(np.empty, None,
+ # None is probably used for too many things
+ marks=pytest.mark.skip("unreliable due to python's behaviour")),
+ (np.ones, 1),
+ (np.zeros, 0)])
+ def test_structured_object_create_delete(self, dt, pat, count, singleton,
+ creation_func, creation_obj):
+ """Structured object reference counting in creation and deletion"""
+ # The test assumes that 0, 1, and None are singletons.
+ gc.collect()
+ before = sys.getrefcount(creation_obj)
+ arr = creation_func(3, dt)
+
+ now = sys.getrefcount(creation_obj)
+ assert now - before == count * 3
+ del arr
+ now = sys.getrefcount(creation_obj)
+ assert now == before
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ def test_structured_object_item_setting(self, dt, pat, count, singleton):
+ """Structured object reference counting for simple item setting"""
+ one = 1
+
+ gc.collect()
+ before = sys.getrefcount(singleton)
+ arr = np.array([pat] * 3, dt)
+ assert sys.getrefcount(singleton) - before == count * 3
+ # Fill with `1` and check that it was replaced correctly:
+ before2 = sys.getrefcount(one)
+ arr[...] = one
+ after2 = sys.getrefcount(one)
+ assert after2 - before2 == count * 3
+ del arr
+ gc.collect()
+ assert sys.getrefcount(one) == before2
+ assert sys.getrefcount(singleton) == before
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ @pytest.mark.parametrize(
+ ['shape', 'index', 'items_changed'],
+ [((3,), ([0, 2],), 2),
+ ((3, 2), ([0, 2], slice(None)), 4),
+ ((3, 2), ([0, 2], [1]), 2),
+ ((3,), ([True, False, True]), 2)])
+ def test_structured_object_indexing(self, shape, index, items_changed,
+ dt, pat, count, singleton):
+ """Structured object reference counting for advanced indexing."""
+ zero = 0
+ one = 1
+
+ arr = np.zeros(shape, dt)
+
+ gc.collect()
+ before_zero = sys.getrefcount(zero)
+ before_one = sys.getrefcount(one)
+ # Test item getting:
+ part = arr[index]
+ after_zero = sys.getrefcount(zero)
+ assert after_zero - before_zero == count * items_changed
+ del part
+ # Test item setting:
+ arr[index] = one
+ gc.collect()
+ after_zero = sys.getrefcount(zero)
+ after_one = sys.getrefcount(one)
+ assert before_zero - after_zero == count * items_changed
+ assert after_one - before_one == count * items_changed
+
+ @pytest.mark.parametrize(['dt', 'pat', 'count', 'singleton'],
+ iter_struct_object_dtypes())
+ def test_structured_object_take_and_repeat(self, dt, pat, count, singleton):
+ """Structured object reference counting for specialized functions.
+ The older functions such as take and repeat use different code paths
+ then item setting (when writing this).
+ """
+ indices = [0, 1]
+
+ arr = np.array([pat] * 3, dt)
+ gc.collect()
+ before = sys.getrefcount(singleton)
+ res = arr.take(indices)
+ after = sys.getrefcount(singleton)
+ assert after - before == count * 2
+ new = res.repeat(10)
+ gc.collect()
+ after_repeat = sys.getrefcount(singleton)
+ assert after_repeat - after == count * 2 * 10
+
+
+class TestStructuredDtypeSparseFields(object):
+ """Tests subarray fields which contain sparse dtypes so that
+ not all memory is used by the dtype work. Such dtype's should
+ leave the underlying memory unchanged.
+ """
+ dtype = np.dtype([('a', {'names':['aa', 'ab'], 'formats':['f', 'f'],
+ 'offsets':[0, 4]}, (2, 3))])
+ sparse_dtype = np.dtype([('a', {'names':['ab'], 'formats':['f'],
+ 'offsets':[4]}, (2, 3))])
+
+ @pytest.mark.xfail(reason="inaccessible data is changed see gh-12686.")
+ @pytest.mark.valgrind_error(reason="reads from uninitialized buffers.")
+ def test_sparse_field_assignment(self):
+ arr = np.zeros(3, self.dtype)
+ sparse_arr = arr.view(self.sparse_dtype)
+
+ sparse_arr[...] = np.finfo(np.float32).max
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+ def test_sparse_field_assignment_fancy(self):
+ # Fancy assignment goes to the copyswap function for comlex types:
+ arr = np.zeros(3, self.dtype)
+ sparse_arr = arr.view(self.sparse_dtype)
+
+ sparse_arr[[0, 1, 2]] = np.finfo(np.float32).max
+ # dtype is reduced when accessing the field, so shape is (3, 2, 3):
+ assert_array_equal(arr["a"]["aa"], np.zeros((3, 2, 3)))
+
+
class TestMonsterType(object):
"""Test deeply nested subtypes."""
@@ -620,6 +915,25 @@ class TestString(object):
# Pull request #4722
np.array(["", ""]).astype(object)
+ def test_void_subclass_unsized(self):
+ dt = np.dtype(np.record)
+ assert_equal(repr(dt), "dtype('V')")
+ assert_equal(str(dt), '|V0')
+ assert_equal(dt.name, 'record')
+
+ def test_void_subclass_sized(self):
+ dt = np.dtype((np.record, 2))
+ assert_equal(repr(dt), "dtype('V2')")
+ assert_equal(str(dt), '|V2')
+ assert_equal(dt.name, 'record16')
+
+ def test_void_subclass_fields(self):
+ dt = np.dtype((np.record, [('a', '<u2')]))
+ assert_equal(repr(dt), "dtype((numpy.record, [('a', '<u2')]))")
+ assert_equal(str(dt), "(numpy.record, [('a', '<u2')])")
+ assert_equal(dt.name, 'record16')
+
+
class TestDtypeAttributeDeletion(object):
def test_dtype_non_writable_attributes_deletion(self):
@@ -649,13 +963,6 @@ class TestDtypeAttributes(object):
new_dtype = np.dtype(dtype.descr)
assert_equal(new_dtype.itemsize, 16)
- @pytest.mark.parametrize('t', np.typeDict.values())
- def test_name_builtin(self, t):
- name = t.__name__
- if name.endswith('_'):
- name = name[:-1]
- assert_equal(np.dtype(t).name, name)
-
def test_name_dtype_subclass(self):
# Ticket #4357
class user_def_subcls(np.void):
@@ -747,6 +1054,50 @@ def test_invalid_dtype_string():
assert_raises(TypeError, np.dtype, u'Fl\xfcgel')
+class TestFromDTypeAttribute(object):
+ def test_simple(self):
+ class dt:
+ dtype = "f8"
+
+ assert np.dtype(dt) == np.float64
+ assert np.dtype(dt()) == np.float64
+
+ def test_recursion(self):
+ class dt:
+ pass
+
+ dt.dtype = dt
+ with pytest.raises(RecursionError):
+ np.dtype(dt)
+
+ dt_instance = dt()
+ dt_instance.dtype = dt
+ with pytest.raises(RecursionError):
+ np.dtype(dt_instance)
+
+ def test_void_subtype(self):
+ class dt(np.void):
+ # This code path is fully untested before, so it is unclear
+ # what this should be useful for. Note that if np.void is used
+ # numpy will think we are deallocating a base type [1.17, 2019-02].
+ dtype = np.dtype("f,f")
+ pass
+
+ np.dtype(dt)
+ np.dtype(dt(1))
+
+ def test_void_subtype_recursion(self):
+ class dt(np.void):
+ pass
+
+ dt.dtype = dt
+
+ with pytest.raises(RecursionError):
+ np.dtype(dt)
+
+ with pytest.raises(RecursionError):
+ np.dtype(dt(1))
+
class TestFromCTypes(object):
@staticmethod
@@ -775,7 +1126,82 @@ class TestFromCTypes(object):
], align=True)
self.check(PaddedStruct, expected)
- @pytest.mark.xfail(reason="_pack_ is ignored - see gh-11651")
+ def test_bit_fields(self):
+ class BitfieldStruct(ctypes.Structure):
+ _fields_ = [
+ ('a', ctypes.c_uint8, 7),
+ ('b', ctypes.c_uint8, 1)
+ ]
+ assert_raises(TypeError, np.dtype, BitfieldStruct)
+ assert_raises(TypeError, np.dtype, BitfieldStruct())
+
+ def test_pointer(self):
+ p_uint8 = ctypes.POINTER(ctypes.c_uint8)
+ assert_raises(TypeError, np.dtype, p_uint8)
+
+ def test_void_pointer(self):
+ self.check(ctypes.c_void_p, np.uintp)
+
+ def test_union(self):
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b'],
+ formats=[np.uint8, np.uint16],
+ offsets=[0, 0],
+ itemsize=2
+ ))
+ self.check(Union, expected)
+
+ def test_union_with_struct_packed(self):
+ class Struct(ctypes.Structure):
+ _pack_ = 1
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+
+ class Union(ctypes.Union):
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
+ def test_union_packed(self):
+ class Struct(ctypes.Structure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ class Union(ctypes.Union):
+ _pack_ = 1
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint32),
+ ('d', Struct),
+ ]
+ expected = np.dtype(dict(
+ names=['a', 'b', 'c', 'd'],
+ formats=['u1', np.uint16, np.uint32, [('one', 'u1'), ('two', np.uint32)]],
+ offsets=[0, 0, 0, 0],
+ itemsize=ctypes.sizeof(Union)
+ ))
+ self.check(Union, expected)
+
def test_packed_structure(self):
class PackedStructure(ctypes.Structure):
_pack_ = 1
@@ -789,8 +1215,45 @@ class TestFromCTypes(object):
])
self.check(PackedStructure, expected)
- @pytest.mark.xfail(sys.byteorder != 'little',
- reason="non-native endianness does not work - see gh-10533")
+ def test_large_packed_structure(self):
+ class PackedStructure(ctypes.Structure):
+ _pack_ = 2
+ _fields_ = [
+ ('a', ctypes.c_uint8),
+ ('b', ctypes.c_uint16),
+ ('c', ctypes.c_uint8),
+ ('d', ctypes.c_uint16),
+ ('e', ctypes.c_uint32),
+ ('f', ctypes.c_uint32),
+ ('g', ctypes.c_uint8)
+ ]
+ expected = np.dtype(dict(
+ formats=[np.uint8, np.uint16, np.uint8, np.uint16, np.uint32, np.uint32, np.uint8 ],
+ offsets=[0, 2, 4, 6, 8, 12, 16],
+ names=['a', 'b', 'c', 'd', 'e', 'f', 'g'],
+ itemsize=18))
+ self.check(PackedStructure, expected)
+
+ def test_big_endian_structure_packed(self):
+ class BigEndStruct(ctypes.BigEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '>u4')])
+ self.check(BigEndStruct, expected)
+
+ def test_little_endian_structure_packed(self):
+ class LittleEndStruct(ctypes.LittleEndianStructure):
+ _fields_ = [
+ ('one', ctypes.c_uint8),
+ ('two', ctypes.c_uint32)
+ ]
+ _pack_ = 1
+ expected = np.dtype([('one', 'u1'), ('two', '<u4')])
+ self.check(LittleEndStruct, expected)
+
def test_little_endian_structure(self):
class PaddedStruct(ctypes.LittleEndianStructure):
_fields_ = [
@@ -803,8 +1266,6 @@ class TestFromCTypes(object):
], align=True)
self.check(PaddedStruct, expected)
- @pytest.mark.xfail(sys.byteorder != 'big',
- reason="non-native endianness does not work - see gh-10533")
def test_big_endian_structure(self):
class PaddedStruct(ctypes.BigEndianStructure):
_fields_ = [
@@ -816,3 +1277,24 @@ class TestFromCTypes(object):
('b', '>H')
], align=True)
self.check(PaddedStruct, expected)
+
+ def test_simple_endian_types(self):
+ self.check(ctypes.c_uint16.__ctype_le__, np.dtype('<u2'))
+ self.check(ctypes.c_uint16.__ctype_be__, np.dtype('>u2'))
+ self.check(ctypes.c_uint8.__ctype_le__, np.dtype('u1'))
+ self.check(ctypes.c_uint8.__ctype_be__, np.dtype('u1'))
+
+ all_types = set(np.typecodes['All'])
+ all_pairs = permutations(all_types, 2)
+
+ @pytest.mark.parametrize("pair", all_pairs)
+ def test_pairs(self, pair):
+ """
+ Check that np.dtype('x,y') matches [np.dtype('x'), np.dtype('y')]
+ Example: np.dtype('d,I') -> dtype([('f0', '<f8'), ('f1', '<u4')])
+ """
+ # gh-5645: check that np.dtype('i,L') can be used
+ pair_type = np.dtype('{},{}'.format(*pair))
+ expected = np.dtype([('f0', pair[0]), ('f1', pair[1])])
+ assert_equal(pair_type, expected)
+
diff --git a/numpy/core/tests/test_einsum.py b/numpy/core/tests/test_einsum.py
index 6b5b9c06e..cfeeb8a90 100644
--- a/numpy/core/tests/test_einsum.py
+++ b/numpy/core/tests/test_einsum.py
@@ -5,15 +5,13 @@ import itertools
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_raises, suppress_warnings
+ assert_raises, suppress_warnings, assert_raises_regex
)
# Setup for optimize einsum
chars = 'abcdefghij'
sizes = np.array([2, 3, 4, 5, 4, 3, 2, 6, 5, 4, 3])
-global_size_dict = {}
-for size, char in zip(sizes, chars):
- global_size_dict[char] = size
+global_size_dict = dict(zip(chars, sizes))
class TestEinsum(object):
@@ -92,6 +90,11 @@ class TestEinsum(object):
optimize=do_opt)
assert_raises(ValueError, np.einsum, "i->i", [[0, 1], [0, 1]],
out=np.arange(4).reshape(2, 2), optimize=do_opt)
+ with assert_raises_regex(ValueError, "'b'"):
+ # gh-11221 - 'c' erroneously appeared in the error message
+ a = np.ones((3, 3, 4, 5, 6))
+ b = np.ones((3, 4, 5))
+ np.einsum('aabcb,abc', a, b)
def test_einsum_views(self):
# pass-through
diff --git a/numpy/core/tests/test_errstate.py b/numpy/core/tests/test_errstate.py
index 670d485c1..0008c4cc8 100644
--- a/numpy/core/tests/test_errstate.py
+++ b/numpy/core/tests/test_errstate.py
@@ -39,3 +39,11 @@ class TestErrstate(object):
with np.errstate(call=None):
assert_(np.geterrcall() is None, 'call is not None')
assert_(np.geterrcall() is olderrcall, 'call is not olderrcall')
+
+ def test_errstate_decorator(self):
+ @np.errstate(all='ignore')
+ def foo():
+ a = -np.arange(3)
+ a // 0
+
+ foo()
diff --git a/numpy/core/tests/test_extint128.py b/numpy/core/tests/test_extint128.py
index 0e9c07fd5..7c454a603 100644
--- a/numpy/core/tests/test_extint128.py
+++ b/numpy/core/tests/test_extint128.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import sys
import itertools
import contextlib
import operator
@@ -8,7 +7,6 @@ import pytest
import numpy as np
import numpy.core._multiarray_tests as mt
-from numpy.compat import long
from numpy.testing import assert_raises, assert_equal
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index d0ff1c15f..84b60b19c 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
from numpy import (
logspace, linspace, geomspace, dtype, array, sctypes, arange, isnan,
- ndarray, sqrt, nextafter
+ ndarray, sqrt, nextafter, stack
)
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal, assert_allclose,
@@ -49,11 +49,25 @@ class TestLogspace(object):
assert_(len(y) == 50)
y = logspace(0, 6, num=100)
assert_(y[-1] == 10 ** 6)
- y = logspace(0, 6, endpoint=0)
+ y = logspace(0, 6, endpoint=False)
assert_(y[-1] < 10 ** 6)
y = logspace(0, 6, num=7)
assert_array_equal(y, [1, 10, 100, 1e3, 1e4, 1e5, 1e6])
+ def test_start_stop_array(self):
+ start = array([0., 1.])
+ stop = array([6., 7.])
+ t1 = logspace(start, stop, 6)
+ t2 = stack([logspace(_start, _stop, 6)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = logspace(start, stop[0], 6)
+ t4 = stack([logspace(_start, stop[0], 6)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = logspace(start, stop, 6, axis=-1)
+ assert_equal(t5, t2.T)
+
def test_dtype(self):
y = logspace(0, 6, dtype='float32')
assert_equal(y.dtype, dtype('float32'))
@@ -156,7 +170,7 @@ class TestGeomspace(object):
y = geomspace(1, 1e6, dtype=complex)
assert_equal(y.dtype, dtype('complex'))
- def test_array_scalar(self):
+ def test_start_stop_array_scalar(self):
lim1 = array([120, 100], dtype="int8")
lim2 = array([-120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
@@ -172,6 +186,21 @@ class TestGeomspace(object):
assert_allclose(t2, t5, rtol=1e-2)
assert_allclose(t3, t6, rtol=1e-5)
+ def test_start_stop_array(self):
+ # Try to use all special cases.
+ start = array([1.e0, 32., 1j, -4j, 1+1j, -1])
+ stop = array([1.e4, 2., 16j, -324j, 10000+10000j, 1])
+ t1 = geomspace(start, stop, 5)
+ t2 = stack([geomspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = geomspace(start, stop[0], 5)
+ t4 = stack([geomspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = geomspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
def test_physical_quantities(self):
a = PhysicalQuantity(1.0)
b = PhysicalQuantity(5.0)
@@ -200,17 +229,14 @@ class TestLinspace(object):
assert_(len(y) == 50)
y = linspace(2, 10, num=100)
assert_(y[-1] == 10)
- y = linspace(2, 10, endpoint=0)
+ y = linspace(2, 10, endpoint=False)
assert_(y[-1] < 10)
assert_raises(ValueError, linspace, 0, 10, num=-1)
def test_corner(self):
y = list(linspace(0, 1, 1))
assert_(y == [0.0], y)
- with suppress_warnings() as sup:
- sup.filter(DeprecationWarning, ".*safely interpreted as an integer")
- y = list(linspace(0, 1, 2.5))
- assert_(y == [0.0, 1.0])
+ assert_raises(TypeError, linspace, 0, 1, num=2.5)
def test_type(self):
t1 = linspace(0, 1, 0).dtype
@@ -227,7 +253,7 @@ class TestLinspace(object):
y = linspace(0, 6, dtype='int32')
assert_equal(y.dtype, dtype('int32'))
- def test_array_scalar(self):
+ def test_start_stop_array_scalar(self):
lim1 = array([-120, 100], dtype="int8")
lim2 = array([120, -100], dtype="int8")
lim3 = array([1200, 1000], dtype="uint16")
@@ -241,6 +267,20 @@ class TestLinspace(object):
assert_equal(t2, t5)
assert_equal(t3, t6)
+ def test_start_stop_array(self):
+ start = array([-120, 120], dtype="int8")
+ stop = array([100, -100], dtype="int8")
+ t1 = linspace(start, stop, 5)
+ t2 = stack([linspace(_start, _stop, 5)
+ for _start, _stop in zip(start, stop)], axis=1)
+ assert_equal(t1, t2)
+ t3 = linspace(start, stop[0], 5)
+ t4 = stack([linspace(_start, stop[0], 5)
+ for _start in start], axis=1)
+ assert_equal(t3, t4)
+ t5 = linspace(start, stop, 5, axis=-1)
+ assert_equal(t5, t2.T)
+
def test_complex(self):
lim1 = linspace(1 + 2j, 3 + 4j, 5)
t1 = array([1.0+2.j, 1.5+2.5j, 2.0+3j, 2.5+3.5j, 3.0+4j])
@@ -285,9 +325,7 @@ class TestLinspace(object):
@property
def __array_interface__(self):
- # Ideally should be `'shape': ()` but the current interface
- # does not allow that
- return {'shape': (1,), 'typestr': '<i4', 'data': self._data,
+ return {'shape': (), 'typestr': '<i4', 'data': self._data,
'version': 3}
def __mul__(self, other):
@@ -321,3 +359,9 @@ class TestLinspace(object):
assert_(isinstance(y, tuple) and len(y) == 2 and
len(y[0]) == num and isnan(y[1]),
'num={0}, endpoint={1}'.format(num, ept))
+
+ def test_object(self):
+ start = array(1, dtype='O')
+ stop = array(2, dtype='O')
+ y = linspace(start, stop, 3)
+ assert_array_equal(y, array([1., 1.5, 2.]))
diff --git a/numpy/core/tests/test_getlimits.py b/numpy/core/tests/test_getlimits.py
index ca8093c62..2f6648183 100644
--- a/numpy/core/tests/test_getlimits.py
+++ b/numpy/core/tests/test_getlimits.py
@@ -7,10 +7,7 @@ import numpy as np
from numpy.core import finfo, iinfo
from numpy import half, single, double, longdouble
from numpy.testing import assert_equal, assert_, assert_raises
-from numpy.core.getlimits import (
- _discovered_machar, _float16_ma, _float32_ma, _float64_ma, _float128_ma,
- _float80_ma
- )
+from numpy.core.getlimits import _discovered_machar, _float_ma
##################################################
@@ -101,9 +98,9 @@ def assert_ma_equal(discovered, ma_like):
def test_known_types():
# Test we are correctly compiling parameters for known types
- for ftype, ma_like in ((np.float16, _float16_ma),
- (np.float32, _float32_ma),
- (np.float64, _float64_ma)):
+ for ftype, ma_like in ((np.float16, _float_ma[16]),
+ (np.float32, _float_ma[32]),
+ (np.float64, _float_ma[64])):
assert_ma_equal(_discovered_machar(ftype), ma_like)
# Suppress warning for broken discovery of double double on PPC
with np.errstate(all='ignore'):
@@ -111,10 +108,10 @@ def test_known_types():
bytes = np.dtype(np.longdouble).itemsize
if (ld_ma.it, ld_ma.maxexp) == (63, 16384) and bytes in (12, 16):
# 80-bit extended precision
- assert_ma_equal(ld_ma, _float80_ma)
+ assert_ma_equal(ld_ma, _float_ma[80])
elif (ld_ma.it, ld_ma.maxexp) == (112, 16384) and bytes == 16:
# IEE 754 128-bit
- assert_ma_equal(ld_ma, _float128_ma)
+ assert_ma_equal(ld_ma, _float_ma[128])
def test_plausible_finfo():
diff --git a/numpy/core/tests/test_half.py b/numpy/core/tests/test_half.py
index d715569f8..1e1e6d7d9 100644
--- a/numpy/core/tests/test_half.py
+++ b/numpy/core/tests/test_half.py
@@ -5,7 +5,7 @@ import pytest
import numpy as np
from numpy import uint16, float16, float32, float64
-from numpy.testing import assert_, assert_equal, suppress_warnings
+from numpy.testing import assert_, assert_equal
def assert_raises_fpe(strmatch, callable, *args, **kwargs):
@@ -69,6 +69,85 @@ class TestHalf(object):
j = np.array(i_f16, dtype=int)
assert_equal(i_int, j)
+ @pytest.mark.parametrize("offset", [None, "up", "down"])
+ @pytest.mark.parametrize("shift", [None, "up", "down"])
+ @pytest.mark.parametrize("float_t", [np.float32, np.float64])
+ def test_half_conversion_rounding(self, float_t, shift, offset):
+ # Assumes that round to even is used during casting.
+ max_pattern = np.float16(np.finfo(np.float16).max).view(np.uint16)
+
+ # Test all (positive) finite numbers, denormals are most interesting
+ # however:
+ f16s_patterns = np.arange(0, max_pattern+1, dtype=np.uint16)
+ f16s_float = f16s_patterns.view(np.float16).astype(float_t)
+
+ # Shift the values by half a bit up or a down (or do not shift),
+ if shift == "up":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[1:]
+ elif shift == "down":
+ f16s_float = 0.5 * (f16s_float[:-1] + f16s_float[1:])[:-1]
+ else:
+ f16s_float = f16s_float[1:-1]
+
+ # Increase the float by a minimal value:
+ if offset == "up":
+ f16s_float = np.nextafter(f16s_float, float_t(1e50))
+ elif offset == "down":
+ f16s_float = np.nextafter(f16s_float, float_t(-1e50))
+
+ # Convert back to float16 and its bit pattern:
+ res_patterns = f16s_float.astype(np.float16).view(np.uint16)
+
+ # The above calculations tries the original values, or the exact
+ # mid points between the float16 values. It then further offsets them
+ # by as little as possible. If no offset occurs, "round to even"
+ # logic will be necessary, an arbitrarily small offset should cause
+ # normal up/down rounding always.
+
+ # Calculate the expected pattern:
+ cmp_patterns = f16s_patterns[1:-1].copy()
+
+ if shift == "down" and offset != "up":
+ shift_pattern = -1
+ elif shift == "up" and offset != "down":
+ shift_pattern = 1
+ else:
+ # There cannot be a shift, either shift is None, so all rounding
+ # will go back to original, or shift is reduced by offset too much.
+ shift_pattern = 0
+
+ # If rounding occurs, is it normal rounding or round to even?
+ if offset is None:
+ # Round to even occurs, modify only non-even, cast to allow + (-1)
+ cmp_patterns[0::2].view(np.int16)[...] += shift_pattern
+ else:
+ cmp_patterns.view(np.int16)[...] += shift_pattern
+
+ assert_equal(res_patterns, cmp_patterns)
+
+ @pytest.mark.parametrize(["float_t", "uint_t", "bits"],
+ [(np.float32, np.uint32, 23),
+ (np.float64, np.uint64, 52)])
+ def test_half_conversion_denormal_round_even(self, float_t, uint_t, bits):
+ # Test specifically that all bits are considered when deciding
+ # whether round to even should occur (i.e. no bits are lost at the
+ # end. Compare also gh-12721. The most bits can get lost for the
+ # smallest denormal:
+ smallest_value = np.uint16(1).view(np.float16).astype(float_t)
+ assert smallest_value == 2**-24
+
+ # Will be rounded to zero based on round to even rule:
+ rounded_to_zero = smallest_value / float_t(2)
+ assert rounded_to_zero.astype(np.float16) == 0
+
+ # The significand will be all 0 for the float_t, test that we do not
+ # lose the lower ones of these:
+ for i in range(bits):
+ # slightly increasing the value should make it round up:
+ larger_pattern = rounded_to_zero.view(uint_t) | uint_t(1 << i)
+ larger_value = larger_pattern.view(float_t)
+ assert larger_value.astype(np.float16) == smallest_value
+
def test_nans_infs(self):
with np.errstate(all='ignore'):
# Check some of the ufuncs
@@ -301,21 +380,19 @@ class TestHalf(object):
assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])
assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
- with suppress_warnings() as sup:
- sup.record(RuntimeWarning)
- x = np.maximum(b, c)
- assert_(np.isnan(x[3]))
- assert_equal(len(sup.log), 1)
+
+ x = np.maximum(b, c)
+ assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [0, 5, 1, 0, 6])
+
assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
- with suppress_warnings() as sup:
- sup.record(RuntimeWarning)
- x = np.minimum(b, c)
- assert_(np.isnan(x[3]))
- assert_equal(len(sup.log), 1)
+
+ x = np.minimum(b, c)
+ assert_(np.isnan(x[3]))
x[3] = 0
assert_equal(x, [-2, -1, -np.inf, 0, 3])
+
assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
diff --git a/numpy/core/tests/test_indexing.py b/numpy/core/tests/test_indexing.py
index 99792cee7..70a5a246f 100644
--- a/numpy/core/tests/test_indexing.py
+++ b/numpy/core/tests/test_indexing.py
@@ -249,6 +249,15 @@ class TestIndexing(object):
[4, 0, 6],
[0, 8, 0]])
+ def test_boolean_indexing_list(self):
+ # Regression test for #13715. It's a use-after-free bug which the
+ # test won't directly catch, but it will show up in valgrind.
+ a = np.array([1, 2, 3])
+ b = [True, False, True]
+ # Two variants of the test because the first takes a fast path
+ assert_equal(a[b], [1, 3])
+ assert_equal(a[None, b], [[1, 3]])
+
def test_reverse_strides_and_subspace_bufferinit(self):
# This tests that the strides are not reversed for simple and
# subspace fancy indexing.
@@ -608,6 +617,19 @@ class TestSubclasses(object):
assert_array_equal(s_bool, a[a > 0])
assert_array_equal(s_bool.base, a[a > 0])
+ def test_fancy_on_read_only(self):
+ # Test that fancy indexing on read-only SubClass does not make a
+ # read-only copy (gh-14132)
+ class SubClass(np.ndarray):
+ pass
+
+ a = np.arange(5)
+ s = a.view(SubClass)
+ s.flags.writeable = False
+ s_fancy = s[[0, 1, 2]]
+ assert_(s_fancy.flags.writeable)
+
+
def test_finalize_gets_full_info(self):
# Array finalize should be called on the filled array.
class SubClass(np.ndarray):
diff --git a/numpy/core/tests/test_item_selection.py b/numpy/core/tests/test_item_selection.py
index 3bc24fc95..9bd246866 100644
--- a/numpy/core/tests/test_item_selection.py
+++ b/numpy/core/tests/test_item_selection.py
@@ -79,9 +79,9 @@ class TestTake(object):
assert_array_equal(a, a_original)
def test_empty_argpartition(self):
- # In reference to github issue #6530
- a = np.array([0, 2, 4, 6, 8, 10])
- a = a.argpartition(np.array([], dtype=np.int16))
+ # In reference to github issue #6530
+ a = np.array([0, 2, 4, 6, 8, 10])
+ a = a.argpartition(np.array([], dtype=np.int16))
- b = np.array([0, 1, 2, 3, 4, 5])
- assert_array_equal(a, b)
+ b = np.array([0, 1, 2, 3, 4, 5])
+ assert_array_equal(a, b)
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index cf50d5d5c..59ac5923c 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -1,10 +1,12 @@
from __future__ import division, absolute_import, print_function
+import warnings
import pytest
import numpy as np
from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_array_equal, temppath,
+ assert_, assert_equal, assert_raises, assert_warns, assert_array_equal,
+ temppath,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -70,18 +72,21 @@ def test_fromstring():
def test_fromstring_bogus():
- assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
- np.array([1., 2., 3.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
+ np.array([1., 2., 3.]))
def test_fromstring_empty():
- assert_equal(np.fromstring("xxxxx", sep="x"),
- np.array([]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("xxxxx", sep="x"),
+ np.array([]))
def test_fromstring_missing():
- assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
- np.array([1]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1xx3x4x5x6", sep="x"),
+ np.array([1]))
class TestFileBased(object):
@@ -94,7 +99,9 @@ class TestFileBased(object):
with temppath() as path:
with open(path, 'wt') as f:
f.write("1. 2. 3. flop 4.\n")
- res = np.fromfile(path, dtype=float, sep=" ")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
@pytest.mark.skipif(string_to_longdouble_inaccurate,
@@ -185,12 +192,14 @@ class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
assert_equal(a[0], f)
def test_fromstring_best_effort_float(self):
- assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
- np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=float, sep=" "),
+ np.array([1.]))
def test_fromstring_best_effort(self):
- assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
- np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1,234", dtype=np.longdouble, sep=" "),
+ np.array([1.]))
def test_fromstring_foreign(self):
s = "1.234"
@@ -203,5 +212,32 @@ class TestCommaDecimalPointLocale(CommaDecimalPointLocale):
assert_array_equal(a, b)
def test_fromstring_foreign_value(self):
- b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
- assert_array_equal(b[0], 1)
+ with assert_warns(DeprecationWarning):
+ b = np.fromstring("1,234", dtype=np.longdouble, sep=" ")
+ assert_array_equal(b[0], 1)
+
+
+@pytest.mark.parametrize("int_val", [
+ # cases discussed in gh-10723
+ # and gh-9968
+ 2 ** 1024, 0])
+def test_longdouble_from_int(int_val):
+ # for issue gh-9968
+ str_val = str(int_val)
+ # we'll expect a RuntimeWarning on platforms
+ # with np.longdouble equivalent to np.double
+ # for large integer input
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', '', RuntimeWarning)
+ # can be inf==inf on some platforms
+ assert np.longdouble(int_val) == np.longdouble(str_val)
+ # we can't directly compare the int and
+ # max longdouble value on all platforms
+ if np.allclose(np.finfo(np.longdouble).max,
+ np.finfo(np.double).max) and w:
+ assert w[0].category is RuntimeWarning
+
+@pytest.mark.parametrize("bool_val", [
+ True, False])
+def test_longdouble_from_bool(bool_val):
+ assert np.longdouble(bool_val) == np.longdouble(int(bool_val))
diff --git a/numpy/core/tests/test_mem_overlap.py b/numpy/core/tests/test_mem_overlap.py
index f4ce6a84a..3c8e0e722 100644
--- a/numpy/core/tests/test_mem_overlap.py
+++ b/numpy/core/tests/test_mem_overlap.py
@@ -10,7 +10,7 @@ from numpy.core import _umath_tests
from numpy.lib.stride_tricks import as_strided
from numpy.compat import long
from numpy.testing import (
- assert_, assert_raises, assert_equal, assert_array_equal, assert_allclose
+ assert_, assert_raises, assert_equal, assert_array_equal
)
if sys.version_info[0] >= 3:
diff --git a/numpy/core/tests/test_memmap.py b/numpy/core/tests/test_memmap.py
index 990d0ae26..d2ae564b2 100644
--- a/numpy/core/tests/test_memmap.py
+++ b/numpy/core/tests/test_memmap.py
@@ -204,3 +204,13 @@ class TestMemmap(object):
self.tmpfp.write(b'a'*16)
mm = memmap(self.tmpfp, dtype='float64')
assert_equal(mm.shape, (2,))
+
+ def test_empty_array(self):
+ # gh-12653
+ with pytest.raises(ValueError, match='empty file'):
+ memmap(self.tmpfp, shape=(0,4), mode='w+')
+
+ self.tmpfp.write(b'\0')
+
+ # ok now the file is not empty
+ memmap(self.tmpfp, shape=(0,4), mode='w+')
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index e8353a702..9b124f603 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -20,6 +20,17 @@ import gc
import weakref
import pytest
from contextlib import contextmanager
+
+from numpy.compat import pickle
+
+try:
+ import pathlib
+except ImportError:
+ try:
+ import pathlib2 as pathlib
+ except ImportError:
+ pathlib = None
+
if sys.version_info[0] >= 3:
import builtins
else:
@@ -33,7 +44,7 @@ from numpy.testing import (
assert_, assert_raises, assert_warns, assert_equal, assert_almost_equal,
assert_array_equal, assert_raises_regex, assert_array_almost_equal,
assert_allclose, IS_PYPY, HAS_REFCOUNT, assert_array_less, runstring,
- temppath, suppress_warnings
+ temppath, suppress_warnings, break_cycles,
)
from numpy.core.tests._locales import CommaDecimalPointLocale
@@ -51,7 +62,12 @@ else:
def _aligned_zeros(shape, dtype=float, order="C", align=None):
- """Allocate a new ndarray with aligned memory."""
+ """
+ Allocate a new ndarray with aligned memory.
+
+ The ndarray is guaranteed *not* aligned to twice the requested alignment.
+ Eg, if align=4, guarantees it is not aligned to 8. If align=None uses
+ dtype.alignment."""
dtype = np.dtype(dtype)
if dtype == np.dtype(object):
# Can't do this, fall back to standard allocation (which
@@ -64,10 +80,15 @@ def _aligned_zeros(shape, dtype=float, order="C", align=None):
if not hasattr(shape, '__len__'):
shape = (shape,)
size = functools.reduce(operator.mul, shape) * dtype.itemsize
- buf = np.empty(size + align + 1, np.uint8)
- offset = buf.__array_interface__['data'][0] % align
+ buf = np.empty(size + 2*align + 1, np.uint8)
+
+ ptr = buf.__array_interface__['data'][0]
+ offset = ptr % align
if offset != 0:
offset = align - offset
+ if (ptr % (2*align)) == 0:
+ offset += align
+
# Note: slices producing 0-size arrays do not necessarily change
# data pointer --- so we use and allocate size+1
buf = buf[offset:offset+size+1][:-1]
@@ -89,6 +110,130 @@ class TestFlags(object):
self.a[0] = 5
self.a[0] = 0
+ def test_writeable_any_base(self):
+ # Ensure that any base being writeable is sufficient to change flag;
+ # this is especially interesting for arrays from an array interface.
+ arr = np.arange(10)
+
+ class subclass(np.ndarray):
+ pass
+
+ # Create subclass so base will not be collapsed, this is OK to change
+ view1 = arr.view(subclass)
+ view2 = view1[...]
+ arr.flags.writeable = False
+ view2.flags.writeable = False
+ view2.flags.writeable = True # Can be set to True again.
+
+ arr = np.arange(10)
+
+ class frominterface:
+ def __init__(self, arr):
+ self.arr = arr
+ self.__array_interface__ = arr.__array_interface__
+
+ view1 = np.asarray(frominterface)
+ view2 = view1[...]
+ view2.flags.writeable = False
+ view2.flags.writeable = True
+
+ view1.flags.writeable = False
+ view2.flags.writeable = False
+ with assert_raises(ValueError):
+ # Must assume not writeable, since only base is not:
+ view2.flags.writeable = True
+
+ def test_writeable_from_readonly(self):
+ # gh-9440 - make sure fromstring, from buffer on readonly buffers
+ # set writeable False
+ data = b'\x00' * 100
+ vals = np.frombuffer(data, 'B')
+ assert_raises(ValueError, vals.setflags, write=True)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_raises(ValueError, vals.setflags, write=True)
+
+ def test_writeable_from_buffer(self):
+ data = bytearray(b'\x00' * 100)
+ vals = np.frombuffer(data, 'B')
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+ types = np.dtype( [('vals', 'u1'), ('res3', 'S4')] )
+ values = np.core.records.fromstring(data, types)
+ vals = values['vals']
+ assert_(vals.flags.writeable)
+ vals.setflags(write=False)
+ assert_(vals.flags.writeable is False)
+ vals.setflags(write=True)
+ assert_(vals.flags.writeable)
+
+ @pytest.mark.skipif(sys.version_info[0] < 3, reason="Python 2 always copies")
+ @pytest.mark.skipif(IS_PYPY, reason="PyPy always copies")
+ def test_writeable_pickle(self):
+ import pickle
+ # Small arrays will be copied without setting base.
+ # See condition for using PyArray_SetBaseObject in
+ # array_setstate.
+ a = np.arange(1000)
+ for v in range(pickle.HIGHEST_PROTOCOL):
+ vals = pickle.loads(pickle.dumps(a, v))
+ assert_(vals.flags.writeable)
+ assert_(isinstance(vals.base, bytes))
+
+ def test_writeable_from_c_data(self):
+ # Test that the writeable flag can be changed for an array wrapping
+ # low level C-data, but not owning its data.
+ # Also see that this is deprecated to change from python.
+ from numpy.core._multiarray_tests import get_c_wrapping_array
+
+ arr_writeable = get_c_wrapping_array(True)
+ assert not arr_writeable.flags.owndata
+ assert arr_writeable.flags.writeable
+ view = arr_writeable[...]
+
+ # Toggling the writeable flag works on the view:
+ view.flags.writeable = False
+ assert not view.flags.writeable
+ view.flags.writeable = True
+ assert view.flags.writeable
+ # Flag can be unset on the arr_writeable:
+ arr_writeable.flags.writeable = False
+
+ arr_readonly = get_c_wrapping_array(False)
+ assert not arr_readonly.flags.owndata
+ assert not arr_readonly.flags.writeable
+
+ for arr in [arr_writeable, arr_readonly]:
+ view = arr[...]
+ view.flags.writeable = False # make sure it is readonly
+ arr.flags.writeable = False
+ assert not arr.flags.writeable
+
+ with assert_raises(ValueError):
+ view.flags.writeable = True
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+ with assert_raises(DeprecationWarning):
+ arr.flags.writeable = True
+
+ with assert_warns(DeprecationWarning):
+ arr.flags.writeable = True
+
+ def test_warnonwrite(self):
+ a = np.arange(10)
+ a.flags._warn_on_write = True
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always')
+ a[1] = 10
+ a[2] = 10
+ # only warn once
+ assert_(len(w) == 1)
+
def test_otherflags(self):
assert_equal(self.a.flags.carray, True)
assert_equal(self.a.flags['C'], True)
@@ -824,6 +969,29 @@ class TestCreation(object):
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
+ def test_sequence_of_array_like(self):
+ class ArrayLike:
+ def __init__(self):
+ self.__array_interface__ = {
+ "shape": (42,),
+ "typestr": "<i1",
+ "data": bytes(42)
+ }
+
+ # Make sure __array_*__ is used instead of Sequence methods.
+ def __iter__(self):
+ raise AssertionError("__iter__ was called")
+
+ def __getitem__(self, idx):
+ raise AssertionError("__getitem__ was called")
+
+ def __len__(self):
+ return 42
+
+ assert_equal(
+ np.array([ArrayLike()]),
+ np.zeros((1, 42), dtype=np.byte))
+
def test_non_sequence_sequence(self):
"""Should not segfault.
@@ -1342,10 +1510,10 @@ class TestZeroSizeFlexible(object):
sort_func(zs, kind=kind, **kwargs)
def test_sort(self):
- self._test_sort_partition('sort', kinds='qhm')
+ self._test_sort_partition('sort', kinds='qhs')
def test_argsort(self):
- self._test_sort_partition('argsort', kinds='qhm')
+ self._test_sort_partition('argsort', kinds='qhs')
def test_partition(self):
self._test_sort_partition('partition', kinds=['introselect'], kth=2)
@@ -1370,8 +1538,11 @@ class TestZeroSizeFlexible(object):
# viewing as any non-empty type gives an empty result
assert_equal(zs.view((dt, 1)).shape, (0,))
+ def test_dumps(self):
+ zs = self._zeros(10, int)
+ assert_equal(zs, pickle.loads(zs.dumps()))
+
def test_pickle(self):
- import pickle
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
for dt in [bytes, np.void, unicode]:
zs = self._zeros(10, dt)
@@ -1380,8 +1551,26 @@ class TestZeroSizeFlexible(object):
assert_equal(zs.dtype, zs2.dtype)
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_pickle_with_buffercallback(self):
+ array = np.arange(10)
+ buffers = []
+ bytes_string = pickle.dumps(array, buffer_callback=buffers.append,
+ protocol=5)
+ array_from_buffer = pickle.loads(bytes_string, buffers=buffers)
+ # when using pickle protocol 5 with buffer callbacks,
+ # array_from_buffer is reconstructed from a buffer holding a view
+ # to the initial array's data, so modifying an element in array
+ # should modify it in array_from_buffer too.
+ array[0] = -1
+ assert array_from_buffer[0] == -1, array_from_buffer[0]
+
class TestMethods(object):
+
+ sort_kinds = ['quicksort', 'heapsort', 'stable']
+
def test_compress(self):
tgt = [[5, 6, 7, 8, 9]]
arr = np.arange(10).reshape(2, 5)
@@ -1421,6 +1610,11 @@ class TestMethods(object):
# gh-12031, caused SEGFAULT
assert_raises(TypeError, oned.choose,np.void(0), [oned])
+ # gh-6272 check overlap on out
+ x = np.arange(5)
+ y = np.choose([0,0,0], [x[:3], x[:3], x[:3]], out=x[1:4], mode='wrap')
+ assert_equal(y, np.array([0, 1, 2]))
+
def test_prod(self):
ba = [1, 2, 10, 11, 6, 5, 4]
ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
@@ -1540,22 +1734,37 @@ class TestMethods(object):
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
- a = np.arange(101)
- b = a[::-1].copy()
- for kind in ['q', 'm', 'h']:
- msg = "scalar sort, kind=%s" % kind
- c = a.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
- c = b.copy()
- c.sort(kind=kind)
- assert_equal(c, a, msg)
+ # Test unsigned dtypes and nonnegative numbers
+ for dtype in [np.uint8, np.uint16, np.uint32, np.uint64, np.float16, np.float32, np.float64, np.longdouble]:
+ a = np.arange(101, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype)
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+
+ # Test signed dtypes and negative numbers as well
+ for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16, np.float32, np.float64, np.longdouble]:
+ a = np.arange(-50, 51, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar sort, kind=%s, dtype=%s" % (kind, dtype)
+ c = a.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
+ c = b.copy()
+ c.sort(kind=kind)
+ assert_equal(c, a, msg)
# test complex sorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "complex sort, real part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
@@ -1565,7 +1774,7 @@ class TestMethods(object):
assert_equal(c, ai, msg)
ai = a + 1j
bi = b + 1j
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "complex sort, imag part == 1, kind=%s" % kind
c = ai.copy()
c.sort(kind=kind)
@@ -1587,7 +1796,7 @@ class TestMethods(object):
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)])
b = a[::-1].copy()
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "string sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
@@ -1600,7 +1809,7 @@ class TestMethods(object):
s = 'aaaaaaaa'
a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
b = a[::-1].copy()
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "unicode sort, kind=%s" % kind
c = a.copy()
c.sort(kind=kind)
@@ -1690,7 +1899,7 @@ class TestMethods(object):
return True
a = np.array([Boom()]*100, dtype=object)
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "bogus comparison object sort, kind=%s" % kind
c.sort(kind=kind)
@@ -1710,7 +1919,7 @@ class TestMethods(object):
def test_sort_raises(self):
#gh-9404
arr = np.array([0, datetime.now(), 1], dtype=object)
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
#gh-3879
class Raiser(object):
@@ -1719,7 +1928,7 @@ class TestMethods(object):
__eq__ = __ne__ = __lt__ = __gt__ = __ge__ = __le__ = raises_anything
arr = np.array([[Raiser(), n] for n in range(10)]).reshape(-1)
np.random.shuffle(arr)
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
assert_raises(TypeError, arr.sort, kind=kind)
def test_sort_degraded(self):
@@ -1805,24 +2014,26 @@ class TestMethods(object):
# of sorted items must be greater than ~50 to check the actual
# algorithm because quick and merge sort fall over to insertion
# sort for small arrays.
- a = np.arange(101)
- b = a[::-1].copy()
- for kind in ['q', 'm', 'h']:
- msg = "scalar argsort, kind=%s" % kind
- assert_equal(a.copy().argsort(kind=kind), a, msg)
- assert_equal(b.copy().argsort(kind=kind), b, msg)
+
+ for dtype in [np.int32, np.uint32, np.float32]:
+ a = np.arange(101, dtype=dtype)
+ b = a[::-1].copy()
+ for kind in self.sort_kinds:
+ msg = "scalar argsort, kind=%s, dtype=%s" % (kind, dtype)
+ assert_equal(a.copy().argsort(kind=kind), a, msg)
+ assert_equal(b.copy().argsort(kind=kind), b, msg)
# test complex argsorts. These use the same code as the scalars
# but the compare function differs.
ai = a*1j + 1
bi = b*1j + 1
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
ai = a + 1j
bi = b + 1j
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "complex argsort, kind=%s" % kind
assert_equal(ai.copy().argsort(kind=kind), a, msg)
assert_equal(bi.copy().argsort(kind=kind), b, msg)
@@ -1841,7 +2052,7 @@ class TestMethods(object):
b = a[::-1].copy()
r = np.arange(101)
rr = r[::-1]
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "string argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
@@ -1852,7 +2063,7 @@ class TestMethods(object):
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "unicode argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
@@ -1863,7 +2074,7 @@ class TestMethods(object):
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "object argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
@@ -1874,7 +2085,7 @@ class TestMethods(object):
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
- for kind in ['q', 'm', 'h']:
+ for kind in self.sort_kinds:
msg = "structured array argsort, kind=%s" % kind
assert_equal(a.copy().argsort(kind=kind), r, msg)
assert_equal(b.copy().argsort(kind=kind), rr, msg)
@@ -2578,7 +2789,14 @@ class TestMethods(object):
assert_equal(x1.flatten('F'), y1f)
assert_equal(x1.flatten('F'), x1.T.flatten())
- def test_dot(self):
+ def test_flatten_invalid_order(self):
+ # invalid after gh-14596
+ for order in ['Z', 'c', False, True, 0, 8]:
+ x = np.array([[1, 2, 3], [4, 5, 6]], np.int32)
+ assert_raises(ValueError, x.flatten, {"order": order})
+
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ def test_arr_mult(self, func):
a = np.array([[1, 0], [0, 1]])
b = np.array([[0, 1], [1, 0]])
c = np.array([[9, 1], [1, -9]])
@@ -2602,49 +2820,49 @@ class TestMethods(object):
# gemm vs syrk optimizations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
- assert_equal(np.dot(eaf, eaf), eaf)
- assert_equal(np.dot(eaf.T, eaf), eaf)
- assert_equal(np.dot(eaf, eaf.T), eaf)
- assert_equal(np.dot(eaf.T, eaf.T), eaf)
- assert_equal(np.dot(eaf.T.copy(), eaf), eaf)
- assert_equal(np.dot(eaf, eaf.T.copy()), eaf)
- assert_equal(np.dot(eaf.T.copy(), eaf.T.copy()), eaf)
+ assert_equal(func(eaf, eaf), eaf)
+ assert_equal(func(eaf.T, eaf), eaf)
+ assert_equal(func(eaf, eaf.T), eaf)
+ assert_equal(func(eaf.T, eaf.T), eaf)
+ assert_equal(func(eaf.T.copy(), eaf), eaf)
+ assert_equal(func(eaf, eaf.T.copy()), eaf)
+ assert_equal(func(eaf.T.copy(), eaf.T.copy()), eaf)
# syrk validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
eaf = a.astype(et)
ebf = b.astype(et)
- assert_equal(np.dot(ebf, ebf), eaf)
- assert_equal(np.dot(ebf.T, ebf), eaf)
- assert_equal(np.dot(ebf, ebf.T), eaf)
- assert_equal(np.dot(ebf.T, ebf.T), eaf)
+ assert_equal(func(ebf, ebf), eaf)
+ assert_equal(func(ebf.T, ebf), eaf)
+ assert_equal(func(ebf, ebf.T), eaf)
+ assert_equal(func(ebf.T, ebf.T), eaf)
# syrk - different shape, stride, and view validations
for et in [np.float32, np.float64, np.complex64, np.complex128]:
edf = d.astype(et)
assert_equal(
- np.dot(edf[::-1, :], edf.T),
- np.dot(edf[::-1, :].copy(), edf.T.copy())
+ func(edf[::-1, :], edf.T),
+ func(edf[::-1, :].copy(), edf.T.copy())
)
assert_equal(
- np.dot(edf[:, ::-1], edf.T),
- np.dot(edf[:, ::-1].copy(), edf.T.copy())
+ func(edf[:, ::-1], edf.T),
+ func(edf[:, ::-1].copy(), edf.T.copy())
)
assert_equal(
- np.dot(edf, edf[::-1, :].T),
- np.dot(edf, edf[::-1, :].T.copy())
+ func(edf, edf[::-1, :].T),
+ func(edf, edf[::-1, :].T.copy())
)
assert_equal(
- np.dot(edf, edf[:, ::-1].T),
- np.dot(edf, edf[:, ::-1].T.copy())
+ func(edf, edf[:, ::-1].T),
+ func(edf, edf[:, ::-1].T.copy())
)
assert_equal(
- np.dot(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
- np.dot(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
+ func(edf[:edf.shape[0] // 2, :], edf[::2, :].T),
+ func(edf[:edf.shape[0] // 2, :].copy(), edf[::2, :].T.copy())
)
assert_equal(
- np.dot(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
- np.dot(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
+ func(edf[::2, :], edf[:edf.shape[0] // 2, :].T),
+ func(edf[::2, :].copy(), edf[:edf.shape[0] // 2, :].T.copy())
)
# syrk - different shape
@@ -2652,9 +2870,43 @@ class TestMethods(object):
edf = d.astype(et)
eddtf = ddt.astype(et)
edtdf = dtd.astype(et)
- assert_equal(np.dot(edf, edf.T), eddtf)
- assert_equal(np.dot(edf.T, edf), edtdf)
+ assert_equal(func(edf, edf.T), eddtf)
+ assert_equal(func(edf.T, edf), edtdf)
+
+ @pytest.mark.parametrize('func', (np.dot, np.matmul))
+ @pytest.mark.parametrize('dtype', 'ifdFD')
+ def test_no_dgemv(self, func, dtype):
+ # check vector arg for contiguous before gemv
+ # gh-12156
+ a = np.arange(8.0, dtype=dtype).reshape(2, 4)
+ b = np.broadcast_to(1., (4, 1))
+ ret1 = func(a, b)
+ ret2 = func(a, b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T)
+ assert_equal(ret1, ret2)
+
+ # check for unaligned data
+ dt = np.dtype(dtype)
+ a = np.zeros(8 * dt.itemsize // 2 + 1, dtype='int16')[1:].view(dtype)
+ a = a.reshape(2, 4)
+ b = a[0]
+ # make sure it is not aligned
+ assert_(a.__array_interface__['data'][0] % dt.itemsize != 0)
+ ret1 = func(a, b)
+ ret2 = func(a.copy(), b.copy())
+ assert_equal(ret1, ret2)
+
+ ret1 = func(b.T, a.T)
+ ret2 = func(b.T.copy(), a.T.copy())
+ assert_equal(ret1, ret2)
+ def test_dot(self):
+ a = np.array([[1, 0], [0, 1]])
+ b = np.array([[0, 1], [1, 0]])
+ c = np.array([[9, 1], [1, -9]])
# function versus methods
assert_equal(np.dot(a, b), a.dot(b))
assert_equal(np.dot(np.dot(a, b), c), a.dot(b).dot(c))
@@ -2710,6 +2962,29 @@ class TestMethods(object):
np.dot(a, b, out=out)
np.matmul(a, b, out=out)
+ def test_dot_matmul_inner_array_casting_fails(self):
+
+ class A(object):
+ def __array__(self, *args, **kwargs):
+ raise NotImplementedError
+
+ # Don't override the error from calling __array__()
+ assert_raises(NotImplementedError, np.dot, A(), A())
+ assert_raises(NotImplementedError, np.matmul, A(), A())
+ assert_raises(NotImplementedError, np.inner, A(), A())
+
+ def test_matmul_out(self):
+ # overlapping memory
+ a = np.arange(18).reshape(2, 3, 3)
+ b = np.matmul(a, a)
+ c = np.matmul(a, a, out=a)
+ assert_(c is a)
+ assert_equal(c, b)
+ a = np.arange(18).reshape(2, 3, 3)
+ c = np.matmul(a, a, out=a[::-1, ...])
+ assert_(c.base is a.base)
+ assert_equal(c, b)
+
def test_diagonal(self):
a = np.arange(12).reshape((3, 4))
assert_equal(a.diagonal(), [0, 5, 10])
@@ -2734,8 +3009,6 @@ class TestMethods(object):
assert_equal(b.diagonal(0, 2, 1), [[0, 3], [4, 7]])
def test_diagonal_view_notwriteable(self):
- # this test is only for 1.9, the diagonal view will be
- # writeable in 1.10.
a = np.eye(3).diagonal()
assert_(not a.flags.writeable)
assert_(not a.flags.owndata)
@@ -3009,8 +3282,8 @@ class TestMethods(object):
assert_equal(ac, np.conjugate(a))
a = np.array([1-1j, 1, 2.0, 'f'], object)
- assert_raises(AttributeError, lambda: a.conj())
- assert_raises(AttributeError, lambda: a.conjugate())
+ assert_raises(TypeError, lambda: a.conj())
+ assert_raises(TypeError, lambda: a.conjugate())
def test__complex__(self):
dtypes = ['i1', 'i2', 'i4', 'i8',
@@ -3119,6 +3392,8 @@ class TestBinop(object):
# 'eq': (np.equal, False),
# 'ne': (np.not_equal, False),
}
+ if sys.version_info >= (3, 5):
+ ops['matmul'] = (np.matmul, False, float)
class Coerced(Exception):
pass
@@ -3161,7 +3436,7 @@ class TestBinop(object):
if issubclass(MyType, np.ndarray):
# Use this range to avoid special case weirdnesses around
# divide-by-0, pow(x, 2), overflow due to pow(big, big), etc.
- return np.arange(3, 5).view(MyType)
+ return np.arange(3, 7).reshape(2, 2).view(MyType)
else:
return MyType()
@@ -3170,7 +3445,7 @@ class TestBinop(object):
for op, (ufunc, has_inplace, dtype) in ops.items():
err_msg = ('op: %s, ufunc: %s, has_inplace: %s, dtype: %s'
% (op, ufunc, has_inplace, dtype))
- check_objs = [np.arange(3, 5, dtype=dtype)]
+ check_objs = [np.arange(3, 7, dtype=dtype).reshape(2, 2)]
if check_scalar:
check_objs.append(check_objs[0][0])
for arr in check_objs:
@@ -3549,8 +3824,79 @@ class TestSubscripting(object):
class TestPickling(object):
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL >= 5,
+ reason=('this tests the error messages when trying to'
+ 'protocol 5 although it is not available'))
+ def test_correct_protocol5_error_message(self):
+ array = np.arange(10)
+
+ if sys.version_info[:2] in ((3, 6), (3, 7)):
+ # For the specific case of python3.6 and 3.7, raise a clear import
+ # error about the pickle5 backport when trying to use protocol=5
+ # without the pickle5 package
+ with pytest.raises(ImportError):
+ array.__reduce_ex__(5)
+
+ elif sys.version_info[:2] < (3, 6):
+ # On Python versions before 3.6, calling __reduce_ex__ explicitly
+ # with protocol=5 raises a ValueError saying that protocol 5 is not
+ # available for this Python version
+ with pytest.raises(ValueError):
+ array.__reduce_ex__(5)
+
+ def test_record_array_with_object_dtype(self):
+ my_object = object()
+
+ arr_with_object = np.array(
+ [(my_object, 1, 2.0)],
+ dtype=[('a', object), ('b', int), ('c', float)])
+ arr_without_object = np.array(
+ [('xxx', 1, 2.0)],
+ dtype=[('a', str), ('b', int), ('c', float)])
+
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_arr_with_object = pickle.loads(
+ pickle.dumps(arr_with_object, protocol=proto))
+ depickled_arr_without_object = pickle.loads(
+ pickle.dumps(arr_without_object, protocol=proto))
+
+ assert_equal(arr_with_object.dtype,
+ depickled_arr_with_object.dtype)
+ assert_equal(arr_without_object.dtype,
+ depickled_arr_without_object.dtype)
+
+ @pytest.mark.skipif(pickle.HIGHEST_PROTOCOL < 5,
+ reason="requires pickle protocol 5")
+ def test_f_contiguous_array(self):
+ f_contiguous_array = np.array([[1, 2, 3], [4, 5, 6]], order='F')
+ buffers = []
+
+ # When using pickle protocol 5, Fortran-contiguous arrays can be
+ # serialized using out-of-band buffers
+ bytes_string = pickle.dumps(f_contiguous_array, protocol=5,
+ buffer_callback=buffers.append)
+
+ assert len(buffers) > 0
+
+ depickled_f_contiguous_array = pickle.loads(bytes_string,
+ buffers=buffers)
+
+ assert_equal(f_contiguous_array, depickled_f_contiguous_array)
+
+ def test_non_contiguous_array(self):
+ non_contiguous_array = np.arange(12).reshape(3, 4)[:, :2]
+ assert not non_contiguous_array.flags.c_contiguous
+ assert not non_contiguous_array.flags.f_contiguous
+
+ # make sure non-contiguous arrays can be pickled-depickled
+ # using any protocol
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ depickled_non_contiguous_array = pickle.loads(
+ pickle.dumps(non_contiguous_array, protocol=proto))
+
+ assert_equal(non_contiguous_array, depickled_non_contiguous_array)
+
def test_roundtrip(self):
- import pickle
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
carray = np.array([[2, 9], [7, 0], [3, 8]])
DATA = [
@@ -3560,13 +3906,18 @@ class TestPickling(object):
('c', float)])
]
+ refs = [weakref.ref(a) for a in DATA]
for a in DATA:
assert_equal(
a, pickle.loads(pickle.dumps(a, protocol=proto)),
err_msg="%r" % a)
+ del a, DATA, carray
+ break_cycles()
+ # check for reference leaks (gh-12793)
+ for ref in refs:
+ assert ref() is None
def _loads(self, obj):
- import pickle
if sys.version_info[0] >= 3:
return pickle.loads(obj, encoding='latin1')
else:
@@ -3617,6 +3968,17 @@ class TestPickling(object):
p = self._loads(s)
assert_equal(a, p)
+ def test_datetime64_byteorder(self):
+ original = np.array([['2015-02-24T00:00:00.000000000']], dtype='datetime64[ns]')
+
+ original_byte_reversed = original.copy(order='K')
+ original_byte_reversed.dtype = original_byte_reversed.dtype.newbyteorder('S')
+ original_byte_reversed.byteswap(inplace=True)
+
+ new = pickle.loads(pickle.dumps(original_byte_reversed))
+
+ assert_equal(original.dtype, new.dtype)
+
class TestFancyIndexing(object):
def test_list(self):
@@ -3827,6 +4189,7 @@ class TestArgmax(object):
assert_equal(a.argmax(out=out1, axis=0), np.argmax(a, out=out2, axis=0))
assert_equal(out1, out2)
+ @pytest.mark.leaks_references(reason="replaces None with NULL.")
def test_object_argmax_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
@@ -3975,6 +4338,7 @@ class TestArgmin(object):
assert_equal(a.argmin(out=out1, axis=0), np.argmin(a, out=out2, axis=0))
assert_equal(out1, out2)
+ @pytest.mark.leaks_references(reason="replaces None with NULL.")
def test_object_argmin_with_NULLs(self):
# See gh-6032
a = np.empty(4, dtype='O')
@@ -4047,7 +4411,11 @@ class TestClip(object):
x = (np.random.random(1000) * array_max).astype(dtype)
if inplace:
- x.clip(clip_min, clip_max, x)
+ # The tests that call us pass clip_min and clip_max that
+ # might not fit in the destination dtype. They were written
+ # assuming the previous unsafe casting, which now must be
+ # passed explicitly to avoid a warning.
+ x.clip(clip_min, clip_max, x, casting='unsafe')
else:
x = x.clip(clip_min, clip_max)
byteorder = '='
@@ -4066,7 +4434,7 @@ class TestClip(object):
'float', 1024, 0, 0, inplace=inplace)
self._clip_type(
- 'int', 1024, -120, 100.5, inplace=inplace)
+ 'int', 1024, -120, 100, inplace=inplace)
self._clip_type(
'int', 1024, 0, 0, inplace=inplace)
@@ -4160,6 +4528,16 @@ class TestPutmask(object):
assert_array_equal(rec['y'], [11, 4])
assert_array_equal(rec['z'], [3, 3])
+ def test_overlaps(self):
+ # gh-6272 check overlap
+ x = np.array([True, False, True, False])
+ np.putmask(x[1:4], [True, True, True], x[:3])
+ assert_equal(x, np.array([True, True, False, True]))
+
+ x = np.array([True, False, True, False])
+ np.putmask(x[1:4], x[:3], [True, False, True])
+ assert_equal(x, np.array([True, True, True, True]))
+
class TestTake(object):
def tst_basic(self, x):
@@ -4208,20 +4586,33 @@ class TestTake(object):
rec1 = rec.take([1])
assert_(rec1['x'] == 5.0 and rec1['y'] == 4.0)
+ def test_out_overlap(self):
+ # gh-6272 check overlap on out
+ x = np.arange(5)
+ y = np.take(x, [1, 2, 3], out=x[2:5], mode='wrap')
+ assert_equal(y, np.array([1, 2, 3]))
class TestLexsort(object):
- def test_basic(self):
- a = [1, 2, 1, 3, 1, 5]
- b = [0, 4, 5, 6, 2, 3]
+ @pytest.mark.parametrize('dtype',[
+ np.uint8, np.uint16, np.uint32, np.uint64,
+ np.int8, np.int16, np.int32, np.int64,
+ np.float16, np.float32, np.float64
+ ])
+ def test_basic(self, dtype):
+ a = np.array([1, 2, 1, 3, 1, 5], dtype=dtype)
+ b = np.array([0, 4, 5, 6, 2, 3], dtype=dtype)
idx = np.lexsort((b, a))
expected_idx = np.array([0, 4, 2, 1, 3, 5])
assert_array_equal(idx, expected_idx)
+ assert_array_equal(a[idx], np.sort(a))
- x = np.vstack((b, a))
- idx = np.lexsort(x)
- assert_array_equal(idx, expected_idx)
+ def test_mixed(self):
+ a = np.array([1, 2, 1, 3, 1, 5])
+ b = np.array([0, 4, 5, 6, 2, 3], dtype='datetime64[D]')
- assert_array_equal(x[1][idx], np.sort(x[1]))
+ idx = np.lexsort((b, a))
+ expected_idx = np.array([0, 4, 2, 1, 3, 5])
+ assert_array_equal(idx, expected_idx)
def test_datetime(self):
a = np.array([0,0,0], dtype='datetime64[D]')
@@ -4323,6 +4714,20 @@ class TestIO(object):
y = np.fromfile(self.filename, dtype=self.dtype)
assert_array_equal(y, self.x.flat)
+ @pytest.mark.skipif(pathlib is None, reason="pathlib not found")
+ def test_roundtrip_pathlib(self):
+ p = pathlib.Path(self.filename)
+ self.x.tofile(p)
+ y = np.fromfile(p, dtype=self.dtype)
+ assert_array_equal(y, self.x.flat)
+
+ @pytest.mark.skipif(pathlib is None, reason="pathlib not found")
+ def test_roundtrip_dump_pathlib(self):
+ p = pathlib.Path(self.filename)
+ self.x.dump(p)
+ y = np.load(p, allow_pickle=True)
+ assert_array_equal(y, self.x)
+
def test_roundtrip_binary_str(self):
s = self.x.tobytes()
y = np.frombuffer(s, dtype=self.dtype)
@@ -4442,6 +4847,49 @@ class TestIO(object):
f.close()
assert_equal(pos, 10, err_msg=err_msg)
+ def test_load_object_array_fromfile(self):
+ # gh-12300
+ with open(self.filename, 'w') as f:
+ # Ensure we have a file with consistent contents
+ pass
+
+ with open(self.filename, 'rb') as f:
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, f, dtype=object)
+
+ assert_raises_regex(ValueError, "Cannot read into object array",
+ np.fromfile, self.filename, dtype=object)
+
+ def test_fromfile_offset(self):
+ with open(self.filename, 'wb') as f:
+ self.x.tofile(f)
+
+ with open(self.filename, 'rb') as f:
+ y = np.fromfile(f, dtype=self.dtype, offset=0)
+ assert_array_equal(y, self.x.flat)
+
+ with open(self.filename, 'rb') as f:
+ count_items = len(self.x.flat) // 8
+ offset_items = len(self.x.flat) // 4
+ offset_bytes = self.dtype.itemsize * offset_items
+ y = np.fromfile(f, dtype=self.dtype, count=count_items, offset=offset_bytes)
+ assert_array_equal(y, self.x.flat[offset_items:offset_items+count_items])
+
+ # subsequent seeks should stack
+ offset_bytes = self.dtype.itemsize
+ z = np.fromfile(f, dtype=self.dtype, offset=offset_bytes)
+ assert_array_equal(z, self.x.flat[offset_items+count_items+1:])
+
+ with open(self.filename, 'wb') as f:
+ self.x.tofile(f, sep=",")
+
+ with open(self.filename, 'rb') as f:
+ assert_raises_regex(
+ TypeError,
+ "'offset' argument only permitted for binary files",
+ np.fromfile, self.filename, dtype=self.dtype,
+ sep=",", offset=1)
+
def _check_from(self, s, value, **kw):
if 'sep' not in kw:
y = np.frombuffer(s, **kw)
@@ -4521,7 +4969,8 @@ class TestIO(object):
self._check_from(b'1,2,3,4', [1., 2., 3., 4.], dtype=float, sep=',')
def test_malformed(self):
- self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
+ with assert_warns(DeprecationWarning):
+ self._check_from(b'1.234 1,234', [1.234, 1.], sep=' ')
def test_long_sep(self):
self._check_from(b'1_x_3_x_4_x_5', [1, 3, 4, 5], sep='_x_')
@@ -4574,6 +5023,19 @@ class TestIO(object):
self.test_tofile_sep()
self.test_tofile_format()
+ def test_fromfile_subarray_binary(self):
+ # Test subarray dtypes which are absorbed into the shape
+ x = np.arange(24, dtype="i4").reshape(2, 3, 4)
+ x.tofile(self.filename)
+ res = np.fromfile(self.filename, dtype="(3,4)i4")
+ assert_array_equal(x, res)
+
+ x_str = x.tobytes()
+ with assert_warns(DeprecationWarning):
+ # binary fromstring is deprecated
+ res = np.fromstring(x_str, dtype="(3,4)i4")
+ assert_array_equal(x, res)
+
class TestFromBuffer(object):
@pytest.mark.parametrize('byteorder', ['<', '>'])
@@ -4643,6 +5105,22 @@ class TestFlat(object):
assert_(e.flags.writebackifcopy is False)
assert_(f.flags.writebackifcopy is False)
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ def test_refcount(self):
+ # includes regression test for reference count error gh-13165
+ inds = [np.intp(0), np.array([True]*self.a.size), np.array([0]), None]
+ indtype = np.dtype(np.intp)
+ rc_indtype = sys.getrefcount(indtype)
+ for ind in inds:
+ rc_ind = sys.getrefcount(ind)
+ for _ in range(100):
+ try:
+ self.a.flat[ind]
+ except IndexError:
+ pass
+ assert_(abs(sys.getrefcount(ind) - rc_ind) < 50)
+ assert_(abs(sys.getrefcount(indtype) - rc_indtype) < 50)
+
class TestResize(object):
def test_basic(self):
@@ -4730,6 +5208,12 @@ class TestResize(object):
x_view.resize((0, 10))
x_view.resize((0, 100))
+ def test_check_weakref(self):
+ x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
+ xref = weakref.ref(x)
+ assert_raises(ValueError, x.resize, (5, 1))
+ del xref # avoid pyflakes unused variable warning.
+
class TestRecord(object):
def test_field_rename(self):
@@ -4738,11 +5222,11 @@ class TestRecord(object):
assert_equal(dt.names, ['p', 'q'])
def test_multiple_field_name_occurrence(self):
- def test_assign():
- dtype = np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
+ def test_dtype_init():
+ np.dtype([("A", "f8"), ("B", "f8"), ("A", "f8")])
# Error raised when multiple fields have the same name
- assert_raises(ValueError, test_assign)
+ assert_raises(ValueError, test_dtype_init)
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_bytes_fields(self):
@@ -4762,13 +5246,11 @@ class TestRecord(object):
@pytest.mark.skipif(sys.version_info[0] < 3, reason="Not Python 3")
def test_multiple_field_name_unicode(self):
- def test_assign_unicode():
- dt = np.dtype([("\u20B9", "f8"),
- ("B", "f8"),
- ("\u20B9", "f8")])
+ def test_dtype_unicode():
+ np.dtype([("\u20B9", "f8"), ("B", "f8"), ("\u20B9", "f8")])
# Error raised when multiple fields have the same name(unicode included)
- assert_raises(ValueError, test_assign_unicode)
+ assert_raises(ValueError, test_dtype_unicode)
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_unicode_field_titles(self):
@@ -4850,25 +5332,9 @@ class TestRecord(object):
fn2 = func('f2')
b[fn2] = 3
- # In 1.16 code below can be replaced by:
- # assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- # assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- # assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- with suppress_warnings() as sup:
- sup.filter(FutureWarning,
- ".* selecting multiple fields .*")
-
- assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
- assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
- assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
- # view of subfield view/copy
- assert_equal(b[['f1', 'f2']][0].view(('i4', 2)).tolist(),
- (2, 3))
- assert_equal(b[['f2', 'f1']][0].view(('i4', 2)).tolist(),
- (3, 2))
- view_dtype = [('f1', 'i4'), ('f3', [('', 'i4')])]
- assert_equal(b[['f1', 'f3']][0].view(view_dtype).tolist(),
- (2, (1,)))
+ assert_equal(b[['f1', 'f2']][0].tolist(), (2, 3))
+ assert_equal(b[['f2', 'f1']][0].tolist(), (3, 2))
+ assert_equal(b[['f1', 'f3']][0].tolist(), (2, (1,)))
# non-ascii unicode field indexing is well behaved
if not is_py3:
@@ -4878,50 +5344,6 @@ class TestRecord(object):
assert_raises(ValueError, a.__setitem__, u'\u03e0', 1)
assert_raises(ValueError, a.__getitem__, u'\u03e0')
- # can be removed in 1.16
- def test_field_names_deprecation(self):
-
- def collect_warnings(f, *args, **kwargs):
- with warnings.catch_warnings(record=True) as log:
- warnings.simplefilter("always")
- f(*args, **kwargs)
- return [w.category for w in log]
-
- a = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- a['f1'][0] = 1
- a['f2'][0] = 2
- a['f3'][0] = (3,)
- b = np.zeros((1,), dtype=[('f1', 'i4'),
- ('f2', 'i4'),
- ('f3', [('sf1', 'i4')])])
- b['f1'][0] = 1
- b['f2'][0] = 2
- b['f3'][0] = (3,)
-
- # All the different functions raise a warning, but not an error
- assert_equal(collect_warnings(a[['f1', 'f2']].__setitem__, 0, (10, 20)),
- [FutureWarning])
- # For <=1.12 a is not modified, but it will be in 1.13
- assert_equal(a, b)
-
- # Views also warn
- subset = a[['f1', 'f2']]
- subset_view = subset.view()
- assert_equal(collect_warnings(subset_view['f1'].__setitem__, 0, 10),
- [FutureWarning])
- # But the write goes through:
- assert_equal(subset['f1'][0], 10)
- # Only one warning per multiple field indexing, though (even if there
- # are multiple views involved):
- assert_equal(collect_warnings(subset['f1'].__setitem__, 0, 10), [])
-
- # make sure views of a multi-field index warn too
- c = np.zeros(3, dtype='i8,i8,i8')
- assert_equal(collect_warnings(c[['f0', 'f2']].view, 'i8,i8'),
- [FutureWarning])
-
def test_record_hash(self):
a = np.array([(1, 2), (1, 2)], dtype='i1,i2')
a.flags.writeable = False
@@ -4945,6 +5367,16 @@ class TestRecord(object):
np.array([(), (), (), (), ()], dtype={'names': [], 'formats': [],
'offsets': [], 'itemsize': 12})
+ def test_multifield_indexing_view(self):
+ a = np.ones(3, dtype=[('a', 'i4'), ('b', 'f4'), ('c', 'u4')])
+ v = a[['a', 'c']]
+ assert_(v.base is a)
+ assert_(v.dtype == np.dtype({'names': ['a', 'c'],
+ 'formats': ['i4', 'u4'],
+ 'offsets': [0, 8]}))
+ v[:] = (4,5)
+ assert_equal(a[0].item(), (4, 1, 5))
+
class TestView(object):
def test_basic(self):
x = np.array([(1, 2, 3, 4), (5, 6, 7, 8)],
@@ -5514,7 +5946,7 @@ class MatmulCommon(object):
"""
# Should work with these types. Will want to add
# "O" at some point
- types = "?bhilqBHILQefdgFDG"
+ types = "?bhilqBHILQefdgFDGO"
def test_exceptions(self):
dims = [
@@ -5565,16 +5997,40 @@ class MatmulCommon(object):
assert_(res.dtype == dt)
# vector vector returns scalars
- res = self.matmul(v, v)
- assert_(type(res) is np.dtype(dt).type)
+ if dt != "O":
+ res = self.matmul(v, v)
+ assert_(type(res) is np.dtype(dt).type)
+
+ def test_scalar_output(self):
+ vec1 = np.array([2])
+ vec2 = np.array([3, 4]).reshape(1, -1)
+ tgt = np.array([6, 8])
+ for dt in self.types[1:]:
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt)
+ res = self.matmul(v2.T, v1)
+ assert_equal(res, tgt)
+
+ # boolean type
+ vec = np.array([True, True], dtype='?').reshape(1, -1)
+ res = self.matmul(vec[:, 0], vec)
+ assert_equal(res, True)
def test_vector_vector_values(self):
- vec = np.array([1, 2])
- tgt = 5
+ vec1 = np.array([1, 2])
+ vec2 = np.array([3, 4]).reshape(-1, 1)
+ tgt1 = np.array([11])
+ tgt2 = np.array([[3, 6], [4, 8]])
for dt in self.types[1:]:
- v1 = vec.astype(dt)
- res = self.matmul(v1, v1)
- assert_equal(res, tgt)
+ v1 = vec1.astype(dt)
+ v2 = vec2.astype(dt)
+ res = self.matmul(v1, v2)
+ assert_equal(res, tgt1)
+ # no broadcast, we must make v1 into a 2d ndarray
+ res = self.matmul(v2, v1.reshape(1, -1))
+ assert_equal(res, tgt2)
# boolean type
vec = np.array([True, True], dtype='?')
@@ -5705,44 +6161,158 @@ class TestMatmul(MatmulCommon):
matmul = np.matmul
def test_out_arg(self):
- a = np.ones((2, 2), dtype=float)
- b = np.ones((2, 2), dtype=float)
- tgt = np.full((2,2), 2, dtype=float)
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ tgt = np.dot(a, b)
# test as positional argument
msg = "out positional argument"
- out = np.zeros((2, 2), dtype=float)
+ out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out)
assert_array_equal(out, tgt, err_msg=msg)
# test as keyword argument
msg = "out keyword argument"
- out = np.zeros((2, 2), dtype=float)
+ out = np.zeros((5, 2), dtype=float)
self.matmul(a, b, out=out)
assert_array_equal(out, tgt, err_msg=msg)
# test out with not allowed type cast (safe casting)
- # einsum and cblas raise different error types, so
- # use Exception.
- msg = "out argument with illegal cast"
- out = np.zeros((2, 2), dtype=np.int32)
- assert_raises(Exception, self.matmul, a, b, out=out)
-
- # skip following tests for now, cblas does not allow non-contiguous
- # outputs and consistency with dot would require same type,
- # dimensions, subtype, and c_contiguous.
-
- # test out with allowed type cast
- # msg = "out argument with allowed cast"
- # out = np.zeros((2, 2), dtype=np.complex128)
- # self.matmul(a, b, out=out)
- # assert_array_equal(out, tgt, err_msg=msg)
+ msg = "Cannot cast ufunc .* output"
+ out = np.zeros((5, 2), dtype=np.int32)
+ assert_raises_regex(TypeError, msg, self.matmul, a, b, out=out)
+
+ # test out with type upcast to complex
+ out = np.zeros((5, 2), dtype=np.complex128)
+ c = self.matmul(a, b, out=out)
+ assert_(c is out)
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, '')
+ c = c.astype(tgt.dtype)
+ assert_array_equal(c, tgt)
+
+ def test_out_contiguous(self):
+ a = np.ones((5, 2), dtype=float)
+ b = np.array([[1, 3], [5, 7]], dtype=float)
+ v = np.array([1, 3], dtype=float)
+ tgt = np.dot(a, b)
+ tgt_mv = np.dot(a, v)
# test out non-contiguous
- # msg = "out argument with non-contiguous layout"
- # c = np.zeros((2, 2, 2), dtype=float)
- # self.matmul(a, b, out=c[..., 0])
- # assert_array_equal(c, tgt, err_msg=msg)
+ out = np.ones((5, 2, 2), dtype=float)
+ c = self.matmul(a, b, out=out[..., 0])
+ assert c.base is out
+ assert_array_equal(c, tgt)
+ c = self.matmul(a, v, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+ c = self.matmul(v, a.T, out=out[:, 0, 0])
+ assert_array_equal(c, tgt_mv)
+
+ # test out contiguous in only last dim
+ out = np.ones((10, 2), dtype=float)
+ c = self.matmul(a, b, out=out[::2, :])
+ assert_array_equal(c, tgt)
+
+ # test transposes of out, args
+ out = np.ones((5, 2), dtype=float)
+ c = self.matmul(b.T, a.T, out=out.T)
+ assert_array_equal(out, tgt)
+
+ m1 = np.arange(15.).reshape(5, 3)
+ m2 = np.arange(21.).reshape(3, 7)
+ m3 = np.arange(30.).reshape(5, 6)[:, ::2] # non-contiguous
+ vc = np.arange(10.)
+ vr = np.arange(6.)
+ m0 = np.zeros((3, 0))
+ @pytest.mark.parametrize('args', (
+ # matrix-matrix
+ (m1, m2), (m2.T, m1.T), (m2.T.copy(), m1.T), (m2.T, m1.T.copy()),
+ # matrix-matrix-transpose, contiguous and non
+ (m1, m1.T), (m1.T, m1), (m1, m3.T), (m3, m1.T),
+ (m3, m3.T), (m3.T, m3),
+ # matrix-matrix non-contiguous
+ (m3, m2), (m2.T, m3.T), (m2.T.copy(), m3.T),
+ # vector-matrix, matrix-vector, contiguous
+ (m1, vr[:3]), (vc[:5], m1), (m1.T, vc[:5]), (vr[:3], m1.T),
+ # vector-matrix, matrix-vector, vector non-contiguous
+ (m1, vr[::2]), (vc[::2], m1), (m1.T, vc[::2]), (vr[::2], m1.T),
+ # vector-matrix, matrix-vector, matrix non-contiguous
+ (m3, vr[:3]), (vc[:5], m3), (m3.T, vc[:5]), (vr[:3], m3.T),
+ # vector-matrix, matrix-vector, both non-contiguous
+ (m3, vr[::2]), (vc[::2], m3), (m3.T, vc[::2]), (vr[::2], m3.T),
+ # size == 0
+ (m0, m0.T), (m0.T, m0), (m1, m0), (m0.T, m1.T),
+ ))
+ def test_dot_equivalent(self, args):
+ r1 = np.matmul(*args)
+ r2 = np.dot(*args)
+ assert_equal(r1, r2)
+
+ r3 = np.matmul(args[0].copy(), args[1].copy())
+ assert_equal(r1, r3)
+
+ def test_matmul_object(self):
+ import fractions
+
+ f = np.vectorize(fractions.Fraction)
+ def random_ints():
+ return np.random.randint(1, 1000, size=(10, 3, 3))
+ M1 = f(random_ints(), random_ints())
+ M2 = f(random_ints(), random_ints())
+
+ M3 = self.matmul(M1, M2)
+
+ [N1, N2, N3] = [a.astype(float) for a in [M1, M2, M3]]
+
+ assert_allclose(N3, self.matmul(N1, N2))
+
+ def test_matmul_object_type_scalar(self):
+ from fractions import Fraction as F
+ v = np.array([F(2,3), F(5,7)])
+ res = self.matmul(v, v)
+ assert_(type(res) is F)
+
+ def test_matmul_empty(self):
+ a = np.empty((3, 0), dtype=object)
+ b = np.empty((0, 3), dtype=object)
+ c = np.zeros((3, 3))
+ assert_array_equal(np.matmul(a, b), c)
+
+ def test_matmul_exception_multiply(self):
+ # test that matmul fails if `__mul__` is missing
+ class add_not_multiply():
+ def __add__(self, other):
+ return self
+ a = np.full((3,3), add_not_multiply())
+ with assert_raises(TypeError):
+ b = np.matmul(a, a)
+
+ def test_matmul_exception_add(self):
+ # test that matmul fails if `__add__` is missing
+ class multiply_not_add():
+ def __mul__(self, other):
+ return self
+ a = np.full((3,3), multiply_not_add())
+ with assert_raises(TypeError):
+ b = np.matmul(a, a)
+
+ def test_matmul_bool(self):
+ # gh-14439
+ a = np.array([[1, 0],[1, 1]], dtype=bool)
+ assert np.max(a.view(np.uint8)) == 1
+ b = np.matmul(a, a)
+ # matmul with boolean output should always be 0, 1
+ assert np.max(b.view(np.uint8)) == 1
+
+ rg = np.random.default_rng(np.random.PCG64(43))
+ d = rg.integers(2, size=4*5, dtype=np.int8)
+ d = d.reshape(4, 5) > 0
+ out1 = np.matmul(d, d.reshape(5, 4))
+ out2 = np.dot(d, d.reshape(5, 4))
+ assert_equal(out1, out2)
+
+ c = np.matmul(np.zeros((2, 0), dtype=bool), np.zeros(0, dtype=bool))
+ assert not np.any(c)
if sys.version_info[:2] >= (3, 5):
@@ -5766,6 +6336,11 @@ if sys.version_info[:2] >= (3, 5):
assert_equal(self.matmul(a, b), "A")
assert_equal(self.matmul(b, a), "A")
+ def test_matmul_raises(self):
+ assert_raises(TypeError, self.matmul, np.int8(5), np.int8(5))
+ assert_raises(TypeError, self.matmul, np.void(b'abc'), np.void(b'abc'))
+ assert_raises(ValueError, self.matmul, np.arange(10), np.void(b'abc'))
+
def test_matmul_inplace():
# It would be nice to support in-place matmul eventually, but for now
# we don't have a working implementation, so better just to error out
@@ -5780,6 +6355,17 @@ if sys.version_info[:2] >= (3, 5):
exec_ = getattr(builtins, "exec")
assert_raises(TypeError, exec_, "a @= b", globals(), locals())
+ def test_matmul_axes():
+ a = np.arange(3*4*5).reshape(3, 4, 5)
+ c = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (1, 2)])
+ assert c.shape == (3, 4, 4)
+ d = np.matmul(a, a, axes=[(-2, -1), (-1, -2), (0, 1)])
+ assert d.shape == (4, 4, 3)
+ e = np.swapaxes(d, 0, 2)
+ assert_array_equal(e, c)
+ f = np.matmul(a, np.arange(3), axes=[(1, 0), (0), (0)])
+ assert f.shape == (4, 5)
+
class TestInner(object):
@@ -5861,20 +6447,22 @@ class TestInner(object):
class TestAlen(object):
def test_basic(self):
- m = np.array([1, 2, 3])
- assert_equal(np.alen(m), 3)
+ with pytest.warns(DeprecationWarning):
+ m = np.array([1, 2, 3])
+ assert_equal(np.alen(m), 3)
- m = np.array([[1, 2, 3], [4, 5, 7]])
- assert_equal(np.alen(m), 2)
+ m = np.array([[1, 2, 3], [4, 5, 7]])
+ assert_equal(np.alen(m), 2)
- m = [1, 2, 3]
- assert_equal(np.alen(m), 3)
+ m = [1, 2, 3]
+ assert_equal(np.alen(m), 3)
- m = [[1, 2, 3], [4, 5, 7]]
- assert_equal(np.alen(m), 2)
+ m = [[1, 2, 3], [4, 5, 7]]
+ assert_equal(np.alen(m), 2)
def test_singleton(self):
- assert_equal(np.alen(5), 1)
+ with pytest.warns(DeprecationWarning):
+ assert_equal(np.alen(5), 1)
class TestChoose(object):
@@ -6661,6 +7249,13 @@ class TestNewBufferProtocol(object):
RuntimeError, "ndim",
np.array, m)
+ # The above seems to create some deep cycles, clean them up for
+ # easier reference count debugging:
+ del c_u8_33d, m
+ for i in range(33):
+ if gc.collect() == 0:
+ break
+
def test_error_pointer_type(self):
# gh-6741
m = memoryview(ctypes.pointer(ctypes.c_uint8()))
@@ -6670,7 +7265,7 @@ class TestNewBufferProtocol(object):
ValueError, "format string",
np.array, m)
- def test_error_message(self):
+ def test_error_message_unsupported(self):
# wchar has no corresponding numpy type - if this changes in future, we
# need a better way to construct an invalid memoryview format.
t = ctypes.c_wchar * 4
@@ -6679,14 +7274,17 @@ class TestNewBufferProtocol(object):
exc = cm.exception
if sys.version_info.major > 2:
- with assert_raises_regex(ValueError, "Unknown .* specifier 'u'"):
+ with assert_raises_regex(
+ NotImplementedError,
+ r"Unrepresentable .* 'u' \(UCS-2 strings\)"
+ ):
raise exc.__cause__
def test_ctypes_integer_via_memoryview(self):
# gh-11150, due to bpo-10746
for c_integer in {ctypes.c_int, ctypes.c_long, ctypes.c_longlong}:
value = c_integer(42)
- with warnings.catch_warnings(record=True) as w:
+ with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
np.asarray(value)
@@ -6696,7 +7294,7 @@ class TestNewBufferProtocol(object):
_fields_ = [('a', ctypes.c_uint8), ('b', ctypes.c_uint32)]
f = foo(a=1, b=2)
- with warnings.catch_warnings(record=True) as w:
+ with warnings.catch_warnings(record=True):
warnings.filterwarnings('always', r'.*\bctypes\b', RuntimeWarning)
arr = np.asarray(f)
@@ -6740,12 +7338,11 @@ class TestArrayAttributeDeletion(object):
assert_raises(AttributeError, delattr, a, s)
-def test_array_interface():
- # Test scalar coercion within the array interface
+class TestArrayInterface():
class Foo(object):
def __init__(self, value):
self.value = value
- self.iface = {'typestr': '=f8'}
+ self.iface = {'typestr': 'f8'}
def __float__(self):
return float(self.value)
@@ -6754,22 +7351,39 @@ def test_array_interface():
def __array_interface__(self):
return self.iface
+
f = Foo(0.5)
- assert_equal(np.array(f), 0.5)
- assert_equal(np.array([f]), [0.5])
- assert_equal(np.array([f, f]), [0.5, 0.5])
- assert_equal(np.array(f).dtype, np.dtype('=f8'))
- # Test various shape definitions
- f.iface['shape'] = ()
- assert_equal(np.array(f), 0.5)
- f.iface['shape'] = None
- assert_raises(TypeError, np.array, f)
- f.iface['shape'] = (1, 1)
- assert_equal(np.array(f), [[0.5]])
- f.iface['shape'] = (2,)
- assert_raises(ValueError, np.array, f)
-
- # test scalar with no shape
+
+ @pytest.mark.parametrize('val, iface, expected', [
+ (f, {}, 0.5),
+ ([f], {}, [0.5]),
+ ([f, f], {}, [0.5, 0.5]),
+ (f, {'shape': ()}, 0.5),
+ (f, {'shape': None}, TypeError),
+ (f, {'shape': (1, 1)}, [[0.5]]),
+ (f, {'shape': (2,)}, ValueError),
+ (f, {'strides': ()}, 0.5),
+ (f, {'strides': (2,)}, ValueError),
+ (f, {'strides': 16}, TypeError),
+ ])
+ def test_scalar_interface(self, val, iface, expected):
+ # Test scalar coercion within the array interface
+ self.f.iface = {'typestr': 'f8'}
+ self.f.iface.update(iface)
+ if HAS_REFCOUNT:
+ pre_cnt = sys.getrefcount(np.dtype('f8'))
+ if isinstance(expected, type):
+ assert_raises(expected, np.array, val)
+ else:
+ result = np.array(val)
+ assert_equal(np.array(val), expected)
+ assert result.dtype == 'f8'
+ del result
+ if HAS_REFCOUNT:
+ post_cnt = sys.getrefcount(np.dtype('f8'))
+ assert_equal(pre_cnt, post_cnt)
+
+def test_interface_no_shape():
class ArrayLike(object):
array = np.array(1)
__array_interface__ = array.__array_interface__
@@ -6811,6 +7425,19 @@ def test_array_interface_empty_shape():
assert_equal(arr1, arr2)
assert_equal(arr1, arr3)
+def test_array_interface_offset():
+ arr = np.array([1, 2, 3], dtype='int32')
+ interface = dict(arr.__array_interface__)
+ interface['data'] = memoryview(arr)
+ interface['shape'] = (2,)
+ interface['offset'] = 4
+
+
+ class DummyArray(object):
+ __array_interface__ = interface
+
+ arr1 = np.asarray(DummyArray())
+ assert_equal(arr1, arr[1:])
def test_flat_element_deletion():
it = np.ones(3).flat
@@ -6837,7 +7464,7 @@ class TestMemEventHook(object):
# needs to be larger then limit of small memory cacher in ctors.c
a = np.zeros(1000)
del a
- gc.collect()
+ break_cycles()
_multiarray_tests.test_pydatamem_seteventhook_end()
class TestMapIter(object):
@@ -6942,6 +7569,7 @@ class TestConversion(object):
except NameError:
Error = RuntimeError # python < 3.5
assert_raises(Error, bool, self_containing) # previously stack overflow
+ self_containing[0] = None # resolve circular reference
def test_to_int_scalar(self):
# gh-9972 means that these aren't always the same
@@ -7367,6 +7995,55 @@ class TestCTypes(object):
finally:
_internal.ctypes = ctypes
+ def _make_readonly(x):
+ x.flags.writeable = False
+ return x
+
+ @pytest.mark.parametrize('arr', [
+ np.array([1, 2, 3]),
+ np.array([['one', 'two'], ['three', 'four']]),
+ np.array((1, 2), dtype='i4,i4'),
+ np.zeros((2,), dtype=
+ np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['a', 'b'],
+ offsets=[0, 2],
+ itemsize=6
+ ))
+ ),
+ np.array([None], dtype=object),
+ np.array([]),
+ np.empty((0, 0)),
+ _make_readonly(np.array([1, 2, 3])),
+ ], ids=[
+ '1d',
+ '2d',
+ 'structured',
+ 'overlapping',
+ 'object',
+ 'empty',
+ 'empty-2d',
+ 'readonly'
+ ])
+ def test_ctypes_data_as_holds_reference(self, arr):
+ # gh-9647
+ # create a copy to ensure that pytest does not mess with the refcounts
+ arr = arr.copy()
+
+ arr_ref = weakref.ref(arr)
+
+ ctypes_ptr = arr.ctypes.data_as(ctypes.c_void_p)
+
+ # `ctypes_ptr` should hold onto `arr`
+ del arr
+ break_cycles()
+ assert_(arr_ref() is not None, "ctypes pointer did not hold onto a reference")
+
+ # but when the `ctypes_ptr` object dies, so should `arr`
+ del ctypes_ptr
+ break_cycles()
+ assert_(arr_ref() is None, "unknowable whether ctypes pointer holds a reference")
+
class TestWritebackIfCopy(object):
# all these tests use the WRITEBACKIFCOPY mechanism
@@ -7382,17 +8059,13 @@ class TestWritebackIfCopy(object):
res = np.argmin(mat, 0, out=out)
assert_equal(res, range(5))
- def test_clip_with_out(self):
- mat = np.eye(5)
- out = np.eye(5, dtype='i2')
- res = np.clip(mat, a_min=-10, a_max=0, out=out)
- assert_equal(np.sum(out), 0)
-
def test_insert_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
# uses arr_insert
np.place(a, a>2, [44, 55])
assert_equal(a, np.array([[0, 44], [1, 55], [2, 44]]))
+ # hit one of the failing paths
+ assert_raises(ValueError, np.place, a, a>20, [])
def test_put_noncontiguous(self):
a = np.arange(6).reshape(2,3).T # force non-c-contiguous
@@ -7451,6 +8124,8 @@ class TestWritebackIfCopy(object):
arr_wb[...] = 100
assert_equal(arr, -100)
+ @pytest.mark.leaks_references(
+ reason="increments self in dealloc; ignore since deprecated path.")
def test_dealloc_warning(self):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
@@ -7543,15 +8218,15 @@ class TestArrayFinalize(object):
assert_(isinstance(obj_subarray, RaisesInFinalize))
# reference should still be held by obj_arr
- gc.collect()
+ break_cycles()
assert_(obj_ref() is not None, "object should not already be dead")
del obj_arr
- gc.collect()
+ break_cycles()
assert_(obj_ref() is not None, "obj_arr should not hold the last reference")
del obj_subarray
- gc.collect()
+ break_cycles()
assert_(obj_ref() is None, "no references should remain")
@@ -7663,6 +8338,77 @@ def test_uintalignment_and_alignment():
dst = np.zeros((2,2), dtype='c8')
dst[:,1] = src[:,1] # assert in lowlevel_strided_loops fails?
+class TestAlignment(object):
+ # adapted from scipy._lib.tests.test__util.test__aligned_zeros
+ # Checks that unusual memory alignments don't trip up numpy.
+ # In particular, check RELAXED_STRIDES don't trip alignment assertions in
+ # NDEBUG mode for size-0 arrays (gh-12503)
+
+ def check(self, shape, dtype, order, align):
+ err_msg = repr((shape, dtype, order, align))
+ x = _aligned_zeros(shape, dtype, order, align=align)
+ if align is None:
+ align = np.dtype(dtype).alignment
+ assert_equal(x.__array_interface__['data'][0] % align, 0)
+ if hasattr(shape, '__len__'):
+ assert_equal(x.shape, shape, err_msg)
+ else:
+ assert_equal(x.shape, (shape,), err_msg)
+ assert_equal(x.dtype, dtype)
+ if order == "C":
+ assert_(x.flags.c_contiguous, err_msg)
+ elif order == "F":
+ if x.size > 0:
+ assert_(x.flags.f_contiguous, err_msg)
+ elif order is None:
+ assert_(x.flags.c_contiguous, err_msg)
+ else:
+ raise ValueError()
+
+ def test_various_alignments(self):
+ for align in [1, 2, 3, 4, 8, 12, 16, 32, 64, None]:
+ for n in [0, 1, 3, 11]:
+ for order in ["C", "F", None]:
+ for dtype in list(np.typecodes["All"]) + ['i4,i4,i4']:
+ if dtype == 'O':
+ # object dtype can't be misaligned
+ continue
+ for shape in [n, (1, 2, 3, n)]:
+ self.check(shape, np.dtype(dtype), order, align)
+
+ def test_strided_loop_alignments(self):
+ # particularly test that complex64 and float128 use right alignment
+ # code-paths, since these are particularly problematic. It is useful to
+ # turn on USE_DEBUG for this test, so lowlevel-loop asserts are run.
+ for align in [1, 2, 4, 8, 12, 16, None]:
+ xf64 = _aligned_zeros(3, np.float64)
+
+ xc64 = _aligned_zeros(3, np.complex64, align=align)
+ xf128 = _aligned_zeros(3, np.longdouble, align=align)
+
+ # test casting, both to and from misaligned
+ with suppress_warnings() as sup:
+ sup.filter(np.ComplexWarning, "Casting complex values")
+ xc64.astype('f8')
+ xf64.astype(np.complex64)
+ test = xc64 + xf64
+
+ xf128.astype('f8')
+ xf64.astype(np.longdouble)
+ test = xf128 + xf64
+
+ test = xf128 + xc64
+
+ # test copy, both to and from misaligned
+ # contig copy
+ xf64[:] = xf64.copy()
+ xc64[:] = xc64.copy()
+ xf128[:] = xf128.copy()
+ # strided copy
+ xf64[::2] = xf64[::2].copy()
+ xc64[::2] = xc64[::2].copy()
+ xf128[::2] = xf128[::2].copy()
+
def test_getfield():
a = np.arange(32, dtype='uint16')
if sys.byteorder == 'little':
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index 5e8165bc5..cf66751f8 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -1,14 +1,13 @@
from __future__ import division, absolute_import, print_function
import sys
-import warnings
import pytest
import numpy as np
import numpy.core._multiarray_tests as _multiarray_tests
from numpy import array, arange, nditer, all
from numpy.testing import (
- assert_, assert_equal, assert_array_equal, assert_raises, assert_warns,
+ assert_, assert_equal, assert_array_equal, assert_raises,
HAS_REFCOUNT, suppress_warnings
)
@@ -1865,7 +1864,7 @@ def test_iter_buffered_cast_structured_type():
# make sure multi-field struct type -> simple doesn't work
sdt = [('a', 'f4'), ('b', 'i8'), ('d', 'O')]
a = np.array([(5.5, 7, 'test'), (8, 10, 11)], dtype=sdt)
- assert_raises(ValueError, lambda: (
+ assert_raises(TypeError, lambda: (
nditer(a, ['buffered', 'refs_ok'], ['readonly'],
casting='unsafe',
op_dtypes='i4')))
@@ -2196,21 +2195,15 @@ class TestIterNested(object):
a = arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
def test_reorder(self):
@@ -2219,40 +2212,28 @@ class TestIterNested(object):
# In 'K' order (default), it gets reordered
i, j = np.nested_iters(a, [[0], [2, 1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, it doesn't
i, j = np.nested_iters(a, [[0], [2, 1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4, 1, 3, 5], [6, 8, 10, 7, 9, 11]])
i, j = np.nested_iters(a, [[1, 0], [2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [6, 7], [2, 3], [8, 9], [4, 5], [10, 11]])
i, j = np.nested_iters(a, [[2, 0], [1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [6, 8, 10], [1, 3, 5], [7, 9, 11]])
def test_flip_axes(self):
@@ -2261,40 +2242,28 @@ class TestIterNested(object):
# In 'K' order (default), the axes all get flipped
i, j = np.nested_iters(a, [[0], [1, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5], [6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[0, 1], [2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9], [10, 11]])
i, j = np.nested_iters(a, [[0, 2], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 2, 4], [1, 3, 5], [6, 8, 10], [7, 9, 11]])
# In 'C' order, flipping axes is disabled
i, j = np.nested_iters(a, [[0], [1, 2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10, 9, 8, 7, 6], [5, 4, 3, 2, 1, 0]])
i, j = np.nested_iters(a, [[0, 1], [2]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[11, 10], [9, 8], [7, 6], [5, 4], [3, 2], [1, 0]])
i, j = np.nested_iters(a, [[0, 2], [1]], order='C')
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[11, 9, 7], [10, 8, 6], [5, 3, 1], [4, 2, 0]])
def test_broadcast(self):
@@ -2303,15 +2272,11 @@ class TestIterNested(object):
b = arange(3).reshape(1, 3)
i, j = np.nested_iters([a, b], [[0], [1]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [0, 1], [0, 2]], [[1, 0], [1, 1], [1, 2]]])
i, j = np.nested_iters([a, b], [[1], [0]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[[0, 0], [1, 0]], [[0, 1], [1, 1]], [[0, 2], [1, 2]]])
def test_dtype_copy(self):
@@ -2323,13 +2288,11 @@ class TestIterNested(object):
op_flags=['readonly', 'copy'],
op_dtypes='f8')
assert_equal(j[0].dtype, np.dtype('f8'))
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2], [3, 4, 5]])
vals = None
- # writebackifcopy - using conext manager
+ # writebackifcopy - using context manager
a = arange(6, dtype='f4').reshape(2, 3)
i, j = np.nested_iters(a, [[0], [1]],
op_flags=['readwrite', 'updateifcopy'],
@@ -2376,15 +2339,11 @@ class TestIterNested(object):
def test_0d(self):
a = np.arange(12).reshape(2, 3, 2)
i, j = np.nested_iters(a, [[], [1, 0, 2]])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]])
i, j = np.nested_iters(a, [[1, 0, 2], []])
- vals = []
- for x in i:
- vals.append([y for y in j])
+ vals = [list(j) for _ in i]
assert_equal(vals, [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10], [11]])
i, j, k = np.nested_iters(a, [[2, 0], [], [1]])
@@ -2556,10 +2515,8 @@ def test_iter_buffering_reduction_reuse_reduce_loops():
op_flags=[['readonly'], ['readwrite']],
buffersize=5)
- bufsizes = []
with it:
- for x, y in it:
- bufsizes.append(x.shape[0])
+ bufsizes = [x.shape[0] for x, y in it]
assert_equal(bufsizes, [5, 2, 5, 2])
assert_equal(sum(bufsizes), a.size)
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index f264c4ab0..1358b45e9 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -13,7 +13,7 @@ from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- suppress_warnings, HAS_REFCOUNT
+ assert_warns, HAS_REFCOUNT
)
@@ -43,7 +43,7 @@ class TestResize(object):
def test_reshape_from_zero(self):
# See also gh-6740
- A = np.zeros(0, dtype=[('a', np.float32, 1)])
+ A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
@@ -152,7 +152,15 @@ class TestNonarrayArgs(object):
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
- assert_(np.squeeze(A).shape == (3, 3))
+ assert_equal(np.squeeze(A).shape, (3, 3))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
+ assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
+ assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
+ assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
+ assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
+ assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
@@ -208,6 +216,9 @@ class TestNonarrayArgs(object):
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
+ B = np.array([None, 0])
+ B[0] = 1j
+ assert_almost_equal(np.var(B), 0.25)
class TestIsscalar(object):
def test_isscalar(self):
@@ -888,6 +899,41 @@ class TestTypes(object):
# Also test keyword arguments
assert_(np.can_cast(from_=np.int32, to=np.int64))
+ def test_can_cast_simple_to_structured(self):
+ # Non-structured can only be cast to structured in 'unsafe' mode.
+ assert_(not np.can_cast('i4', 'i4,i4'))
+ assert_(not np.can_cast('i4', 'i4,i2'))
+ assert_(np.can_cast('i4', 'i4,i4', casting='unsafe'))
+ assert_(np.can_cast('i4', 'i4,i2', casting='unsafe'))
+ # Even if there is just a single field which is OK.
+ assert_(not np.can_cast('i2', [('f1', 'i4')]))
+ assert_(not np.can_cast('i2', [('f1', 'i4')], casting='same_kind'))
+ assert_(np.can_cast('i2', [('f1', 'i4')], casting='unsafe'))
+ # It should be the same for recursive structured or subarrays.
+ assert_(not np.can_cast('i2', [('f1', 'i4,i4')]))
+ assert_(np.can_cast('i2', [('f1', 'i4,i4')], casting='unsafe'))
+ assert_(not np.can_cast('i2', [('f1', '(2,3)i4')]))
+ assert_(np.can_cast('i2', [('f1', '(2,3)i4')], casting='unsafe'))
+
+ def test_can_cast_structured_to_simple(self):
+ # Need unsafe casting for structured to simple.
+ assert_(not np.can_cast([('f1', 'i4')], 'i4'))
+ assert_(np.can_cast([('f1', 'i4')], 'i4', casting='unsafe'))
+ assert_(np.can_cast([('f1', 'i4')], 'i2', casting='unsafe'))
+ # Since it is unclear what is being cast, multiple fields to
+ # single should not work even for unsafe casting.
+ assert_(not np.can_cast('i4,i4', 'i4', casting='unsafe'))
+ # But a single field inside a single field is OK.
+ assert_(not np.can_cast([('f1', [('x', 'i4')])], 'i4'))
+ assert_(np.can_cast([('f1', [('x', 'i4')])], 'i4', casting='unsafe'))
+ # And a subarray is fine too - it will just take the first element
+ # (arguably not very consistently; might also take the first field).
+ assert_(not np.can_cast([('f0', '(3,)i4')], 'i4'))
+ assert_(np.can_cast([('f0', '(3,)i4')], 'i4', casting='unsafe'))
+ # But a structured subarray with multiple fields should fail.
+ assert_(not np.can_cast([('f0', ('i4,i4'), (2,))], 'i4',
+ casting='unsafe'))
+
def test_can_cast_values(self):
# gh-5917
for dt in np.sctypes['int'] + np.sctypes['uint']:
@@ -965,12 +1011,24 @@ class TestNonzero(object):
assert_equal(np.count_nonzero(np.array([], dtype='?')), 0)
assert_equal(np.nonzero(np.array([])), ([],))
+ assert_equal(np.count_nonzero(np.array([0])), 0)
+ assert_equal(np.count_nonzero(np.array([0], dtype='?')), 0)
+ assert_equal(np.nonzero(np.array([0])), ([],))
+
+ assert_equal(np.count_nonzero(np.array([1])), 1)
+ assert_equal(np.count_nonzero(np.array([1], dtype='?')), 1)
+ assert_equal(np.nonzero(np.array([1])), ([0],))
+
+ def test_nonzero_zerod(self):
assert_equal(np.count_nonzero(np.array(0)), 0)
assert_equal(np.count_nonzero(np.array(0, dtype='?')), 0)
- assert_equal(np.nonzero(np.array(0)), ([],))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.nonzero(np.array(0)), ([],))
+
assert_equal(np.count_nonzero(np.array(1)), 1)
assert_equal(np.count_nonzero(np.array(1, dtype='?')), 1)
- assert_equal(np.nonzero(np.array(1)), ([0],))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.nonzero(np.array(1)), ([0],))
def test_nonzero_onedim(self):
x = np.array([1, 0, 2, -1, 0, 0, 8])
@@ -1164,6 +1222,71 @@ class TestNonzero(object):
assert_raises(ValueError, np.nonzero, np.array([BoolErrors()]))
+ def test_nonzero_sideeffect_safety(self):
+ # gh-13631
+ class FalseThenTrue:
+ _val = False
+ def __bool__(self):
+ try:
+ return self._val
+ finally:
+ self._val = True
+
+ class TrueThenFalse:
+ _val = True
+ def __bool__(self):
+ try:
+ return self._val
+ finally:
+ self._val = False
+
+ # result grows on the second pass
+ a = np.array([True, FalseThenTrue()])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ a = np.array([[True], [FalseThenTrue()]])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ # result shrinks on the second pass
+ a = np.array([False, TrueThenFalse()])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ a = np.array([[False], [TrueThenFalse()]])
+ assert_raises(RuntimeError, np.nonzero, a)
+
+ def test_nonzero_exception_safe(self):
+ # gh-13930
+
+ class ThrowsAfter:
+ def __init__(self, iters):
+ self.iters_left = iters
+
+ def __bool__(self):
+ if self.iters_left == 0:
+ raise ValueError("called `iters` times")
+
+ self.iters_left -= 1
+ return True
+
+ """
+ Test that a ValueError is raised instead of a SystemError
+
+ If the __bool__ function is called after the error state is set,
+ Python (cpython) will raise a SystemError.
+ """
+
+ # assert that an exception in first pass is handled correctly
+ a = np.array([ThrowsAfter(5)]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
+ # raise exception in second pass for 1-dimensional loop
+ a = np.array([ThrowsAfter(15)]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
+ # raise exception in second pass for n-dimensional loop
+ a = np.array([[ThrowsAfter(15)]]*10)
+ assert_raises(ValueError, np.nonzero, a)
+
class TestIndex(object):
def test_boolean(self):
@@ -1218,6 +1341,11 @@ class TestBinaryRepr(object):
exp = '1' + (width - 1) * '0'
assert_equal(np.binary_repr(num, width=width), exp)
+ def test_large_neg_int64(self):
+ # See gh-14289.
+ assert_equal(np.binary_repr(np.int64(-2**62), width=64),
+ '11' + '0'*62)
+
class TestBaseRepr(object):
def test_base3(self):
@@ -1325,11 +1453,17 @@ class TestClip(object):
self.nr = 5
self.nc = 3
- def fastclip(self, a, m, M, out=None):
+ def fastclip(self, a, m, M, out=None, casting=None):
if out is None:
- return a.clip(m, M)
+ if casting is None:
+ return a.clip(m, M)
+ else:
+ return a.clip(m, M, casting=casting)
else:
- return a.clip(m, M, out)
+ if casting is None:
+ return a.clip(m, M, out)
+ else:
+ return a.clip(m, M, out, casting=casting)
def clip(self, a, m, M, out=None):
# use slow-clip
@@ -1367,6 +1501,20 @@ class TestClip(object):
return (10 * rand(n, m)).astype(np.int32)
# Now the real test cases
+
+ @pytest.mark.parametrize("dtype", '?bhilqpBHILQPefdgFDGO')
+ def test_ones_pathological(self, dtype):
+ # for preservation of behavior described in
+ # gh-12519; amin > amax behavior may still change
+ # in the future
+ arr = np.ones(10, dtype=dtype)
+ expected = np.zeros(10, dtype=dtype)
+ actual = np.clip(arr, 1, 0)
+ if dtype == 'O':
+ assert actual.tolist() == expected.tolist()
+ else:
+ assert_equal(actual, expected)
+
def test_simple_double(self):
# Test native double input with scalar min/max.
a = self._generate_data(self.nr, self.nc)
@@ -1465,14 +1613,21 @@ class TestClip(object):
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
- def test_simple_int32_inout(self):
+ @pytest.mark.parametrize("casting", [None, "unsafe"])
+ def test_simple_int32_inout(self, casting):
# Test native int32 input with double min/max and int32 out.
a = self._generate_int32_data(self.nr, self.nc)
m = np.float64(0)
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ if casting is None:
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac, casting=casting)
+ else:
+ # explicitly passing "unsafe" will silence warning
+ self.fastclip(a, m, M, ac, casting=casting)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1494,7 +1649,9 @@ class TestClip(object):
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1505,7 +1662,9 @@ class TestClip(object):
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1681,7 +1840,9 @@ class TestClip(object):
M = np.float64(2)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1703,7 +1864,9 @@ class TestClip(object):
M = np.float64(1)
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1714,7 +1877,9 @@ class TestClip(object):
M = 2.0
ac = np.zeros(a.shape, dtype=np.int32)
act = ac.copy()
- self.fastclip(a, m, M, ac)
+ with assert_warns(DeprecationWarning):
+ # NumPy 1.17.0, 2018-02-24 - casting is unsafe
+ self.fastclip(a, m, M, ac)
self.clip(a, m, M, act)
assert_array_strict_equal(ac, act)
@@ -1767,11 +1932,94 @@ class TestClip(object):
def test_clip_nan(self):
d = np.arange(7.)
- assert_equal(d.clip(min=np.nan), d)
- assert_equal(d.clip(max=np.nan), d)
- assert_equal(d.clip(min=np.nan, max=np.nan), d)
- assert_equal(d.clip(min=-2, max=np.nan), d)
- assert_equal(d.clip(min=np.nan, max=10), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan, max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=-2, max=np.nan), d)
+ with assert_warns(DeprecationWarning):
+ assert_equal(d.clip(min=np.nan, max=10), d)
+
+ def test_object_clip(self):
+ a = np.arange(10, dtype=object)
+ actual = np.clip(a, 1, 5)
+ expected = np.array([1, 1, 2, 3, 4, 5, 5, 5, 5, 5])
+ assert actual.tolist() == expected.tolist()
+
+ def test_clip_all_none(self):
+ a = np.arange(10, dtype=object)
+ with assert_raises_regex(ValueError, 'max or min'):
+ np.clip(a, None, None)
+
+ def test_clip_invalid_casting(self):
+ a = np.arange(10, dtype=object)
+ with assert_raises_regex(ValueError,
+ 'casting must be one of'):
+ self.fastclip(a, 1, 8, casting="garbage")
+
+ @pytest.mark.parametrize("amin, amax", [
+ # two scalars
+ (1, 0),
+ # mix scalar and array
+ (1, np.zeros(10)),
+ # two arrays
+ (np.ones(10), np.zeros(10)),
+ ])
+ def test_clip_value_min_max_flip(self, amin, amax):
+ a = np.arange(10, dtype=np.int64)
+ # requirement from ufunc_docstrings.py
+ expected = np.minimum(np.maximum(a, amin), amax)
+ actual = np.clip(a, amin, amax)
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("arr, amin, amax, exp", [
+ # for a bug in npy_ObjectClip, based on a
+ # case produced by hypothesis
+ (np.zeros(10, dtype=np.int64),
+ 0,
+ -2**64+1,
+ np.full(10, -2**64+1, dtype=object)),
+ # for bugs in NPY_TIMEDELTA_MAX, based on a case
+ # produced by hypothesis
+ (np.zeros(10, dtype='m8') - 1,
+ 0,
+ 0,
+ np.zeros(10, dtype='m8')),
+ ])
+ def test_clip_problem_cases(self, arr, amin, amax, exp):
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, exp)
+
+ @pytest.mark.xfail(reason="no scalar nan propagation yet")
+ @pytest.mark.parametrize("arr, amin, amax", [
+ # problematic scalar nan case from hypothesis
+ (np.zeros(10, dtype=np.int64),
+ np.array(np.nan),
+ np.zeros(10, dtype=np.int32)),
+ ])
+ def test_clip_scalar_nan_propagation(self, arr, amin, amax):
+ # enforcement of scalar nan propagation for comparisons
+ # called through clip()
+ expected = np.minimum(np.maximum(a, amin), amax)
+ with assert_warns(DeprecationWarning):
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, expected)
+
+ @pytest.mark.xfail(reason="propagation doesn't match spec")
+ @pytest.mark.parametrize("arr, amin, amax", [
+ (np.array([1] * 10, dtype='m8'),
+ np.timedelta64('NaT'),
+ np.zeros(10, dtype=np.int32)),
+ ])
+ def test_NaT_propagation(self, arr, amin, amax):
+ # NOTE: the expected function spec doesn't
+ # propagate NaT, but clip() now does
+ expected = np.minimum(np.maximum(a, amin), amax)
+ actual = np.clip(arr, amin, amax)
+ assert_equal(actual, expected)
class TestAllclose(object):
@@ -2146,6 +2394,7 @@ class TestLikeFuncs(object):
(np.arange(24).reshape(2, 3, 4).swapaxes(0, 1), None),
(np.arange(24).reshape(4, 3, 2).swapaxes(0, 1), '?'),
]
+ self.shapes = [(5,), (5,6,), (5,6,7,)]
def compare_array_value(self, dz, value, fill_value):
if value is not None:
@@ -2211,6 +2460,34 @@ class TestLikeFuncs(object):
assert_equal(dz.dtype, np.dtype(dtype))
self.compare_array_value(dz, value, fill_value)
+ # Test the 'shape' parameter
+ for s in self.shapes:
+ for o in 'CFA':
+ sz = like_function(d, dtype=dtype, shape=s, order=o,
+ **fill_kwarg)
+ assert_equal(sz.shape, s)
+ if dtype is None:
+ assert_equal(sz.dtype, d.dtype)
+ else:
+ assert_equal(sz.dtype, np.dtype(dtype))
+ if o == 'C' or (o == 'A' and d.flags.c_contiguous):
+ assert_(sz.flags.c_contiguous)
+ elif o == 'F' or (o == 'A' and d.flags.f_contiguous):
+ assert_(sz.flags.f_contiguous)
+ self.compare_array_value(sz, value, fill_value)
+
+ if (d.ndim != len(s)):
+ assert_equal(np.argsort(like_function(d, dtype=dtype,
+ shape=s, order='K',
+ **fill_kwarg).strides),
+ np.argsort(np.empty(s, dtype=dtype,
+ order='C').strides))
+ else:
+ assert_equal(np.argsort(like_function(d, dtype=dtype,
+ shape=s, order='K',
+ **fill_kwarg).strides),
+ np.argsort(d.strides))
+
# Test the 'subok' parameter
class MyNDArray(np.ndarray):
pass
@@ -2306,6 +2583,30 @@ class TestConvolve(object):
class TestArgwhere(object):
+
+ @pytest.mark.parametrize('nd', [0, 1, 2])
+ def test_nd(self, nd):
+ # get an nd array with multiple elements in every dimension
+ x = np.empty((2,)*nd, bool)
+
+ # none
+ x[...] = False
+ assert_equal(np.argwhere(x).shape, (0, nd))
+
+ # only one
+ x[...] = False
+ x.flat[0] = True
+ assert_equal(np.argwhere(x).shape, (1, nd))
+
+ # all but one
+ x[...] = True
+ x.flat[0] = False
+ assert_equal(np.argwhere(x).shape, (x.size - 1, nd))
+
+ # all
+ x[...] = True
+ assert_equal(np.argwhere(x).shape, (x.size, nd))
+
def test_2D(self):
x = np.arange(6).reshape((2, 3))
assert_array_equal(np.argwhere(x > 1),
@@ -2616,6 +2917,47 @@ def test_outer_out_param():
assert_equal(np.outer(arr2, arr3, out2), out2)
+class TestIndices(object):
+
+ def test_simple(self):
+ [x, y] = np.indices((4, 3))
+ assert_array_equal(x, np.array([[0, 0, 0],
+ [1, 1, 1],
+ [2, 2, 2],
+ [3, 3, 3]]))
+ assert_array_equal(y, np.array([[0, 1, 2],
+ [0, 1, 2],
+ [0, 1, 2],
+ [0, 1, 2]]))
+
+ def test_single_input(self):
+ [x] = np.indices((4,))
+ assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+ [x] = np.indices((4,), sparse=True)
+ assert_array_equal(x, np.array([0, 1, 2, 3]))
+
+ def test_scalar_input(self):
+ assert_array_equal([], np.indices(()))
+ assert_array_equal([], np.indices((), sparse=True))
+ assert_array_equal([[]], np.indices((0,)))
+ assert_array_equal([[]], np.indices((0,), sparse=True))
+
+ def test_sparse(self):
+ [x, y] = np.indices((4,3), sparse=True)
+ assert_array_equal(x, np.array([[0], [1], [2], [3]]))
+ assert_array_equal(y, np.array([[0, 1, 2]]))
+
+ @pytest.mark.parametrize("dtype", [np.int, np.float32, np.float64])
+ @pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
+ def test_return_type(self, dtype, dims):
+ inds = np.indices(dims, dtype=dtype)
+ assert_(inds.dtype == dtype)
+
+ for arr in np.indices(dims, dtype=dtype, sparse=True):
+ assert_(arr.dtype == dtype)
+
+
class TestRequire(object):
flag_names = ['C', 'C_CONTIGUOUS', 'CONTIGUOUS',
'F', 'F_CONTIGUOUS', 'FORTRAN',
@@ -2696,6 +3038,8 @@ class TestBroadcast(object):
arrs = [np.empty((6, 7)), np.empty((5, 6, 1)), np.empty((7,)),
np.empty((5, 1, 7))]
mits = [np.broadcast(*arrs),
+ np.broadcast(np.broadcast(*arrs[:0]), np.broadcast(*arrs[0:])),
+ np.broadcast(np.broadcast(*arrs[:1]), np.broadcast(*arrs[1:])),
np.broadcast(np.broadcast(*arrs[:2]), np.broadcast(*arrs[2:])),
np.broadcast(arrs[0], np.broadcast(*arrs[1:-1]), arrs[-1])]
for mit in mits:
@@ -2720,12 +3064,24 @@ class TestBroadcast(object):
arr = np.empty((5,))
for j in range(35):
arrs = [arr] * j
- if j < 1 or j > 32:
+ if j > 32:
assert_raises(ValueError, np.broadcast, *arrs)
else:
mit = np.broadcast(*arrs)
assert_equal(mit.numiter, j)
+ def test_broadcast_error_kwargs(self):
+ #gh-13455
+ arrs = [np.empty((5, 6, 7))]
+ mit = np.broadcast(*arrs)
+ mit2 = np.broadcast(*arrs, **{})
+ assert_equal(mit.shape, mit2.shape)
+ assert_equal(mit.ndim, mit2.ndim)
+ assert_equal(mit.nd, mit2.nd)
+ assert_equal(mit.numiter, mit2.numiter)
+ assert_(mit.iters[0].base is mit2.iters[0].base)
+
+ assert_raises(ValueError, np.broadcast, 1, **{'x': 1})
class TestKeepdims(object):
@@ -2748,3 +3104,9 @@ class TestTensordot(object):
td = np.tensordot(a, b, (1, 0))
assert_array_equal(td, np.dot(a, b))
assert_array_equal(td, np.einsum('ij,jk', a, b))
+
+ def test_zero_dimensional(self):
+ # gh-12130
+ arr_0d = np.array(1)
+ ret = np.tensordot(arr_0d, arr_0d, ([], [])) # contracting no axes is well defined
+ assert_array_equal(ret, arr_0d)
diff --git a/numpy/core/tests/test_numerictypes.py b/numpy/core/tests/test_numerictypes.py
index 27e4fdeec..387740e35 100644
--- a/numpy/core/tests/test_numerictypes.py
+++ b/numpy/core/tests/test_numerictypes.py
@@ -5,7 +5,7 @@ import itertools
import pytest
import numpy as np
-from numpy.testing import assert_, assert_equal, assert_raises
+from numpy.testing import assert_, assert_equal, assert_raises, IS_PYPY
# This is the structure of the table used for plain objects:
#
@@ -87,10 +87,8 @@ def normalize_descr(descr):
else:
nitem = (item[0], dtype)
out.append(nitem)
- elif isinstance(item[1], list):
- l = []
- for j in normalize_descr(item[1]):
- l.append(j)
+ elif isinstance(dtype, list):
+ l = normalize_descr(dtype)
out.append((item[0], l))
else:
raise ValueError("Expected a str or list and got %s" %
@@ -493,9 +491,39 @@ def test_issctype(rep, expected):
@pytest.mark.skipif(sys.flags.optimize > 1,
reason="no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1")
+@pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
class TestDocStrings(object):
def test_platform_dependent_aliases(self):
if np.int64 is np.int_:
assert_('int64' in np.int_.__doc__)
elif np.int64 is np.longlong:
assert_('int64' in np.longlong.__doc__)
+
+
+class TestScalarTypeNames:
+ # gh-9799
+
+ numeric_types = [
+ np.byte, np.short, np.intc, np.int_, np.longlong,
+ np.ubyte, np.ushort, np.uintc, np.uint, np.ulonglong,
+ np.half, np.single, np.double, np.longdouble,
+ np.csingle, np.cdouble, np.clongdouble,
+ ]
+
+ def test_names_are_unique(self):
+ # none of the above may be aliases for each other
+ assert len(set(self.numeric_types)) == len(self.numeric_types)
+
+ # names must be unique
+ names = [t.__name__ for t in self.numeric_types]
+ assert len(set(names)) == len(names)
+
+ @pytest.mark.parametrize('t', numeric_types)
+ def test_names_reflect_attributes(self, t):
+ """ Test that names correspond to where the type is under ``np.`` """
+ assert getattr(np, t.__name__) is t
+
+ @pytest.mark.parametrize('t', numeric_types)
+ def test_names_are_undersood_by_dtype(self, t):
+ """ Test the dtype constructor maps names back to the type """
+ assert np.dtype(t.__name__).type is t
diff --git a/numpy/core/tests/test_overrides.py b/numpy/core/tests/test_overrides.py
index 895f221da..63b0e4539 100644
--- a/numpy/core/tests/test_overrides.py
+++ b/numpy/core/tests/test_overrides.py
@@ -1,51 +1,62 @@
from __future__ import division, absolute_import, print_function
-import pickle
+import inspect
import sys
+from unittest import mock
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex)
from numpy.core.overrides import (
- get_overloaded_types_and_args, array_function_dispatch,
- verify_matching_signatures)
+ _get_implementing_args, array_function_dispatch,
+ verify_matching_signatures, ARRAY_FUNCTION_ENABLED)
+from numpy.compat import pickle
+import pytest
-def _get_overloaded_args(relevant_args):
- types, args = get_overloaded_types_and_args(relevant_args)
- return args
+requires_array_function = pytest.mark.skipif(
+ not ARRAY_FUNCTION_ENABLED,
+ reason="__array_function__ dispatch not enabled.")
-def _return_self(self, *args, **kwargs):
- return self
+def _return_not_implemented(self, *args, **kwargs):
+ return NotImplemented
-class TestGetOverloadedTypesAndArgs(object):
+# need to define this at the top level to test pickling
+@array_function_dispatch(lambda array: (array,))
+def dispatched_one_arg(array):
+ """Docstring."""
+ return 'original'
+
+
+@array_function_dispatch(lambda array1, array2: (array1, array2))
+def dispatched_two_arg(array1, array2):
+ """Docstring."""
+ return 'original'
+
+
+class TestGetImplementingArgs(object):
def test_ndarray(self):
array = np.array(1)
- types, args = get_overloaded_types_and_args([array])
- assert_equal(set(types), {np.ndarray})
- assert_equal(list(args), [])
+ args = _get_implementing_args([array])
+ assert_equal(list(args), [array])
- types, args = get_overloaded_types_and_args([array, array])
- assert_equal(len(types), 1)
- assert_equal(set(types), {np.ndarray})
- assert_equal(list(args), [])
+ args = _get_implementing_args([array, array])
+ assert_equal(list(args), [array])
- types, args = get_overloaded_types_and_args([array, 1])
- assert_equal(set(types), {np.ndarray})
- assert_equal(list(args), [])
+ args = _get_implementing_args([array, 1])
+ assert_equal(list(args), [array])
- types, args = get_overloaded_types_and_args([1, array])
- assert_equal(set(types), {np.ndarray})
- assert_equal(list(args), [])
+ args = _get_implementing_args([1, array])
+ assert_equal(list(args), [array])
def test_ndarray_subclasses(self):
class OverrideSub(np.ndarray):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
@@ -54,122 +65,157 @@ class TestGetOverloadedTypesAndArgs(object):
override_sub = np.array(1).view(OverrideSub)
no_override_sub = np.array(1).view(NoOverrideSub)
- types, args = get_overloaded_types_and_args([array, override_sub])
- assert_equal(set(types), {np.ndarray, OverrideSub})
- assert_equal(list(args), [override_sub])
+ args = _get_implementing_args([array, override_sub])
+ assert_equal(list(args), [override_sub, array])
- types, args = get_overloaded_types_and_args([array, no_override_sub])
- assert_equal(set(types), {np.ndarray, NoOverrideSub})
- assert_equal(list(args), [])
+ args = _get_implementing_args([array, no_override_sub])
+ assert_equal(list(args), [no_override_sub, array])
- types, args = get_overloaded_types_and_args(
+ args = _get_implementing_args(
[override_sub, no_override_sub])
- assert_equal(set(types), {OverrideSub, NoOverrideSub})
- assert_equal(list(args), [override_sub])
+ assert_equal(list(args), [override_sub, no_override_sub])
def test_ndarray_and_duck_array(self):
class Other(object):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
array = np.array(1)
other = Other()
- types, args = get_overloaded_types_and_args([other, array])
- assert_equal(set(types), {np.ndarray, Other})
- assert_equal(list(args), [other])
+ args = _get_implementing_args([other, array])
+ assert_equal(list(args), [other, array])
- types, args = get_overloaded_types_and_args([array, other])
- assert_equal(set(types), {np.ndarray, Other})
- assert_equal(list(args), [other])
+ args = _get_implementing_args([array, other])
+ assert_equal(list(args), [array, other])
def test_ndarray_subclass_and_duck_array(self):
class OverrideSub(np.ndarray):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
class Other(object):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
array = np.array(1)
subarray = np.array(1).view(OverrideSub)
other = Other()
- assert_equal(_get_overloaded_args([array, subarray, other]),
- [subarray, other])
- assert_equal(_get_overloaded_args([array, other, subarray]),
- [subarray, other])
+ assert_equal(_get_implementing_args([array, subarray, other]),
+ [subarray, array, other])
+ assert_equal(_get_implementing_args([array, other, subarray]),
+ [subarray, array, other])
def test_many_duck_arrays(self):
class A(object):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
class B(A):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
class C(A):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
class D(object):
- __array_function__ = _return_self
+ __array_function__ = _return_not_implemented
a = A()
b = B()
c = C()
d = D()
- assert_equal(_get_overloaded_args([1]), [])
- assert_equal(_get_overloaded_args([a]), [a])
- assert_equal(_get_overloaded_args([a, 1]), [a])
- assert_equal(_get_overloaded_args([a, a, a]), [a])
- assert_equal(_get_overloaded_args([a, d, a]), [a, d])
- assert_equal(_get_overloaded_args([a, b]), [b, a])
- assert_equal(_get_overloaded_args([b, a]), [b, a])
- assert_equal(_get_overloaded_args([a, b, c]), [b, c, a])
- assert_equal(_get_overloaded_args([a, c, b]), [c, b, a])
+ assert_equal(_get_implementing_args([1]), [])
+ assert_equal(_get_implementing_args([a]), [a])
+ assert_equal(_get_implementing_args([a, 1]), [a])
+ assert_equal(_get_implementing_args([a, a, a]), [a])
+ assert_equal(_get_implementing_args([a, d, a]), [a, d])
+ assert_equal(_get_implementing_args([a, b]), [b, a])
+ assert_equal(_get_implementing_args([b, a]), [b, a])
+ assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
+ assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
+
+ def test_too_many_duck_arrays(self):
+ namespace = dict(__array_function__=_return_not_implemented)
+ types = [type('A' + str(i), (object,), namespace) for i in range(33)]
+ relevant_args = [t() for t in types]
+
+ actual = _get_implementing_args(relevant_args[:32])
+ assert_equal(actual, relevant_args[:32])
+
+ with assert_raises_regex(TypeError, 'distinct argument types'):
+ _get_implementing_args(relevant_args)
class TestNDArrayArrayFunction(object):
+ @requires_array_function
def test_method(self):
- class SubOverride(np.ndarray):
- __array_function__ = _return_self
+ class Other(object):
+ __array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
- array = np.array(1)
+ class OverrideSub(np.ndarray):
+ __array_function__ = _return_not_implemented
- def func():
- return 'original'
+ array = np.array([1])
+ other = Other()
+ no_override_sub = array.view(NoOverrideSub)
+ override_sub = array.view(OverrideSub)
- result = array.__array_function__(
- func=func, types=(np.ndarray,), args=(), kwargs={})
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray,),
+ args=(array, 1.), kwargs={})
assert_equal(result, 'original')
- result = array.__array_function__(
- func=func, types=(np.ndarray, SubOverride), args=(), kwargs={})
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, Other),
+ args=(array, other), kwargs={})
assert_(result is NotImplemented)
- result = array.__array_function__(
- func=func, types=(np.ndarray, NoOverrideSub), args=(), kwargs={})
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, NoOverrideSub),
+ args=(array, no_override_sub),
+ kwargs={})
assert_equal(result, 'original')
+ result = array.__array_function__(func=dispatched_two_arg,
+ types=(np.ndarray, OverrideSub),
+ args=(array, override_sub),
+ kwargs={})
+ assert_equal(result, 'original')
-# need to define this at the top level to test pickling
-@array_function_dispatch(lambda array: (array,))
-def dispatched_one_arg(array):
- """Docstring."""
- return 'original'
+ with assert_raises_regex(TypeError, 'no implementation found'):
+ np.concatenate((array, other))
+
+ expected = np.concatenate((array, array))
+ result = np.concatenate((array, no_override_sub))
+ assert_equal(result, expected.view(NoOverrideSub))
+ result = np.concatenate((array, override_sub))
+ assert_equal(result, expected.view(OverrideSub))
+
+ def test_no_wrapper(self):
+ # This shouldn't happen unless a user intentionally calls
+ # __array_function__ with invalid arguments, but check that we raise
+ # an appropriate error all the same.
+ array = np.array(1)
+ func = lambda x: x
+ with assert_raises_regex(AttributeError, '_implementation'):
+ array.__array_function__(func=func, types=(np.ndarray,),
+ args=(array,), kwargs={})
+@requires_array_function
class TestArrayFunctionDispatch(object):
def test_pickle(self):
- roundtripped = pickle.loads(pickle.dumps(dispatched_one_arg))
- assert_(roundtripped is dispatched_one_arg)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ roundtripped = pickle.loads(
+ pickle.dumps(dispatched_one_arg, protocol=proto))
+ assert_(roundtripped is dispatched_one_arg)
def test_name_and_docstring(self):
assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
@@ -187,7 +233,8 @@ class TestArrayFunctionDispatch(object):
assert_(obj is original)
assert_(func is dispatched_one_arg)
assert_equal(set(types), {MyArray})
- assert_equal(args, (original,))
+ # assert_equal uses the overloaded np.iscomplexobj() internally
+ assert_(args == (original,))
assert_equal(kwargs, {})
def test_not_implemented(self):
@@ -201,6 +248,7 @@ class TestArrayFunctionDispatch(object):
dispatched_one_arg(array)
+@requires_array_function
class TestVerifyMatchingSignatures(object):
def test_verify_matching_signatures(self):
@@ -253,6 +301,7 @@ def _new_duck_type_and_implements():
return (MyArray, implements)
+@requires_array_function
class TestArrayFunctionImplementation(object):
def test_one_arg(self):
@@ -293,12 +342,88 @@ class TestArrayFunctionImplementation(object):
def test_not_implemented(self):
MyArray, implements = _new_duck_type_and_implements()
- @array_function_dispatch(lambda array: (array,))
+ @array_function_dispatch(lambda array: (array,), module='my')
def func(array):
return array
array = np.array(1)
assert_(func(array) is array)
+ assert_equal(func.__module__, 'my')
- with assert_raises_regex(TypeError, 'no implementation found'):
+ with assert_raises_regex(
+ TypeError, "no implementation found for 'my.func'"):
func(MyArray())
+
+
+class TestNDArrayMethods(object):
+
+ def test_repr(self):
+ # gh-12162: should still be defined even if __array_function__ doesn't
+ # implement np.array_repr()
+
+ class MyArray(np.ndarray):
+ def __array_function__(*args, **kwargs):
+ return NotImplemented
+
+ array = np.array(1).view(MyArray)
+ assert_equal(repr(array), 'MyArray(1)')
+ assert_equal(str(array), '1')
+
+
+class TestNumPyFunctions(object):
+
+ def test_set_module(self):
+ assert_equal(np.sum.__module__, 'numpy')
+ assert_equal(np.char.equal.__module__, 'numpy.char')
+ assert_equal(np.fft.fft.__module__, 'numpy.fft')
+ assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
+
+ def test_inspect_sum(self):
+ signature = inspect.signature(np.sum)
+ assert_('axis' in signature.parameters)
+
+ @requires_array_function
+ def test_override_sum(self):
+ MyArray, implements = _new_duck_type_and_implements()
+
+ @implements(np.sum)
+ def _(array):
+ return 'yes'
+
+ assert_equal(np.sum(MyArray()), 'yes')
+
+ @requires_array_function
+ def test_sum_on_mock_array(self):
+
+ # We need a proxy for mocks because __array_function__ is only looked
+ # up in the class dict
+ class ArrayProxy:
+ def __init__(self, value):
+ self.value = value
+ def __array_function__(self, *args, **kwargs):
+ return self.value.__array_function__(*args, **kwargs)
+ def __array__(self, *args, **kwargs):
+ return self.value.__array__(*args, **kwargs)
+
+ proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
+ proxy.value.__array_function__.return_value = 1
+ result = np.sum(proxy)
+ assert_equal(result, 1)
+ proxy.value.__array_function__.assert_called_once_with(
+ np.sum, (ArrayProxy,), (proxy,), {})
+ proxy.value.__array__.assert_not_called()
+
+ @requires_array_function
+ def test_sum_forwarding_implementation(self):
+
+ class MyArray(np.ndarray):
+
+ def sum(self, axis, out):
+ return 'summed'
+
+ def __array_function__(self, func, types, args, kwargs):
+ return super().__array_function__(func, types, args, kwargs)
+
+ # note: the internal implementation of np.sum() calls the .sum() method
+ array = np.array(1).view(MyArray)
+ assert_equal(np.sum(array), 'summed')
diff --git a/numpy/core/tests/test_records.py b/numpy/core/tests/test_records.py
index 056d39db8..c1b794145 100644
--- a/numpy/core/tests/test_records.py
+++ b/numpy/core/tests/test_records.py
@@ -7,17 +7,17 @@ try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
-import pickle
-import warnings
import textwrap
from os import path
import pytest
import numpy as np
+from numpy.compat import Path
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_array_almost_equal,
- assert_raises, assert_warns
+ assert_raises, temppath
)
+from numpy.compat import pickle
class TestFromrecords(object):
@@ -325,6 +325,23 @@ class TestFromrecords(object):
assert_equal(rec['f1'], [b'', b'', b''])
+@pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+class TestPathUsage(object):
+ # Test that pathlib.Path can be used
+ def test_tofile_fromfile(self):
+ with temppath(suffix='.bin') as path:
+ path = Path(path)
+ np.random.seed(123)
+ a = np.random.rand(10).astype('f8,i4,a5')
+ a[5] = (0.5,10,'abcde')
+ with path.open("wb") as fd:
+ a.tofile(fd)
+ x = np.core.records.fromfile(path,
+ formats='f8,i4,a5',
+ shape=10)
+ assert_array_equal(x, a)
+
+
class TestRecord(object):
def setup(self):
self.data = np.rec.fromrecords([(1, 2, 3), (4, 5, 6)],
@@ -361,7 +378,6 @@ class TestRecord(object):
with assert_raises(ValueError):
r.setfield([2,3], *r.dtype.fields['f'])
- @pytest.mark.xfail(reason="See gh-10411, becomes real error in 1.16")
def test_out_of_order_fields(self):
# names in the same order, padding added to descr
x = self.data[['col1', 'col2']]
@@ -419,7 +435,56 @@ class TestRecord(object):
def test_missing_field(self):
# https://github.com/numpy/numpy/issues/4806
arr = np.zeros((3,), dtype=[('x', int), ('y', int)])
- assert_raises(ValueError, lambda: arr[['nofield']])
+ assert_raises(KeyError, lambda: arr[['nofield']])
+
+ def test_fromarrays_nested_structured_arrays(self):
+ arrays = [
+ np.arange(10),
+ np.ones(10, dtype=[('a', '<u2'), ('b', '<f4')]),
+ ]
+ arr = np.rec.fromarrays(arrays) # ValueError?
+
+ @pytest.mark.parametrize('nfields', [0, 1, 2])
+ def test_assign_dtype_attribute(self, nfields):
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+ data = np.zeros(3, dt).view(np.recarray)
+
+ # the original and resulting dtypes differ on whether they are records
+ assert data.dtype.type == np.record
+ assert dt.type != np.record
+
+ # ensure that the dtype remains a record even when assigned
+ data.dtype = dt
+ assert data.dtype.type == np.record
+
+ @pytest.mark.parametrize('nfields', [0, 1, 2])
+ def test_nested_fields_are_records(self, nfields):
+ """ Test that nested structured types are treated as records too """
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)][:nfields])
+ dt_outer = np.dtype([('inner', dt)])
+
+ data = np.zeros(3, dt_outer).view(np.recarray)
+ assert isinstance(data, np.recarray)
+ assert isinstance(data['inner'], np.recarray)
+
+ data0 = data[0]
+ assert isinstance(data0, np.record)
+ assert isinstance(data0['inner'], np.record)
+
+ def test_nested_dtype_padding(self):
+ """ test that trailing padding is preserved """
+ # construct a dtype with padding at the end
+ dt = np.dtype([('a', np.uint8), ('b', np.uint8), ('c', np.uint8)])
+ dt_padded_end = dt[['a', 'b']]
+ assert dt_padded_end.itemsize == dt.itemsize
+
+ dt_outer = np.dtype([('inner', dt_padded_end)])
+
+ data = np.zeros(3, dt_outer).view(np.recarray)
+ assert_equal(data['inner'].dtype, dt_padded_end)
+
+ data0 = data[0]
+ assert_equal(data0['inner'].dtype, dt_padded_end)
def test_find_duplicate():
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 947ee5f86..9dc231deb 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -1,11 +1,8 @@
from __future__ import division, absolute_import, print_function
import copy
-import pickle
import sys
-import platform
import gc
-import warnings
import tempfile
import pytest
from os import path
@@ -19,7 +16,7 @@ from numpy.testing import (
assert_raises_regex, assert_warns, suppress_warnings,
_assert_valid_refcount, HAS_REFCOUNT,
)
-from numpy.compat import asbytes, asunicode, long
+from numpy.compat import asbytes, asunicode, long, pickle
try:
RecursionError
@@ -39,15 +36,16 @@ class TestRegression(object):
def test_pickle_transposed(self):
# Ticket #16
a = np.transpose(np.array([[2, 9], [7, 0], [3, 8]]))
- f = BytesIO()
- pickle.dump(a, f)
- f.seek(0)
- b = pickle.load(f)
- f.close()
- assert_array_equal(a, b)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(a, f, protocol=proto)
+ f.seek(0)
+ b = pickle.load(f)
+ f.close()
+ assert_array_equal(a, b)
def test_typeNA(self):
- # Issue gh-515
+ # Issue gh-515
with suppress_warnings() as sup:
sup.filter(np.VisibleDeprecationWarning)
assert_equal(np.typeNA[np.int64], 'Int64')
@@ -95,12 +93,13 @@ class TestRegression(object):
def test_char_dump(self):
# Ticket #50
- f = BytesIO()
ca = np.char.array(np.arange(1000, 1010), itemsize=4)
- ca.dump(f)
- f.seek(0)
- ca = np.load(f)
- f.close()
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(ca, f, protocol=proto)
+ f.seek(0)
+ ca = np.load(f, allow_pickle=True)
+ f.close()
def test_noncontiguous_fill(self):
# Ticket #58.
@@ -359,12 +358,13 @@ class TestRegression(object):
def test_unpickle_dtype_with_object(self):
# Implemented in r2840
dt = np.dtype([('x', int), ('y', np.object_), ('z', 'O')])
- f = BytesIO()
- pickle.dump(dt, f)
- f.seek(0)
- dt_ = pickle.load(f)
- f.close()
- assert_equal(dt, dt_)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ f = BytesIO()
+ pickle.dump(dt, f, protocol=proto)
+ f.seek(0)
+ dt_ = pickle.load(f)
+ f.close()
+ assert_equal(dt, dt_)
def test_mem_array_creation_invalid_specification(self):
# Ticket #196
@@ -436,6 +436,32 @@ class TestRegression(object):
assert_raises(KeyError, np.lexsort, BuggySequence())
+ def test_lexsort_zerolen_custom_strides(self):
+ # Ticket #14228
+ xs = np.array([], dtype='i8')
+ assert xs.strides == (8,)
+ assert np.lexsort((xs,)).shape[0] == 0 # Works
+
+ xs.strides = (16,)
+ assert np.lexsort((xs,)).shape[0] == 0 # Was: MemoryError
+
+ def test_lexsort_zerolen_custom_strides_2d(self):
+ xs = np.array([], dtype='i8')
+
+ xs.shape = (0, 2)
+ xs.strides = (16, 16)
+ assert np.lexsort((xs,), axis=0).shape[0] == 0
+
+ xs.shape = (2, 0)
+ xs.strides = (16, 16)
+ assert np.lexsort((xs,), axis=0).shape[0] == 2
+
+ def test_lexsort_zerolen_element(self):
+ dt = np.dtype([]) # a void dtype with no fields
+ xs = np.empty(4, dt)
+
+ assert np.lexsort((xs,)).shape[0] == xs.shape[0]
+
def test_pickle_py2_bytes_encoding(self):
# Check that arrays and scalars pickled on Py2 are
# unpickleable on Py3 using encoding='bytes'
@@ -468,13 +494,14 @@ class TestRegression(object):
result = pickle.loads(data, encoding='bytes')
assert_equal(result, original)
- if isinstance(result, np.ndarray) and result.dtype.names:
+ if isinstance(result, np.ndarray) and result.dtype.names is not None:
for name in result.dtype.names:
assert_(isinstance(name, str))
def test_pickle_dtype(self):
# Ticket #251
- pickle.dumps(float)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ pickle.dumps(float, protocol=proto)
def test_swap_real(self):
# Ticket #265
@@ -1512,7 +1539,8 @@ class TestRegression(object):
def test_fromstring_crash(self):
# Ticket #1345: the following should not cause a crash
- np.fromstring(b'aa, aa, 1.0', sep=',')
+ with assert_warns(DeprecationWarning):
+ np.fromstring(b'aa, aa, 1.0', sep=',')
def test_ticket_1539(self):
dtypes = [x for x in np.typeDict.values()
@@ -1553,10 +1581,7 @@ class TestRegression(object):
def test_complex_nan_maximum(self):
cnan = complex(0, np.nan)
- with suppress_warnings() as sup:
- sup.record(RuntimeWarning)
- assert_equal(np.maximum(1, cnan), cnan)
- assert_equal(len(sup.log), 1)
+ assert_equal(np.maximum(1, cnan), cnan)
def test_subclass_int_tuple_assignment(self):
# ticket #1563
@@ -2408,11 +2433,81 @@ class TestRegression(object):
t = np.dtype([((s, 'f1'), np.float64)])
data = np.zeros(10, t)
for i in range(10):
- v = str(data[['f1']])
+ str(data[['f1']])
if HAS_REFCOUNT:
assert_(base <= sys.getrefcount(s))
+ @pytest.mark.parametrize('val', [
+ # arrays and scalars
+ np.ones((10, 10), dtype='int32'),
+ np.uint64(10),
+ ])
+ @pytest.mark.parametrize('protocol',
+ range(2, pickle.HIGHEST_PROTOCOL + 1)
+ )
+ def test_pickle_module(self, protocol, val):
+ # gh-12837
+ s = pickle.dumps(val, protocol)
+ assert b'_multiarray_umath' not in s
+ if protocol == 5 and len(val.shape) > 0:
+ # unpickling ndarray goes through _frombuffer for protocol 5
+ assert b'numpy.core.numeric' in s
+ else:
+ assert b'numpy.core.multiarray' in s
+
def test_object_casting_errors(self):
# gh-11993
arr = np.array(['AAAAA', 18465886.0, 18465886.0], dtype=object)
assert_raises(TypeError, arr.astype, 'c8')
+
+ def test_eff1d_casting(self):
+ # gh-12711
+ x = np.array([1, 2, 4, 7, 0], dtype=np.int16)
+ res = np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
+ assert_equal(res, [-99, 1, 2, 3, -7, 88, 99])
+ assert_raises(ValueError, np.ediff1d, x, to_begin=(1<<20))
+ assert_raises(ValueError, np.ediff1d, x, to_end=(1<<20))
+
+ def test_pickle_datetime64_array(self):
+ # gh-12745 (would fail with pickle5 installed)
+ d = np.datetime64('2015-07-04 12:59:59.50', 'ns')
+ arr = np.array([d])
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ dumped = pickle.dumps(arr, protocol=proto)
+ assert_equal(pickle.loads(dumped), arr)
+
+ def test_bad_array_interface(self):
+ class T(object):
+ __array_interface__ = {}
+
+ np.array([T()])
+
+ def test_2d__array__shape(self):
+ class T(object):
+ def __array__(self):
+ return np.ndarray(shape=(0,0))
+
+ # Make sure __array__ is used instead of Sequence methods.
+ def __iter__(self):
+ return iter([])
+
+ def __getitem__(self, idx):
+ raise AssertionError("__getitem__ was called")
+
+ def __len__(self):
+ return 0
+
+
+ t = T()
+ #gh-13659, would raise in broadcasting [x=t for x in result]
+ np.array([t])
+
+ @pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
+ @pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
+ reason='overflows on windows, fixed in bpo-16865')
+ def test_to_ctypes(self):
+ #gh-14214
+ arr = np.zeros((2 ** 31 + 1,), 'b')
+ assert arr.size * arr.itemsize > 2 ** 31
+ c_arr = np.ctypeslib.as_ctypes(arr)
+ assert_equal(c_arr._length_, arr.size)
diff --git a/numpy/core/tests/test_scalar_methods.py b/numpy/core/tests/test_scalar_methods.py
new file mode 100644
index 000000000..93434dd1b
--- /dev/null
+++ b/numpy/core/tests/test_scalar_methods.py
@@ -0,0 +1,109 @@
+"""
+Test the scalar constructors, which also do type-coercion
+"""
+from __future__ import division, absolute_import, print_function
+
+import os
+import fractions
+import platform
+
+import pytest
+import numpy as np
+
+from numpy.testing import (
+ run_module_suite,
+ assert_equal, assert_almost_equal, assert_raises, assert_warns,
+ dec
+)
+
+class TestAsIntegerRatio(object):
+ # derived in part from the cpython test "test_floatasratio"
+
+ @pytest.mark.parametrize("ftype", [
+ np.half, np.single, np.double, np.longdouble])
+ @pytest.mark.parametrize("f, ratio", [
+ (0.875, (7, 8)),
+ (-0.875, (-7, 8)),
+ (0.0, (0, 1)),
+ (11.5, (23, 2)),
+ ])
+ def test_small(self, ftype, f, ratio):
+ assert_equal(ftype(f).as_integer_ratio(), ratio)
+
+ @pytest.mark.parametrize("ftype", [
+ np.half, np.single, np.double, np.longdouble])
+ def test_simple_fractions(self, ftype):
+ R = fractions.Fraction
+ assert_equal(R(0, 1),
+ R(*ftype(0.0).as_integer_ratio()))
+ assert_equal(R(5, 2),
+ R(*ftype(2.5).as_integer_ratio()))
+ assert_equal(R(1, 2),
+ R(*ftype(0.5).as_integer_ratio()))
+ assert_equal(R(-2100, 1),
+ R(*ftype(-2100.0).as_integer_ratio()))
+
+ @pytest.mark.parametrize("ftype", [
+ np.half, np.single, np.double, np.longdouble])
+ def test_errors(self, ftype):
+ assert_raises(OverflowError, ftype('inf').as_integer_ratio)
+ assert_raises(OverflowError, ftype('-inf').as_integer_ratio)
+ assert_raises(ValueError, ftype('nan').as_integer_ratio)
+
+ def test_against_known_values(self):
+ R = fractions.Fraction
+ assert_equal(R(1075, 512),
+ R(*np.half(2.1).as_integer_ratio()))
+ assert_equal(R(-1075, 512),
+ R(*np.half(-2.1).as_integer_ratio()))
+ assert_equal(R(4404019, 2097152),
+ R(*np.single(2.1).as_integer_ratio()))
+ assert_equal(R(-4404019, 2097152),
+ R(*np.single(-2.1).as_integer_ratio()))
+ assert_equal(R(4728779608739021, 2251799813685248),
+ R(*np.double(2.1).as_integer_ratio()))
+ assert_equal(R(-4728779608739021, 2251799813685248),
+ R(*np.double(-2.1).as_integer_ratio()))
+ # longdouble is platform dependent
+
+ @pytest.mark.parametrize("ftype, frac_vals, exp_vals", [
+ # dtype test cases generated using hypothesis
+ # first five generated cases per dtype
+ (np.half, [0.0, 0.01154830649280303, 0.31082276347447274,
+ 0.527350517124794, 0.8308562335072596],
+ [0, 1, 0, -8, 12]),
+ (np.single, [0.0, 0.09248576989263226, 0.8160498218131407,
+ 0.17389442853722373, 0.7956044195067877],
+ [0, 12, 10, 17, -26]),
+ (np.double, [0.0, 0.031066908499895136, 0.5214135908877832,
+ 0.45780736035689296, 0.5906586745934036],
+ [0, -801, 51, 194, -653]),
+ pytest.param(
+ np.longdouble,
+ [0.0, 0.20492557202724854, 0.4277180662199366, 0.9888085019891495,
+ 0.9620175814461964],
+ [0, -7400, 14266, -7822, -8721],
+ marks=[
+ pytest.mark.skipif(
+ np.finfo(np.double) == np.finfo(np.longdouble),
+ reason="long double is same as double"),
+ pytest.mark.skipif(
+ platform.machine().startswith("ppc"),
+ reason="IBM double double"),
+ ]
+ )
+ ])
+ def test_roundtrip(self, ftype, frac_vals, exp_vals):
+ for frac, exp in zip(frac_vals, exp_vals):
+ f = np.ldexp(frac, exp, dtype=ftype)
+ n, d = f.as_integer_ratio()
+
+ try:
+ # workaround for gh-9968
+ nf = np.longdouble(str(n))
+ df = np.longdouble(str(d))
+ except (OverflowError, RuntimeWarning):
+ # the values may not fit in any float type
+ pytest.skip("longdouble too small on this platform")
+
+ assert_equal(nf / df, f, "{}/{}".format(n, d))
diff --git a/numpy/core/tests/test_scalarbuffer.py b/numpy/core/tests/test_scalarbuffer.py
index cd520d99b..3ded7eecd 100644
--- a/numpy/core/tests/test_scalarbuffer.py
+++ b/numpy/core/tests/test_scalarbuffer.py
@@ -65,7 +65,7 @@ class TestScalarPEP3118(object):
assert_(isinstance(x, np.void))
mv_x = memoryview(x)
expected_size = 16 * np.dtype((np.unicode_, 1)).itemsize
- expected_size += 2 * np.dtype((np.float64, 1)).itemsize
+ expected_size += 2 * np.dtype(np.float64).itemsize
assert_equal(mv_x.itemsize, expected_size)
assert_equal(mv_x.ndim, 0)
assert_equal(mv_x.shape, ())
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index 28436f6c7..9e32cf624 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -69,6 +69,7 @@ class TestCharacter(object):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
np_i = np.int(5)
- res_np = np_s * np_i
res_s = b'abc' * 5
- assert_(res_np == res_s)
+ res_u = u'abc' * 5
+ assert_(np_s * np_i == res_s)
+ assert_(np_u * np_i == res_u)
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index a55f06b69..854df5590 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -9,7 +9,7 @@ import pytest
import numpy as np
from numpy.testing import (
- assert_, assert_equal, assert_raises, assert_almost_equal, assert_allclose,
+ assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
assert_warns
)
@@ -184,7 +184,7 @@ class TestPower(object):
a = 5
b = 4
c = 10
- expected = pow(a, b, c)
+ expected = pow(a, b, c) # noqa: F841
for t in (np.int32, np.float32, np.complex64):
# note that 3-operand power only dispatches on the first argument
assert_raises(TypeError, operator.pow, t(a), b, c)
@@ -422,7 +422,7 @@ class TestConversion(object):
@pytest.mark.skipif(np.finfo(np.double) == np.finfo(np.longdouble),
reason="long double is same as double")
- @pytest.mark.skipif(platform.machine().startswith("ppc64"),
+ @pytest.mark.skipif(platform.machine().startswith("ppc"),
reason="IBM double double")
def test_int_from_huge_longdouble(self):
# Produce a longdouble that would overflow a double,
@@ -565,10 +565,10 @@ class TestMultiply(object):
# Some of this behaviour may be controversial and could be open for
# change.
accepted_types = set(np.typecodes["AllInteger"])
- deprecated_types = set('?')
+ deprecated_types = {'?'}
forbidden_types = (
set(np.typecodes["All"]) - accepted_types - deprecated_types)
- forbidden_types -= set('V') # can't default-construct void scalars
+ forbidden_types -= {'V'} # can't default-construct void scalars
for seq_type in (list, tuple):
seq = seq_type([1, 2, 3])
@@ -664,3 +664,31 @@ class TestAbs(object):
def test_numpy_abs(self):
self._test_abs_func(np.abs)
+
+
+class TestBitShifts(object):
+
+ @pytest.mark.parametrize('type_code', np.typecodes['AllInteger'])
+ @pytest.mark.parametrize('op',
+ [operator.rshift, operator.lshift], ids=['>>', '<<'])
+ def test_shift_all_bits(self, type_code, op):
+ """ Shifts where the shift amount is the width of the type or wider """
+ # gh-2449
+ dt = np.dtype(type_code)
+ nbits = dt.itemsize * 8
+ for val in [5, -5]:
+ for shift in [nbits, nbits + 4]:
+ val_scl = dt.type(val)
+ shift_scl = dt.type(shift)
+ res_scl = op(val_scl, shift_scl)
+ if val_scl < 0 and op is operator.rshift:
+ # sign bit is preserved
+ assert_equal(res_scl, -1)
+ else:
+ assert_equal(res_scl, 0)
+
+ # Result on scalars should be the same as on arrays
+ val_arr = np.array([val]*32, dtype=dt)
+ shift_arr = np.array([shift]*32, dtype=dt)
+ res_arr = op(val_arr, shift_arr)
+ assert_equal(res_arr, res_scl)
diff --git a/numpy/core/tests/test_scalarprint.py b/numpy/core/tests/test_scalarprint.py
index 472ff691d..86b0ca199 100644
--- a/numpy/core/tests/test_scalarprint.py
+++ b/numpy/core/tests/test_scalarprint.py
@@ -10,7 +10,7 @@ import pytest
from tempfile import TemporaryFile
import numpy as np
-from numpy.testing import assert_, assert_equal, suppress_warnings, dec
+from numpy.testing import assert_, assert_equal, suppress_warnings
class TestRealScalars(object):
def test_str(self):
@@ -51,7 +51,7 @@ class TestRealScalars(object):
def test_py2_float_print(self):
# gh-10753
- # In python2, the python float type implements an obsolte method
+ # In python2, the python float type implements an obsolete method
# tp_print, which overrides tp_repr and tp_str when using "print" to
# output to a "real file" (ie, not a StringIO). Make sure we don't
# inherit it.
diff --git a/numpy/core/tests/test_shape_base.py b/numpy/core/tests/test_shape_base.py
index df819b73f..53d272fc5 100644
--- a/numpy/core/tests/test_shape_base.py
+++ b/numpy/core/tests/test_shape_base.py
@@ -1,14 +1,17 @@
from __future__ import division, absolute_import, print_function
-import warnings
+import pytest
+import sys
import numpy as np
from numpy.core import (
array, arange, atleast_1d, atleast_2d, atleast_3d, block, vstack, hstack,
newaxis, concatenate, stack
)
+from numpy.core.shape_base import (_block_dispatcher, _block_setup,
+ _block_concatenate, _block_slicing)
from numpy.testing import (
assert_, assert_raises, assert_array_equal, assert_equal,
- assert_raises_regex, assert_almost_equal
+ assert_raises_regex, assert_warns
)
from numpy.compat import long
@@ -153,6 +156,14 @@ class TestHstack(object):
desired = array([[1, 1], [2, 2]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ hstack((np.arange(3) for _ in range(2)))
+ if sys.version_info.major > 2:
+ # map returns a list on Python 2
+ with assert_warns(FutureWarning):
+ hstack(map(lambda x: x, np.ones((3, 2))))
+
class TestVstack(object):
def test_non_iterable(self):
@@ -189,6 +200,10 @@ class TestVstack(object):
desired = array([[1, 2], [1, 2]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ vstack((np.arange(3) for _ in range(2)))
+
class TestConcatenate(object):
def test_returns_copy(self):
@@ -209,13 +224,27 @@ class TestConcatenate(object):
assert_raises(ValueError, concatenate, (0,))
assert_raises(ValueError, concatenate, (np.array(0),))
+ # dimensionality must match
+ assert_raises_regex(
+ ValueError,
+ r"all the input arrays must have same number of dimensions, but "
+ r"the array at index 0 has 1 dimension\(s\) and the array at "
+ r"index 1 has 2 dimension\(s\)",
+ np.concatenate, (np.zeros(1), np.zeros((1, 1))))
+
# test shapes must match except for concatenation axis
a = np.ones((1, 2, 3))
b = np.ones((2, 2, 3))
axis = list(range(3))
for i in range(3):
np.concatenate((a, b), axis=axis[0]) # OK
- assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
+ assert_raises_regex(
+ ValueError,
+ "all the input array dimensions for the concatenation axis "
+ "must match exactly, but along dimension {}, the array at "
+ "index 0 has size 1 and the array at index 1 has size 2"
+ .format(i),
+ np.concatenate, (a, b), axis=axis[1])
assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
a = np.moveaxis(a, -1, 0)
b = np.moveaxis(b, -1, 0)
@@ -352,12 +381,16 @@ def test_stack():
arrays = [np.random.randn(3, 4) for _ in range(10)]
axes = [0, 1, 2, -1, -2, -3]
expected_shapes = [(10, 3, 4), (3, 10, 4), (3, 4, 10),
- (3, 4, 10), (3, 10, 4), (10, 3, 4)]
+ (3, 4, 10), (3, 10, 4), (10, 3, 4)]
for axis, expected_shape in zip(axes, expected_shapes):
assert_equal(np.stack(arrays, axis).shape, expected_shape)
# empty arrays
assert_(stack([[], [], []]).shape == (3, 0))
assert_(stack([[], [], []], axis=1).shape == (0, 3))
+ # out
+ out = np.zeros_like(r1)
+ np.stack((a, b), out=out)
+ assert_array_equal(out, r1)
# edge cases
assert_raises_regex(ValueError, 'need at least one array', stack, [])
assert_raises_regex(ValueError, 'must have the same shape',
@@ -370,16 +403,62 @@ def test_stack():
stack, [np.zeros((3, 3)), np.zeros(3)], axis=1)
assert_raises_regex(ValueError, 'must have the same shape',
stack, [np.arange(2), np.arange(3)])
+ # generator is deprecated
+ with assert_warns(FutureWarning):
+ result = stack((x for x in range(3)))
+ assert_array_equal(result, np.array([0, 1, 2]))
class TestBlock(object):
- def test_returns_copy(self):
+ @pytest.fixture(params=['block', 'force_concatenate', 'force_slicing'])
+ def block(self, request):
+ # blocking small arrays and large arrays go through different paths.
+ # the algorithm is triggered depending on the number of element
+ # copies required.
+ # We define a test fixture that forces most tests to go through
+ # both code paths.
+ # Ultimately, this should be removed if a single algorithm is found
+ # to be faster for both small and large arrays.
+ def _block_force_concatenate(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_concatenate(arrays, list_ndim, result_ndim)
+
+ def _block_force_slicing(arrays):
+ arrays, list_ndim, result_ndim, _ = _block_setup(arrays)
+ return _block_slicing(arrays, list_ndim, result_ndim)
+
+ if request.param == 'force_concatenate':
+ return _block_force_concatenate
+ elif request.param == 'force_slicing':
+ return _block_force_slicing
+ elif request.param == 'block':
+ return block
+ else:
+ raise ValueError('Unknown blocking request. There is a typo in the tests.')
+
+ def test_returns_copy(self, block):
a = np.eye(3)
- b = np.block(a)
+ b = block(a)
b[0, 0] = 2
assert b[0, 0] != a[0, 0]
- def test_block_simple_row_wise(self):
+ def test_block_total_size_estimate(self, block):
+ _, _, _, total_size = _block_setup([1])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1]])
+ assert total_size == 1
+
+ _, _, _, total_size = _block_setup([[1, 1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1], [1]])
+ assert total_size == 2
+
+ _, _, _, total_size = _block_setup([[1, 2], [3, 4]])
+ assert total_size == 4
+
+ def test_block_simple_row_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
desired = np.array([[1, 1, 2, 2],
@@ -387,7 +466,7 @@ class TestBlock(object):
result = block([a_2d, b_2d])
assert_equal(desired, result)
- def test_block_simple_column_wise(self):
+ def test_block_simple_column_wise(self, block):
a_2d = np.ones((2, 2))
b_2d = 2 * a_2d
expected = np.array([[1, 1],
@@ -397,7 +476,7 @@ class TestBlock(object):
result = block([[a_2d], [b_2d]])
assert_equal(expected, result)
- def test_block_with_1d_arrays_row_wise(self):
+ def test_block_with_1d_arrays_row_wise(self, block):
# # # 1-D vectors are treated as row arrays
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
@@ -405,7 +484,7 @@ class TestBlock(object):
result = block([a, b])
assert_equal(expected, result)
- def test_block_with_1d_arrays_multiple_rows(self):
+ def test_block_with_1d_arrays_multiple_rows(self, block):
a = np.array([1, 2, 3])
b = np.array([2, 3, 4])
expected = np.array([[1, 2, 3, 2, 3, 4],
@@ -413,7 +492,7 @@ class TestBlock(object):
result = block([[a, b], [a, b]])
assert_equal(expected, result)
- def test_block_with_1d_arrays_column_wise(self):
+ def test_block_with_1d_arrays_column_wise(self, block):
# # # 1-D vectors are treated as row arrays
a_1d = np.array([1, 2, 3])
b_1d = np.array([2, 3, 4])
@@ -422,7 +501,7 @@ class TestBlock(object):
result = block([[a_1d], [b_1d]])
assert_equal(expected, result)
- def test_block_mixed_1d_and_2d(self):
+ def test_block_mixed_1d_and_2d(self, block):
a_2d = np.ones((2, 2))
b_1d = np.array([2, 2])
result = block([[a_2d], [b_1d]])
@@ -431,7 +510,7 @@ class TestBlock(object):
[2, 2]])
assert_equal(expected, result)
- def test_block_complicated(self):
+ def test_block_complicated(self, block):
# a bit more complicated
one_2d = np.array([[1, 1, 1]])
two_2d = np.array([[2, 2, 2]])
@@ -455,7 +534,7 @@ class TestBlock(object):
[zero_2d]])
assert_equal(result, expected)
- def test_nested(self):
+ def test_nested(self, block):
one = np.array([1, 1, 1])
two = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
three = np.array([3, 3, 3])
@@ -464,9 +543,9 @@ class TestBlock(object):
six = np.array([6, 6, 6, 6, 6])
zero = np.zeros((2, 6))
- result = np.block([
+ result = block([
[
- np.block([
+ block([
[one],
[three],
[four]
@@ -485,7 +564,7 @@ class TestBlock(object):
assert_equal(result, expected)
- def test_3d(self):
+ def test_3d(self, block):
a000 = np.ones((2, 2, 2), int) * 1
a100 = np.ones((3, 2, 2), int) * 2
@@ -498,7 +577,7 @@ class TestBlock(object):
a111 = np.ones((3, 3, 3), int) * 8
- result = np.block([
+ result = block([
[
[a000, a001],
[a010, a011],
@@ -540,55 +619,102 @@ class TestBlock(object):
assert_array_equal(result, expected)
- def test_block_with_mismatched_shape(self):
+ def test_block_with_mismatched_shape(self, block):
a = np.array([0, 0])
b = np.eye(2)
- assert_raises(ValueError, np.block, [a, b])
- assert_raises(ValueError, np.block, [b, a])
+ assert_raises(ValueError, block, [a, b])
+ assert_raises(ValueError, block, [b, a])
- def test_no_lists(self):
- assert_equal(np.block(1), np.array(1))
- assert_equal(np.block(np.eye(3)), np.eye(3))
+ to_block = [[np.ones((2,3)), np.ones((2,2))],
+ [np.ones((2,2)), np.ones((2,2))]]
+ assert_raises(ValueError, block, to_block)
+ def test_no_lists(self, block):
+ assert_equal(block(1), np.array(1))
+ assert_equal(block(np.eye(3)), np.eye(3))
- def test_invalid_nesting(self):
+ def test_invalid_nesting(self, block):
msg = 'depths are mismatched'
- assert_raises_regex(ValueError, msg, np.block, [1, [2]])
- assert_raises_regex(ValueError, msg, np.block, [1, []])
- assert_raises_regex(ValueError, msg, np.block, [[1], 2])
- assert_raises_regex(ValueError, msg, np.block, [[], 2])
- assert_raises_regex(ValueError, msg, np.block, [
+ assert_raises_regex(ValueError, msg, block, [1, [2]])
+ assert_raises_regex(ValueError, msg, block, [1, []])
+ assert_raises_regex(ValueError, msg, block, [[1], 2])
+ assert_raises_regex(ValueError, msg, block, [[], 2])
+ assert_raises_regex(ValueError, msg, block, [
[[1], [2]],
[[3, 4]],
[5] # missing brackets
])
- def test_empty_lists(self):
- assert_raises_regex(ValueError, 'empty', np.block, [])
- assert_raises_regex(ValueError, 'empty', np.block, [[]])
- assert_raises_regex(ValueError, 'empty', np.block, [[1], []])
+ def test_empty_lists(self, block):
+ assert_raises_regex(ValueError, 'empty', block, [])
+ assert_raises_regex(ValueError, 'empty', block, [[]])
+ assert_raises_regex(ValueError, 'empty', block, [[1], []])
- def test_tuple(self):
- assert_raises_regex(TypeError, 'tuple', np.block, ([1, 2], [3, 4]))
- assert_raises_regex(TypeError, 'tuple', np.block, [(1, 2), (3, 4)])
+ def test_tuple(self, block):
+ assert_raises_regex(TypeError, 'tuple', block, ([1, 2], [3, 4]))
+ assert_raises_regex(TypeError, 'tuple', block, [(1, 2), (3, 4)])
- def test_different_ndims(self):
+ def test_different_ndims(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 1, 3))
- result = np.block([a, b, c])
+ result = block([a, b, c])
expected = np.array([[[1., 2., 2., 3., 3., 3.]]])
assert_equal(result, expected)
- def test_different_ndims_depths(self):
+ def test_different_ndims_depths(self, block):
a = 1.
b = 2 * np.ones((1, 2))
c = 3 * np.ones((1, 2, 3))
- result = np.block([[a, b], [c]])
+ result = block([[a, b], [c]])
expected = np.array([[[1., 2., 2.],
[3., 3., 3.],
[3., 3., 3.]]])
assert_equal(result, expected)
+
+ def test_block_memory_order(self, block):
+ # 3D
+ arr_c = np.zeros((3,)*3, order='C')
+ arr_f = np.zeros((3,)*3, order='F')
+
+ b_c = [[[arr_c, arr_c],
+ [arr_c, arr_c]],
+ [[arr_c, arr_c],
+ [arr_c, arr_c]]]
+
+ b_f = [[[arr_f, arr_f],
+ [arr_f, arr_f]],
+ [[arr_f, arr_f],
+ [arr_f, arr_f]]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+ arr_c = np.zeros((3, 3), order='C')
+ arr_f = np.zeros((3, 3), order='F')
+ # 2D
+ b_c = [[arr_c, arr_c],
+ [arr_c, arr_c]]
+
+ b_f = [[arr_f, arr_f],
+ [arr_f, arr_f]]
+
+ assert block(b_c).flags['C_CONTIGUOUS']
+ assert block(b_f).flags['F_CONTIGUOUS']
+
+
+def test_block_dispatcher():
+ class ArrayLike(object):
+ pass
+ a = ArrayLike()
+ b = ArrayLike()
+ c = ArrayLike()
+ assert_equal(list(_block_dispatcher(a)), [a])
+ assert_equal(list(_block_dispatcher([a])), [a])
+ assert_equal(list(_block_dispatcher([a, b])), [a, b])
+ assert_equal(list(_block_dispatcher([[a], [b, [c]]])), [a, b, c])
+ # don't recurse into non-lists
+ assert_equal(list(_block_dispatcher((a, b))), [(a, b)])
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index 85d9f41bd..707c690dd 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -3,6 +3,8 @@ from __future__ import division, absolute_import, print_function
import warnings
import itertools
+import pytest
+
import numpy as np
import numpy.core._umath_tests as umt
import numpy.linalg._umath_linalg as uml
@@ -13,6 +15,7 @@ from numpy.testing import (
assert_almost_equal, assert_array_almost_equal, assert_no_warnings,
assert_allclose,
)
+from numpy.compat import pickle
class TestUfuncKwargs(object):
@@ -41,18 +44,116 @@ class TestUfuncKwargs(object):
assert_raises(TypeError, np.add, 1, 2, extobj=[4096], parrot=True)
+class TestUfuncGenericLoops(object):
+ """Test generic loops.
+
+ The loops to be tested are:
+
+ PyUFunc_ff_f_As_dd_d
+ PyUFunc_ff_f
+ PyUFunc_dd_d
+ PyUFunc_gg_g
+ PyUFunc_FF_F_As_DD_D
+ PyUFunc_DD_D
+ PyUFunc_FF_F
+ PyUFunc_GG_G
+ PyUFunc_OO_O
+ PyUFunc_OO_O_method
+ PyUFunc_f_f_As_d_d
+ PyUFunc_d_d
+ PyUFunc_f_f
+ PyUFunc_g_g
+ PyUFunc_F_F_As_D_D
+ PyUFunc_F_F
+ PyUFunc_D_D
+ PyUFunc_G_G
+ PyUFunc_O_O
+ PyUFunc_O_O_method
+ PyUFunc_On_Om
+
+ Where:
+
+ f -- float
+ d -- double
+ g -- long double
+ F -- complex float
+ D -- complex double
+ G -- complex long double
+ O -- python object
+
+ It is difficult to assure that each of these loops is entered from the
+ Python level as the special cased loops are a moving target and the
+ corresponding types are architecture dependent. We probably need to
+ define C level testing ufuncs to get at them. For the time being, I've
+ just looked at the signatures registered in the build directory to find
+ relevant functions.
+
+ """
+ np_dtypes = [
+ (np.single, np.single), (np.single, np.double),
+ (np.csingle, np.csingle), (np.csingle, np.cdouble),
+ (np.double, np.double), (np.longdouble, np.longdouble),
+ (np.cdouble, np.cdouble), (np.clongdouble, np.clongdouble)]
+
+ @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
+ def test_unary_PyUFunc(self, input_dtype, output_dtype, f=np.exp, x=0, y=1):
+ xs = np.full(10, input_dtype(x), dtype=output_dtype)
+ ys = f(xs)[::2]
+ assert_allclose(ys, y)
+ assert_equal(ys.dtype, output_dtype)
+
+ def f2(x, y):
+ return x**y
+
+ @pytest.mark.parametrize('input_dtype,output_dtype', np_dtypes)
+ def test_binary_PyUFunc(self, input_dtype, output_dtype, f=f2, x=0, y=1):
+ xs = np.full(10, input_dtype(x), dtype=output_dtype)
+ ys = f(xs, xs)[::2]
+ assert_allclose(ys, y)
+ assert_equal(ys.dtype, output_dtype)
+
+ # class to use in testing object method loops
+ class foo(object):
+ def conjugate(self):
+ return np.bool_(1)
+
+ def logical_xor(self, obj):
+ return np.bool_(1)
+
+ def test_unary_PyUFunc_O_O(self):
+ x = np.ones(10, dtype=object)
+ assert_(np.all(np.abs(x) == 1))
+
+ def test_unary_PyUFunc_O_O_method(self, foo=foo):
+ x = np.full(10, foo(), dtype=object)
+ assert_(np.all(np.conjugate(x) == True))
+
+ def test_binary_PyUFunc_OO_O(self):
+ x = np.ones(10, dtype=object)
+ assert_(np.all(np.add(x, x) == 2))
+
+ def test_binary_PyUFunc_OO_O_method(self, foo=foo):
+ x = np.full(10, foo(), dtype=object)
+ assert_(np.all(np.logical_xor(x, x)))
+
+ def test_binary_PyUFunc_On_Om_method(self, foo=foo):
+ x = np.full((10, 2, 3), foo(), dtype=object)
+ assert_(np.all(np.logical_xor(x, x)))
+
+
class TestUfunc(object):
def test_pickle(self):
- import pickle
- assert_(pickle.loads(pickle.dumps(np.sin)) is np.sin)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_(pickle.loads(pickle.dumps(np.sin,
+ protocol=proto)) is np.sin)
- # Check that ufunc not defined in the top level numpy namespace such as
- # numpy.core._rational_tests.test_add can also be pickled
- res = pickle.loads(pickle.dumps(_rational_tests.test_add))
- assert_(res is _rational_tests.test_add)
+ # Check that ufunc not defined in the top level numpy namespace
+ # such as numpy.core._rational_tests.test_add can also be pickled
+ res = pickle.loads(pickle.dumps(_rational_tests.test_add,
+ protocol=proto))
+ assert_(res is _rational_tests.test_add)
def test_pickle_withstring(self):
- import pickle
astring = (b"cnumpy.core\n_ufunc_reconstruct\np0\n"
b"(S'numpy.core.umath'\np1\nS'cos'\np2\ntp3\nRp4\n.")
assert_(pickle.loads(astring) is np.cos)
@@ -63,146 +164,6 @@ class TestUfunc(object):
idx = np.array(list(zip(np.arange(L - 2), np.arange(L - 2) + 2))).ravel()
assert_array_equal(np.add.reduceat(x, idx)[::2], [1, 3, 5, 7])
- def test_generic_loops(self):
- """Test generic loops.
-
- The loops to be tested are:
-
- PyUFunc_ff_f_As_dd_d
- PyUFunc_ff_f
- PyUFunc_dd_d
- PyUFunc_gg_g
- PyUFunc_FF_F_As_DD_D
- PyUFunc_DD_D
- PyUFunc_FF_F
- PyUFunc_GG_G
- PyUFunc_OO_O
- PyUFunc_OO_O_method
- PyUFunc_f_f_As_d_d
- PyUFunc_d_d
- PyUFunc_f_f
- PyUFunc_g_g
- PyUFunc_F_F_As_D_D
- PyUFunc_F_F
- PyUFunc_D_D
- PyUFunc_G_G
- PyUFunc_O_O
- PyUFunc_O_O_method
- PyUFunc_On_Om
-
- Where:
-
- f -- float
- d -- double
- g -- long double
- F -- complex float
- D -- complex double
- G -- complex long double
- O -- python object
-
- It is difficult to assure that each of these loops is entered from the
- Python level as the special cased loops are a moving target and the
- corresponding types are architecture dependent. We probably need to
- define C level testing ufuncs to get at them. For the time being, I've
- just looked at the signatures registered in the build directory to find
- relevant functions.
-
- Fixme, currently untested:
-
- PyUFunc_ff_f_As_dd_d
- PyUFunc_FF_F_As_DD_D
- PyUFunc_f_f_As_d_d
- PyUFunc_F_F_As_D_D
- PyUFunc_On_Om
-
- """
- fone = np.exp
- ftwo = lambda x, y: x**y
- fone_val = 1
- ftwo_val = 1
- # check unary PyUFunc_f_f.
- msg = "PyUFunc_f_f"
- x = np.zeros(10, dtype=np.single)[0::2]
- assert_almost_equal(fone(x), fone_val, err_msg=msg)
- # check unary PyUFunc_d_d.
- msg = "PyUFunc_d_d"
- x = np.zeros(10, dtype=np.double)[0::2]
- assert_almost_equal(fone(x), fone_val, err_msg=msg)
- # check unary PyUFunc_g_g.
- msg = "PyUFunc_g_g"
- x = np.zeros(10, dtype=np.longdouble)[0::2]
- assert_almost_equal(fone(x), fone_val, err_msg=msg)
- # check unary PyUFunc_F_F.
- msg = "PyUFunc_F_F"
- x = np.zeros(10, dtype=np.csingle)[0::2]
- assert_almost_equal(fone(x), fone_val, err_msg=msg)
- # check unary PyUFunc_D_D.
- msg = "PyUFunc_D_D"
- x = np.zeros(10, dtype=np.cdouble)[0::2]
- assert_almost_equal(fone(x), fone_val, err_msg=msg)
- # check unary PyUFunc_G_G.
- msg = "PyUFunc_G_G"
- x = np.zeros(10, dtype=np.clongdouble)[0::2]
- assert_almost_equal(fone(x), fone_val, err_msg=msg)
-
- # check binary PyUFunc_ff_f.
- msg = "PyUFunc_ff_f"
- x = np.ones(10, dtype=np.single)[0::2]
- assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
- # check binary PyUFunc_dd_d.
- msg = "PyUFunc_dd_d"
- x = np.ones(10, dtype=np.double)[0::2]
- assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
- # check binary PyUFunc_gg_g.
- msg = "PyUFunc_gg_g"
- x = np.ones(10, dtype=np.longdouble)[0::2]
- assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
- # check binary PyUFunc_FF_F.
- msg = "PyUFunc_FF_F"
- x = np.ones(10, dtype=np.csingle)[0::2]
- assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
- # check binary PyUFunc_DD_D.
- msg = "PyUFunc_DD_D"
- x = np.ones(10, dtype=np.cdouble)[0::2]
- assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
- # check binary PyUFunc_GG_G.
- msg = "PyUFunc_GG_G"
- x = np.ones(10, dtype=np.clongdouble)[0::2]
- assert_almost_equal(ftwo(x, x), ftwo_val, err_msg=msg)
-
- # class to use in testing object method loops
- class foo(object):
- def conjugate(self):
- return np.bool_(1)
-
- def logical_xor(self, obj):
- return np.bool_(1)
-
- # check unary PyUFunc_O_O
- msg = "PyUFunc_O_O"
- x = np.ones(10, dtype=object)[0::2]
- assert_(np.all(np.abs(x) == 1), msg)
- # check unary PyUFunc_O_O_method
- msg = "PyUFunc_O_O_method"
- x = np.zeros(10, dtype=object)[0::2]
- for i in range(len(x)):
- x[i] = foo()
- assert_(np.all(np.conjugate(x) == True), msg)
-
- # check binary PyUFunc_OO_O
- msg = "PyUFunc_OO_O"
- x = np.ones(10, dtype=object)[0::2]
- assert_(np.all(np.add(x, x) == 2), msg)
- # check binary PyUFunc_OO_O_method
- msg = "PyUFunc_OO_O_method"
- x = np.zeros(10, dtype=object)[0::2]
- for i in range(len(x)):
- x[i] = foo()
- assert_(np.all(np.logical_xor(x, x)), msg)
-
- # check PyUFunc_On_Om
- # fixme -- I don't know how to do this yet
-
def test_all_ufunc(self):
"""Try to check presence and results of all ufuncs.
@@ -286,67 +247,112 @@ class TestUfunc(object):
"""
pass
+ # from include/numpy/ufuncobject.h
+ size_inferred = 2
+ can_ignore = 4
def test_signature0(self):
# the arguments to test_signature are: nin, nout, core_signature
- # pass
- enabled, num_dims, ixs = umt.test_signature(2, 1, "(i),(i)->()")
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i),(i)->()")
assert_equal(enabled, 1)
assert_equal(num_dims, (1, 1, 0))
assert_equal(ixs, (0, 0))
+ assert_equal(flags, (self.size_inferred,))
+ assert_equal(sizes, (-1,))
def test_signature1(self):
# empty core signature; treat as plain ufunc (with trivial core)
- enabled, num_dims, ixs = umt.test_signature(2, 1, "(),()->()")
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(),()->()")
assert_equal(enabled, 0)
assert_equal(num_dims, (0, 0, 0))
assert_equal(ixs, ())
+ assert_equal(flags, ())
+ assert_equal(sizes, ())
def test_signature2(self):
# more complicated names for variables
- enabled, num_dims, ixs = umt.test_signature(2, 1, "(i1,i2),(J_1)->(_kAB)")
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(i1,i2),(J_1)->(_kAB)")
assert_equal(enabled, 1)
assert_equal(num_dims, (2, 1, 1))
assert_equal(ixs, (0, 1, 2, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
- def test_signature_failure0(self):
- # in the following calls, a ValueError should be raised because
- # of error in core signature
- # FIXME These should be using assert_raises
+ def test_signature3(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, u"(i1, i12), (J_1)->(i12, i2)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 1, 2))
+ assert_equal(ixs, (0, 1, 2, 1, 3))
+ assert_equal(flags, (self.size_inferred,)*4)
+ assert_equal(sizes, (-1, -1, -1, -1))
+
+ def test_signature4(self):
+ # matrix_multiply signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n,k),(k,m)->(n,m)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred,)*3)
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature5(self):
+ # matmul signature from _umath_tests
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 2, 1, "(n?,k),(k,m?)->(n?,m?)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (2, 2, 2))
+ assert_equal(ixs, (0, 1, 1, 2, 0, 2))
+ assert_equal(flags, (self.size_inferred | self.can_ignore,
+ self.size_inferred,
+ self.size_inferred | self.can_ignore))
+ assert_equal(sizes, (-1, -1, -1))
+
+ def test_signature6(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 1, 1, "(3)->()")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 0))
+ assert_equal(ixs, (0,))
+ assert_equal(flags, (0,))
+ assert_equal(sizes, (3,))
+
+ def test_signature7(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3),(03,3),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (0, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
+
+ def test_signature8(self):
+ enabled, num_dims, ixs, flags, sizes = umt.test_signature(
+ 3, 1, "(3?),(3?,3?),(n)->(9)")
+ assert_equal(enabled, 1)
+ assert_equal(num_dims, (1, 2, 1, 1))
+ assert_equal(ixs, (0, 0, 0, 1, 2))
+ assert_equal(flags, (self.can_ignore, self.size_inferred, 0))
+ assert_equal(sizes, (3, -1, 9))
- # error: extra parenthesis
- msg = "core_sig: extra parenthesis"
- try:
- ret = umt.test_signature(2, 1, "((i)),(i)->()")
- assert_equal(ret, None, err_msg=msg)
- except ValueError:
- pass
+ def test_signature_failure_extra_parenthesis(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "((i)),(i)->()")
- def test_signature_failure1(self):
- # error: parenthesis matching
- msg = "core_sig: parenthesis matching"
- try:
- ret = umt.test_signature(2, 1, "(i),)i(->()")
- assert_equal(ret, None, err_msg=msg)
- except ValueError:
- pass
+ def test_signature_failure_mismatching_parenthesis(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "(i),)i(->()")
- def test_signature_failure2(self):
- # error: incomplete signature. letters outside of parenthesis are ignored
- msg = "core_sig: incomplete signature"
- try:
- ret = umt.test_signature(2, 1, "(i),->()")
- assert_equal(ret, None, err_msg=msg)
- except ValueError:
- pass
+ def test_signature_failure_signature_missing_input_arg(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 1, "(i),->()")
- def test_signature_failure3(self):
- # error: incomplete signature. 2 output arguments are specified
- msg = "core_sig: incomplete signature"
- try:
- ret = umt.test_signature(2, 2, "(i),(i)->()")
- assert_equal(ret, None, err_msg=msg)
- except ValueError:
- pass
+ def test_signature_failure_signature_missing_output_arg(self):
+ with assert_raises(ValueError):
+ umt.test_signature(2, 2, "(i),(i)->()")
def test_get_signature(self):
assert_equal(umt.inner1d.signature, "(i),(i)->()")
@@ -525,6 +531,12 @@ class TestUfunc(object):
assert_equal(np.sum(np.ones((2, 3, 5), dtype=np.int64), axis=(0, 2), initial=2),
[12, 12, 12])
+ def test_sum_where(self):
+ # More extensive tests done in test_reduction_with_where.
+ assert_equal(np.sum([[1., 2.], [3., 4.]], where=[True, False]), 4.)
+ assert_equal(np.sum([[1., 2.], [3., 4.]], axis=0, initial=5.,
+ where=[True, False]), [9., 5.])
+
def test_inner1d(self):
a = np.arange(6).reshape((2, 3))
assert_array_equal(umt.inner1d(a, a), np.sum(a*a, axis=-1))
@@ -552,6 +564,18 @@ class TestUfunc(object):
b = np.arange(3).reshape((3, 1, 1))
assert_raises(ValueError, umt.inner1d, a, b)
+ # Writing to a broadcasted array with overlap should warn, gh-2705
+ a = np.arange(2)
+ b = np.arange(4).reshape((2, 2))
+ u, v = np.broadcast_arrays(a, b)
+ assert_equal(u.strides[0], 0)
+ x = u + v
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ u += v
+ assert_equal(len(w), 1)
+ assert_(x[0,0] != u[0, 0])
+
def test_type_cast(self):
msg = "type cast"
a = np.arange(6, dtype='short').reshape((2, 3))
@@ -872,6 +896,89 @@ class TestUfunc(object):
w = np.array([], dtype='f8')
assert_array_equal(umt.innerwt(a, b, w), np.sum(a*b*w, axis=-1))
+ def test_cross1d(self):
+ """Test with fixed-sized signature."""
+ a = np.eye(3)
+ assert_array_equal(umt.cross1d(a, a), np.zeros((3, 3)))
+ out = np.zeros((3, 3))
+ result = umt.cross1d(a[0], a, out)
+ assert_(result is out)
+ assert_array_equal(result, np.vstack((np.zeros(3), a[2], -a[1])))
+ assert_raises(ValueError, umt.cross1d, np.eye(4), np.eye(4))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(4.))
+ assert_raises(ValueError, umt.cross1d, a, np.arange(3.), np.zeros((3, 4)))
+
+ def test_can_ignore_signature(self):
+ # Comparing the effects of ? in signature:
+ # matrix_multiply: (m,n),(n,p)->(m,p) # all must be there.
+ # matmul: (m?,n),(n,p?)->(m?,p?) # allow missing m, p.
+ mat = np.arange(12).reshape((2, 3, 2))
+ single_vec = np.arange(2)
+ col_vec = single_vec[:, np.newaxis]
+ col_vec_array = np.arange(8).reshape((2, 2, 2, 1)) + 1
+ # matrix @ single column vector with proper dimension
+ mm_col_vec = umt.matrix_multiply(mat, col_vec)
+ # matmul does the same thing
+ matmul_col_vec = umt.matmul(mat, col_vec)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # matrix @ vector without dimension making it a column vector.
+ # matrix multiply fails -> missing core dim.
+ assert_raises(ValueError, umt.matrix_multiply, mat, single_vec)
+ # matmul mimicker passes, and returns a vector.
+ matmul_col = umt.matmul(mat, single_vec)
+ assert_array_equal(matmul_col, mm_col_vec.squeeze())
+ # Now with a column array: same as for column vector,
+ # broadcasting sensibly.
+ mm_col_vec = umt.matrix_multiply(mat, col_vec_array)
+ matmul_col_vec = umt.matmul(mat, col_vec_array)
+ assert_array_equal(matmul_col_vec, mm_col_vec)
+ # As above, but for row vector
+ single_vec = np.arange(3)
+ row_vec = single_vec[np.newaxis, :]
+ row_vec_array = np.arange(24).reshape((4, 2, 1, 1, 3)) + 1
+ # row vector @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec, mat)
+ matmul_row_vec = umt.matmul(row_vec, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # single row vector @ matrix
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, mat)
+ matmul_row = umt.matmul(single_vec, mat)
+ assert_array_equal(matmul_row, mm_row_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_vec = umt.matrix_multiply(row_vec_array, mat)
+ matmul_row_vec = umt.matmul(row_vec_array, mat)
+ assert_array_equal(matmul_row_vec, mm_row_vec)
+ # Now for vector combinations
+ # row vector @ column vector
+ col_vec = row_vec.T
+ col_vec_array = row_vec_array.swapaxes(-2, -1)
+ mm_row_col_vec = umt.matrix_multiply(row_vec, col_vec)
+ matmul_row_col_vec = umt.matmul(row_vec, col_vec)
+ assert_array_equal(matmul_row_col_vec, mm_row_col_vec)
+ # single row vector @ single col vector
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec)
+ matmul_row_col = umt.matmul(single_vec, single_vec)
+ assert_array_equal(matmul_row_col, mm_row_col_vec.squeeze())
+ # row vector array @ matrix
+ mm_row_col_array = umt.matrix_multiply(row_vec_array, col_vec_array)
+ matmul_row_col_array = umt.matmul(row_vec_array, col_vec_array)
+ assert_array_equal(matmul_row_col_array, mm_row_col_array)
+ # Finally, check that things are *not* squeezed if one gives an
+ # output.
+ out = np.zeros_like(mm_row_col_array)
+ out = umt.matrix_multiply(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ out[:] = 0
+ out = umt.matmul(row_vec_array, col_vec_array, out=out)
+ assert_array_equal(out, mm_row_col_array)
+ # And check one cannot put missing dimensions back.
+ out = np.zeros_like(mm_row_col_vec)
+ assert_raises(ValueError, umt.matrix_multiply, single_vec, single_vec,
+ out)
+ # But fine for matmul, since it is just a broadcast.
+ out = umt.matmul(single_vec, single_vec, out)
+ assert_array_equal(out, mm_row_col_vec.squeeze())
+
def test_matrix_multiply(self):
self.compare_matrix_multiply_results(np.long)
self.compare_matrix_multiply_results(np.double)
@@ -1008,6 +1115,8 @@ class TestUfunc(object):
assert_equal(np.array([[1]], dtype=object).sum(), 1)
assert_equal(np.array([[[1, 2]]], dtype=object).sum((0, 1)), [1, 2])
assert_equal(np.array([1], dtype=object).sum(initial=1), 2)
+ assert_equal(np.array([[1], [2, 3]], dtype=object)
+ .sum(initial=[0], where=[False, True]), [0, 2, 3])
def test_object_array_accumulate_inplace(self):
# Checks that in-place accumulates work, see also gh-7402
@@ -1242,6 +1351,44 @@ class TestUfunc(object):
res = np.add.reduce(a, initial=5)
assert_equal(res, 15)
+ @pytest.mark.parametrize('axis', (0, 1, None))
+ @pytest.mark.parametrize('where', (np.array([False, True, True]),
+ np.array([[True], [False], [True]]),
+ np.array([[True, False, False],
+ [False, True, False],
+ [False, True, True]])))
+ def test_reduction_with_where(self, axis, where):
+ a = np.arange(9.).reshape(3, 3)
+ a_copy = a.copy()
+ a_check = np.zeros_like(a)
+ np.positive(a, out=a_check, where=where)
+
+ res = np.add.reduce(a, axis=axis, where=where)
+ check = a_check.sum(axis)
+ assert_equal(res, check)
+ # Check we do not overwrite elements of a internally.
+ assert_array_equal(a, a_copy)
+
+ @pytest.mark.parametrize(('axis', 'where'),
+ ((0, np.array([True, False, True])),
+ (1, [True, True, False]),
+ (None, True)))
+ @pytest.mark.parametrize('initial', (-np.inf, 5.))
+ def test_reduction_with_where_and_initial(self, axis, where, initial):
+ a = np.arange(9.).reshape(3, 3)
+ a_copy = a.copy()
+ a_check = np.full(a.shape, -np.inf)
+ np.positive(a, out=a_check, where=where)
+
+ res = np.maximum.reduce(a, axis=axis, where=where, initial=initial)
+ check = a_check.max(axis, initial=initial)
+ assert_equal(res, check)
+
+ def test_reduction_where_initial_needed(self):
+ a = np.arange(9.).reshape(3, 3)
+ m = [False, True, False]
+ assert_raises(ValueError, np.maximum.reduce, a, where=m)
+
def test_identityless_reduction_nonreorderable(self):
a = np.array([[8.0, 2.0, 2.0], [1.0, 0.5, 0.25]])
@@ -1378,6 +1525,7 @@ class TestUfunc(object):
result = struct_ufunc.add_triplet(a, b)
assert_equal(result, np.array([(2, 4, 6)], dtype='u8,u8,u8'))
+ assert_raises(RuntimeError, struct_ufunc.register_fail)
def test_custom_ufunc(self):
a = np.array(
@@ -1595,16 +1743,19 @@ class TestUfunc(object):
assert_equal(f(d, 0, None, None, True), r.reshape((1,) + r.shape))
assert_equal(f(d, 0, None, None, False, 0), r)
assert_equal(f(d, 0, None, None, False, initial=0), r)
+ assert_equal(f(d, 0, None, None, False, 0, True), r)
+ assert_equal(f(d, 0, None, None, False, 0, where=True), r)
# multiple keywords
assert_equal(f(d, axis=0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, dtype=None, out=None, keepdims=False), r)
assert_equal(f(d, 0, None, out=None, keepdims=False), r)
- assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0), r)
+ assert_equal(f(d, 0, None, out=None, keepdims=False, initial=0,
+ where=True), r)
# too little
assert_raises(TypeError, f)
# too much
- assert_raises(TypeError, f, d, 0, None, None, False, 0, 1)
+ assert_raises(TypeError, f, d, 0, None, None, False, 0, True, 1)
# invalid axis
assert_raises(TypeError, f, d, "invalid")
assert_raises(TypeError, f, d, axis="invalid")
@@ -1703,3 +1854,96 @@ class TestUfunc(object):
def test_no_doc_string(self):
# gh-9337
assert_('\n' not in umt.inner1d_no_doc.__doc__)
+
+ def test_invalid_args(self):
+ # gh-7961
+ exc = pytest.raises(TypeError, np.sqrt, None)
+ # minimally check the exception text
+ assert exc.match('loop of ufunc does not support')
+
+ @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+ def test_nat_is_not_finite(self, nat):
+ try:
+ assert not np.isfinite(nat)
+ except TypeError:
+ pass # ok, just not implemented
+
+ @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+ def test_nat_is_nan(self, nat):
+ try:
+ assert np.isnan(nat)
+ except TypeError:
+ pass # ok, just not implemented
+
+ @pytest.mark.parametrize('nat', [np.datetime64('nat'), np.timedelta64('nat')])
+ def test_nat_is_not_inf(self, nat):
+ try:
+ assert not np.isinf(nat)
+ except TypeError:
+ pass # ok, just not implemented
+
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+ if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_types(ufunc):
+ '''
+ Check all ufuncs that the correct type is returned. Avoid
+ object and boolean types since many operations are not defined for
+ for them.
+
+ Choose the shape so even dot and matmul will succeed
+ '''
+ for typ in ufunc.types:
+ # types is a list of strings like ii->i
+ if 'O' in typ or '?' in typ:
+ continue
+ inp, out = typ.split('->')
+ args = [np.ones((3, 3), t) for t in inp]
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings("always")
+ res = ufunc(*args)
+ if isinstance(res, tuple):
+ outs = tuple(out)
+ assert len(res) == len(outs)
+ for r, t in zip(res, outs):
+ assert r.dtype == np.dtype(t)
+ else:
+ assert res.dtype == np.dtype(out)
+
+@pytest.mark.parametrize('ufunc', [getattr(np, x) for x in dir(np)
+ if isinstance(getattr(np, x), np.ufunc)])
+def test_ufunc_noncontiguous(ufunc):
+ '''
+ Check that contiguous and non-contiguous calls to ufuncs
+ have the same results for values in range(9)
+ '''
+ for typ in ufunc.types:
+ # types is a list of strings like ii->i
+ if any(set('O?mM') & set(typ)):
+ # bool, object, datetime are too irregular for this simple test
+ continue
+ inp, out = typ.split('->')
+ args_c = [np.empty(6, t) for t in inp]
+ args_n = [np.empty(18, t)[::3] for t in inp]
+ for a in args_c:
+ a.flat = range(1,7)
+ for a in args_n:
+ a.flat = range(1,7)
+ with warnings.catch_warnings(record=True):
+ warnings.filterwarnings("always")
+ res_c = ufunc(*args_c)
+ res_n = ufunc(*args_n)
+ if len(out) == 1:
+ res_c = (res_c,)
+ res_n = (res_n,)
+ for c_ar, n_ar in zip(res_c, res_n):
+ dt = c_ar.dtype
+ if np.issubdtype(dt, np.floating):
+ # for floating point results allow a small fuss in comparisons
+ # since different algorithms (libm vs. intrinsics) can be used
+ # for different input strides
+ res_eps = np.finfo(dt).eps
+ tol = 2*res_eps
+ assert_allclose(res_c, res_n, atol=tol, rtol=tol)
+ else:
+ assert_equal(c_ar, n_ar)
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index c15ce83f6..9b4ce9e47 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -1,11 +1,11 @@
from __future__ import division, absolute_import, print_function
-import sys
import platform
import warnings
import fnmatch
import itertools
import pytest
+from fractions import Fraction
import numpy.core.umath as ncu
from numpy.core import _umath_tests as ncu_tests
@@ -13,11 +13,10 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
- assert_allclose, assert_no_warnings, suppress_warnings,
- _gen_alignment_data, assert_warns
+ assert_array_max_ulp, assert_allclose, assert_no_warnings, suppress_warnings,
+ _gen_alignment_data, assert_array_almost_equal_nulp
)
-
def on_powerpc():
""" True if we are running on a Power PC platform."""
return platform.processor() == 'powerpc' or \
@@ -76,11 +75,9 @@ class TestOut(object):
assert_(r1 is o1)
assert_(r2 is o2)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
- assert_(r1 is o1)
- assert_(w[0].category is DeprecationWarning)
assert_raises(ValueError, np.add, a, 2, o, o, subok=subok)
assert_raises(ValueError, np.add, a, 2, o, out=o, subok=subok)
@@ -166,14 +163,9 @@ class TestOut(object):
else:
assert_(type(r1) == np.ndarray)
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
r1, r2 = np.frexp(d, out=o1, subok=subok)
- if subok:
- assert_(isinstance(r2, ArrayWrap))
- else:
- assert_(type(r2) == np.ndarray)
- assert_(w[0].category is DeprecationWarning)
class TestComparisons(object):
@@ -274,6 +266,12 @@ class TestDivision(object):
y = np.floor_divide(x**2, x)
assert_equal(y, [1.e+110, 0], err_msg=msg)
+ def test_floor_division_signed_zero(self):
+ # Check that the sign bit is correctly set when dividing positive and
+ # negative zero by one.
+ x = np.zeros(10)
+ assert_equal(np.signbit(x//1), 0)
+ assert_equal(np.signbit((-x)//1), 1)
def floor_divide_and_remainder(x, y):
return (np.floor_divide(x, y), np.remainder(x, y))
@@ -644,6 +642,180 @@ class TestExp(object):
yf = np.array(y, dtype=dt)*log2_
assert_almost_equal(np.exp(yf), xf)
+class TestSpecialFloats(object):
+ def test_exp_values(self):
+ x = [np.nan, np.nan, np.inf, 0.]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.exp(yf), xf)
+
+ with np.errstate(over='raise'):
+ assert_raises(FloatingPointError, np.exp, np.float32(100.))
+ assert_raises(FloatingPointError, np.exp, np.float32(1E19))
+
+ def test_log_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.inf, np.nan, -np.inf, np.nan]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0., -1.0]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.log(yf), xf)
+
+ with np.errstate(divide='raise'):
+ assert_raises(FloatingPointError, np.log, np.float32(0.))
+
+ with np.errstate(invalid='raise'):
+ assert_raises(FloatingPointError, np.log, np.float32(-np.inf))
+ assert_raises(FloatingPointError, np.log, np.float32(-1.0))
+
+ def test_sincos_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.nan, np.nan]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.sin(yf), xf)
+ assert_equal(np.cos(yf), xf)
+
+ with np.errstate(invalid='raise'):
+ assert_raises(FloatingPointError, np.sin, np.float32(-np.inf))
+ assert_raises(FloatingPointError, np.sin, np.float32(np.inf))
+ assert_raises(FloatingPointError, np.cos, np.float32(-np.inf))
+ assert_raises(FloatingPointError, np.cos, np.float32(np.inf))
+
+ def test_sqrt_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, np.inf, np.nan, 0.]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0.]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.sqrt(yf), xf)
+
+ #with np.errstate(invalid='raise'):
+ # for dt in ['f', 'd', 'g']:
+ # assert_raises(FloatingPointError, np.sqrt, np.array(-100., dtype=dt))
+
+ def test_abs_values(self):
+ x = [np.nan, np.nan, np.inf, np.inf, 0., 0., 1.0, 1.0]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0., -1.0, 1.0]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.abs(yf), xf)
+
+ def test_square_values(self):
+ x = [np.nan, np.nan, np.inf, np.inf]
+ y = [np.nan, -np.nan, np.inf, -np.inf]
+ with np.errstate(all='ignore'):
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.square(yf), xf)
+
+ with np.errstate(over='raise'):
+ assert_raises(FloatingPointError, np.square, np.array(1E32, dtype='f'))
+ assert_raises(FloatingPointError, np.square, np.array(1E200, dtype='d'))
+
+ def test_reciprocal_values(self):
+ with np.errstate(all='ignore'):
+ x = [np.nan, np.nan, 0.0, -0.0, np.inf, -np.inf]
+ y = [np.nan, -np.nan, np.inf, -np.inf, 0., -0.]
+ for dt in ['f', 'd', 'g']:
+ xf = np.array(x, dtype=dt)
+ yf = np.array(y, dtype=dt)
+ assert_equal(np.reciprocal(yf), xf)
+
+ with np.errstate(divide='raise'):
+ for dt in ['f', 'd', 'g']:
+ assert_raises(FloatingPointError, np.reciprocal, np.array(-0.0, dtype=dt))
+
+# func : [maxulperror, low, high]
+avx_ufuncs = {'sqrt' :[1, 0., 100.],
+ 'absolute' :[0, -100., 100.],
+ 'reciprocal' :[1, 1., 100.],
+ 'square' :[1, -100., 100.],
+ 'rint' :[0, -100., 100.],
+ 'floor' :[0, -100., 100.],
+ 'ceil' :[0, -100., 100.],
+ 'trunc' :[0, -100., 100.]}
+
+class TestAVXUfuncs(object):
+ def test_avx_based_ufunc(self):
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ np.random.seed(42)
+ for func, prop in avx_ufuncs.items():
+ maxulperr = prop[0]
+ minval = prop[1]
+ maxval = prop[2]
+ # various array sizes to ensure masking in AVX is tested
+ for size in range(1,32):
+ myfunc = getattr(np, func)
+ x_f32 = np.float32(np.random.uniform(low=minval, high=maxval,
+ size=size))
+ x_f64 = np.float64(x_f32)
+ x_f128 = np.longdouble(x_f32)
+ y_true128 = myfunc(x_f128)
+ if maxulperr == 0:
+ assert_equal(myfunc(x_f32), np.float32(y_true128))
+ assert_equal(myfunc(x_f64), np.float64(y_true128))
+ else:
+ assert_array_max_ulp(myfunc(x_f32), np.float32(y_true128),
+ maxulp=maxulperr)
+ assert_array_max_ulp(myfunc(x_f64), np.float64(y_true128),
+ maxulp=maxulperr)
+ # various strides to test gather instruction
+ if size > 1:
+ y_true32 = myfunc(x_f32)
+ y_true64 = myfunc(x_f64)
+ for jj in strides:
+ assert_equal(myfunc(x_f64[::jj]), y_true64[::jj])
+ assert_equal(myfunc(x_f32[::jj]), y_true32[::jj])
+
+class TestAVXFloat32Transcendental(object):
+ def test_exp_float32(self):
+ np.random.seed(42)
+ x_f32 = np.float32(np.random.uniform(low=0.0,high=88.1,size=1000000))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.exp(x_f32), np.float32(np.exp(x_f64)), maxulp=3)
+
+ def test_log_float32(self):
+ np.random.seed(42)
+ x_f32 = np.float32(np.random.uniform(low=0.0,high=1000,size=1000000))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.log(x_f32), np.float32(np.log(x_f64)), maxulp=4)
+
+ def test_sincos_float32(self):
+ np.random.seed(42)
+ N = 1000000
+ M = np.int(N/20)
+ index = np.random.randint(low=0, high=N, size=M)
+ x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
+ # test coverage for elements > 117435.992f for which glibc is used
+ x_f32[index] = np.float32(10E+10*np.random.rand(M))
+ x_f64 = np.float64(x_f32)
+ assert_array_max_ulp(np.sin(x_f32), np.float32(np.sin(x_f64)), maxulp=2)
+ assert_array_max_ulp(np.cos(x_f32), np.float32(np.cos(x_f64)), maxulp=2)
+
+ def test_strided_float32(self):
+ np.random.seed(42)
+ strides = np.array([-4,-3,-2,-1,1,2,3,4])
+ sizes = np.arange(2,100)
+ for ii in sizes:
+ x_f32 = np.float32(np.random.uniform(low=0.01,high=88.1,size=ii))
+ exp_true = np.exp(x_f32)
+ log_true = np.log(x_f32)
+ sin_true = np.sin(x_f32)
+ cos_true = np.cos(x_f32)
+ for jj in strides:
+ assert_array_almost_equal_nulp(np.exp(x_f32[::jj]), exp_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.log(x_f32[::jj]), log_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.sin(x_f32[::jj]), sin_true[::jj], nulp=2)
+ assert_array_almost_equal_nulp(np.cos(x_f32[::jj]), cos_true[::jj], nulp=2)
class TestLogAddExp(_FilterInvalids):
def test_logaddexp_values(self):
@@ -685,6 +857,10 @@ class TestLogAddExp(_FilterInvalids):
assert_(np.isnan(np.logaddexp(0, np.nan)))
assert_(np.isnan(np.logaddexp(np.nan, np.nan)))
+ def test_reduce(self):
+ assert_equal(np.logaddexp.identity, -np.inf)
+ assert_equal(np.logaddexp.reduce([]), -np.inf)
+
class TestLog1p(object):
def test_log1p(self):
@@ -1294,6 +1470,7 @@ class TestSign(object):
# In reference to github issue #6229
def test_nan():
foo = np.array([np.nan])
+ # FIXME: a not used
a = np.sign(foo.astype(object))
assert_raises(TypeError, test_nan)
@@ -1327,21 +1504,18 @@ class TestMinMax(object):
assert_equal(d.max(), d[0])
assert_equal(d.min(), d[0])
- def test_reduce_warns(self):
+ def test_reduce_reorder(self):
# gh 10370, 11029 Some compilers reorder the call to npy_getfloatstatus
# and put it before the call to an intrisic function that causes
- # invalid status to be set. Also make sure warnings are emitted
+ # invalid status to be set. Also make sure warnings are not emitted
for n in (2, 4, 8, 16, 32):
for dt in (np.float32, np.float16, np.complex64):
- with suppress_warnings() as sup:
- sup.record(RuntimeWarning)
- for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
- assert_equal(np.min(r), np.nan)
- assert_equal(len(sup.log), n)
+ for r in np.diagflat(np.array([np.nan] * n, dtype=dt)):
+ assert_equal(np.min(r), np.nan)
- def test_minimize_warns(self):
- # gh 11589
- assert_warns(RuntimeWarning, np.minimum, np.nan, 1)
+ def test_minimize_no_warns(self):
+ a = np.minimum(np.nan, 1)
+ assert_equal(a, np.nan)
class TestAbsoluteNegative(object):
@@ -1595,7 +1769,6 @@ class TestSpecialMethods(object):
ok = np.empty(1).view(Ok)
bad = np.empty(1).view(Bad)
-
# double-free (segfault) of "ok" if "bad" raises an exception
for i in range(10):
assert_raises(RuntimeError, ncu.frexp, 1, ok, bad)
@@ -1893,7 +2066,8 @@ class TestSpecialMethods(object):
# reduce, kwargs
res = np.multiply.reduce(a, axis='axis0', dtype='dtype0', out='out0',
- keepdims='keep0', initial='init0')
+ keepdims='keep0', initial='init0',
+ where='where0')
assert_equal(res[0], a)
assert_equal(res[1], np.multiply)
assert_equal(res[2], 'reduce')
@@ -1902,7 +2076,8 @@ class TestSpecialMethods(object):
'out': ('out0',),
'keepdims': 'keep0',
'axis': 'axis0',
- 'initial': 'init0'})
+ 'initial': 'init0',
+ 'where': 'where0'})
# reduce, output equal to None removed, but not other explicit ones,
# even if they are at their default value.
@@ -1912,14 +2087,18 @@ class TestSpecialMethods(object):
assert_equal(res[4], {'axis': 0, 'keepdims': True})
res = np.multiply.reduce(a, None, out=(None,), dtype=None)
assert_equal(res[4], {'axis': None, 'dtype': None})
- res = np.multiply.reduce(a, 0, None, None, False, 2)
- assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': 2})
- # np._NoValue ignored for initial.
- res = np.multiply.reduce(a, 0, None, None, False, np._NoValue)
- assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False})
- # None kept for initial.
- res = np.multiply.reduce(a, 0, None, None, False, None)
- assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False, 'initial': None})
+ res = np.multiply.reduce(a, 0, None, None, False, 2, True)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+ 'initial': 2, 'where': True})
+ # np._NoValue ignored for initial
+ res = np.multiply.reduce(a, 0, None, None, False,
+ np._NoValue, True)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+ 'where': True})
+ # None kept for initial, True for where.
+ res = np.multiply.reduce(a, 0, None, None, False, None, True)
+ assert_equal(res[4], {'axis': 0, 'dtype': None, 'keepdims': False,
+ 'initial': None, 'where': True})
# reduce, wrong args
assert_raises(ValueError, np.multiply.reduce, a, out=())
@@ -2063,10 +2242,9 @@ class TestSpecialMethods(object):
assert_(np.modf(a, None) == {})
assert_(np.modf(a, None, None) == {})
assert_(np.modf(a, out=(None, None)) == {})
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', DeprecationWarning)
- assert_(np.modf(a, out=None) == {})
- assert_(w[0].category is DeprecationWarning)
+ with assert_raises(TypeError):
+ # Out argument must be tuple, since there are multiple outputs.
+ np.modf(a, out=None)
# don't give positional and output argument, or too many arguments.
# wrong number of arguments in the tuple is an error too.
@@ -2447,9 +2625,40 @@ class TestRationalFunctions(object):
assert_equal(np.gcd(2**100, 3**100), 1)
-def is_longdouble_finfo_bogus():
- info = np.finfo(np.longcomplex)
- return not np.isfinite(np.log10(info.tiny/info.eps))
+class TestRoundingFunctions(object):
+
+ def test_object_direct(self):
+ """ test direct implementation of these magic methods """
+ class C:
+ def __floor__(self):
+ return 1
+ def __ceil__(self):
+ return 2
+ def __trunc__(self):
+ return 3
+
+ arr = np.array([C(), C()])
+ assert_equal(np.floor(arr), [1, 1])
+ assert_equal(np.ceil(arr), [2, 2])
+ assert_equal(np.trunc(arr), [3, 3])
+
+ def test_object_indirect(self):
+ """ test implementations via __float__ """
+ class C:
+ def __float__(self):
+ return -2.5
+
+ arr = np.array([C(), C()])
+ assert_equal(np.floor(arr), [-3, -3])
+ assert_equal(np.ceil(arr), [-2, -2])
+ with pytest.raises(TypeError):
+ np.trunc(arr) # consistent with math.trunc
+
+ def test_fraction(self):
+ f = Fraction(-4, 3)
+ assert_equal(np.floor(f), -2)
+ assert_equal(np.ceil(f), -1)
+ assert_equal(np.trunc(f), -1)
class TestComplexFunctions(object):
@@ -2547,7 +2756,8 @@ class TestComplexFunctions(object):
b = cfunc(p)
assert_(abs(a - b) < atol, "%s %s: %s; cmath: %s" % (fname, p, a, b))
- def check_loss_of_precision(self, dtype):
+ @pytest.mark.parametrize('dtype', [np.complex64, np.complex_, np.longcomplex])
+ def test_loss_of_precision(self, dtype):
"""Check loss of precision in complex arc* functions"""
# Check against known-good functions
@@ -2589,10 +2799,11 @@ class TestComplexFunctions(object):
# It's not guaranteed that the system-provided arc functions
# are accurate down to a few epsilons. (Eg. on Linux 64-bit)
# So, give more leeway for long complex tests here:
- check(x_series, 50*eps)
+ # Can use 2.1 for > Ubuntu LTS Trusty (2014), glibc = 2.19.
+ check(x_series, 50.0*eps)
else:
check(x_series, 2.1*eps)
- check(x_basic, 2*eps/1e-3)
+ check(x_basic, 2.0*eps/1e-3)
# Check a few points
@@ -2632,15 +2843,6 @@ class TestComplexFunctions(object):
check(func, pts, 1j)
check(func, pts, 1+1j)
- def test_loss_of_precision(self):
- for dtype in [np.complex64, np.complex_]:
- self.check_loss_of_precision(dtype)
-
- @pytest.mark.skipif(is_longdouble_finfo_bogus(),
- reason="Bogus long double finfo")
- def test_loss_of_precision_longcomplex(self):
- self.check_loss_of_precision(np.longcomplex)
-
class TestAttributes(object):
def test_attributes(self):
@@ -2923,3 +3125,14 @@ def test_signaling_nan_exceptions():
with assert_no_warnings():
a = np.ndarray(shape=(), dtype='float32', buffer=b'\x00\xe0\xbf\xff')
np.isnan(a)
+
+@pytest.mark.parametrize("arr", [
+ np.arange(2),
+ np.matrix([0, 1]),
+ np.matrix([[0, 1], [2, 5]]),
+ ])
+def test_outer_subclass_preserve(arr):
+ # for gh-8661
+ class foo(np.ndarray): pass
+ actual = np.multiply.outer(arr.view(foo), arr.view(foo))
+ assert actual.__class__.__name__ == 'foo'
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
new file mode 100644
index 000000000..0bab04df2
--- /dev/null
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -0,0 +1,54 @@
+import numpy as np
+import platform
+from os import path
+import sys
+import pytest
+from ctypes import *
+from numpy.testing import assert_array_max_ulp
+
+runtest = sys.platform.startswith('linux') and (platform.machine() == 'x86_64')
+platform_skip = pytest.mark.skipif(not runtest,
+ reason="""
+ stick to x86_64 and linux platforms.
+ test seems to fail on some of ARM and power
+ architectures.
+ """)
+
+# convert string to hex function taken from:
+# https://stackoverflow.com/questions/1592158/convert-hex-to-float #
+def convert(s):
+ i = int(s, 16) # convert from hex to a Python int
+ cp = pointer(c_int(i)) # make this into a c integer
+ fp = cast(cp, POINTER(c_float)) # cast the int pointer to a float pointer
+ return fp.contents.value # dereference the pointer, get the float
+
+str_to_float = np.vectorize(convert)
+files = ['umath-validation-set-exp',
+ 'umath-validation-set-log',
+ 'umath-validation-set-sin',
+ 'umath-validation-set-cos']
+
+class TestAccuracy(object):
+ @pytest.mark.xfail(reason="Fails for MacPython/numpy-wheels builds")
+ def test_validate_transcendentals(self):
+ with np.errstate(all='ignore'):
+ for filename in files:
+ data_dir = path.join(path.dirname(__file__), 'data')
+ filepath = path.join(data_dir, filename)
+ with open(filepath) as fid:
+ file_without_comments = (r for r in fid if not r[0] in ('$', '#'))
+ data = np.genfromtxt(file_without_comments,
+ dtype=('|S39','|S39','|S39',np.int),
+ names=('type','input','output','ulperr'),
+ delimiter=',',
+ skip_header=1)
+ npfunc = getattr(np, filename.split('-')[3])
+ for datatype in np.unique(data['type']):
+ data_subset = data[data['type'] == datatype]
+ inval = np.array(str_to_float(data_subset['input'].astype(str)), dtype=eval(datatype))
+ outval = np.array(str_to_float(data_subset['output'].astype(str)), dtype=eval(datatype))
+ perm = np.random.permutation(len(inval))
+ inval = inval[perm]
+ outval = outval[perm]
+ maxulperr = data_subset['ulperr'].max()
+ assert_array_max_ulp(npfunc(inval), outval, maxulperr)
diff --git a/numpy/core/tests/test_umath_complex.py b/numpy/core/tests/test_umath_complex.py
index 785ae8c57..1f5b4077f 100644
--- a/numpy/core/tests/test_umath_complex.py
+++ b/numpy/core/tests/test_umath_complex.py
@@ -5,7 +5,8 @@ import platform
import pytest
import numpy as np
-import numpy.core.umath as ncu
+# import the c-extension module directly since _arg is not exported via umath
+import numpy.core._multiarray_umath as ncu
from numpy.testing import (
assert_raises, assert_equal, assert_array_equal, assert_almost_equal
)
diff --git a/numpy/core/umath.py b/numpy/core/umath.py
index a0e8ad427..f3b26ab72 100644
--- a/numpy/core/umath.py
+++ b/numpy/core/umath.py
@@ -9,7 +9,7 @@ by importing from the extension module.
from . import _multiarray_umath
from numpy.core._multiarray_umath import *
from numpy.core._multiarray_umath import (
- _UFUNC_API, _add_newdoc_ufunc, _arg, _ones_like
+ _UFUNC_API, _add_newdoc_ufunc, _ones_like
)
__all__ = [
@@ -18,7 +18,7 @@ __all__ = [
'FPE_DIVIDEBYZERO', 'FPE_INVALID', 'FPE_OVERFLOW', 'FPE_UNDERFLOW', 'NAN',
'NINF', 'NZERO', 'PINF', 'PZERO', 'SHIFT_DIVIDEBYZERO', 'SHIFT_INVALID',
'SHIFT_OVERFLOW', 'SHIFT_UNDERFLOW', 'UFUNC_BUFSIZE_DEFAULT',
- 'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', '_arg', 'absolute', 'add',
+ 'UFUNC_PYVALS_NAME', '_add_newdoc_ufunc', 'absolute', 'add',
'arccos', 'arccosh', 'arcsin', 'arcsinh', 'arctan', 'arctan2', 'arctanh',
'bitwise_and', 'bitwise_or', 'bitwise_xor', 'cbrt', 'ceil', 'conj',
'conjugate', 'copysign', 'cos', 'cosh', 'deg2rad', 'degrees', 'divide',
diff --git a/numpy/ctypeslib.py b/numpy/ctypeslib.py
index 24cfc6762..58f3ef9d3 100644
--- a/numpy/ctypeslib.py
+++ b/numpy/ctypeslib.py
@@ -51,11 +51,13 @@ Then, we're ready to call ``foo_func``:
"""
from __future__ import division, absolute_import, print_function
-__all__ = ['load_library', 'ndpointer', 'test', 'ctypes_load_library',
+__all__ = ['load_library', 'ndpointer', 'ctypes_load_library',
'c_intp', 'as_ctypes', 'as_array']
import os
-from numpy import integer, ndarray, dtype as _dtype, deprecate, array
+from numpy import (
+ integer, ndarray, dtype as _dtype, deprecate, array, frombuffer
+)
from numpy.core.multiarray import _flagdict, flagsobj
try:
@@ -90,11 +92,11 @@ else:
# Adapted from Albert Strasheim
def load_library(libname, loader_path):
"""
- It is possible to load a library using
- >>> lib = ctypes.cdll[<full_path_name>]
+ It is possible to load a library using
+ >>> lib = ctypes.cdll[<full_path_name>] # doctest: +SKIP
But there are cross-platform considerations, such as library file extensions,
- plus the fact Windows will just load the first library it finds with that name.
+ plus the fact Windows will just load the first library it finds with that name.
NumPy supplies the load_library function as a convenience.
Parameters
@@ -108,17 +110,17 @@ else:
Returns
-------
ctypes.cdll[libpath] : library object
- A ctypes library object
+ A ctypes library object
Raises
------
OSError
- If there is no library with the expected extension, or the
+ If there is no library with the expected extension, or the
library is defective and cannot be loaded.
"""
if ctypes.__version__ < '1.0.1':
import warnings
- warnings.warn("All features of ctypes interface may not work " \
+ warnings.warn("All features of ctypes interface may not work "
"with ctypes < 1.0.1", stacklevel=2)
ext = os.path.splitext(libname)[1]
@@ -175,24 +177,6 @@ def _flags_fromnum(num):
class _ndptr(_ndptr_base):
-
- def _check_retval_(self):
- """This method is called when this class is used as the .restype
- attribute for a shared-library function. It constructs a numpy
- array from a void pointer."""
- return array(self)
-
- @property
- def __array_interface__(self):
- return {'descr': self._dtype_.descr,
- '__ref': self,
- 'strides': None,
- 'shape': self._shape_,
- 'version': 3,
- 'typestr': self._dtype_.descr[0][1],
- 'data': (self.value, False),
- }
-
@classmethod
def from_param(cls, obj):
if not isinstance(obj, ndarray):
@@ -213,6 +197,34 @@ class _ndptr(_ndptr_base):
return obj.ctypes
+class _concrete_ndptr(_ndptr):
+ """
+ Like _ndptr, but with `_shape_` and `_dtype_` specified.
+
+ Notably, this means the pointer has enough information to reconstruct
+ the array, which is not generally true.
+ """
+ def _check_retval_(self):
+ """
+ This method is called when this class is used as the .restype
+ attribute for a shared-library function, to automatically wrap the
+ pointer into an array.
+ """
+ return self.contents
+
+ @property
+ def contents(self):
+ """
+ Get an ndarray viewing the data pointed to by this pointer.
+
+ This mirrors the `contents` attribute of a normal ctypes pointer
+ """
+ full_dtype = _dtype((self._dtype_, self._shape_))
+ full_ctype = ctypes.c_char * full_dtype.itemsize
+ buffer = ctypes.cast(self, ctypes.POINTER(full_ctype)).contents
+ return frombuffer(buffer, dtype=full_dtype).squeeze(axis=0)
+
+
# Factory for an array-checking class with from_param defined for
# use with ctypes argtypes mechanism
_pointer_type_cache = {}
@@ -269,8 +281,11 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
"""
+ # normalize dtype to an Optional[dtype]
if dtype is not None:
dtype = _dtype(dtype)
+
+ # normalize flags to an Optional[int]
num = None
if flags is not None:
if isinstance(flags, str):
@@ -287,60 +302,205 @@ def ndpointer(dtype=None, ndim=None, shape=None, flags=None):
except Exception:
raise TypeError("invalid flags specification")
num = _num_fromflags(flags)
+
+ # normalize shape to an Optional[tuple]
+ if shape is not None:
+ try:
+ shape = tuple(shape)
+ except TypeError:
+ # single integer -> 1-tuple
+ shape = (shape,)
+
+ cache_key = (dtype, ndim, shape, num)
+
try:
- return _pointer_type_cache[(dtype, ndim, shape, num)]
+ return _pointer_type_cache[cache_key]
except KeyError:
pass
+
+ # produce a name for the new type
if dtype is None:
name = 'any'
- elif dtype.names:
+ elif dtype.names is not None:
name = str(id(dtype))
else:
name = dtype.str
if ndim is not None:
name += "_%dd" % ndim
if shape is not None:
- try:
- strshape = [str(x) for x in shape]
- except TypeError:
- strshape = [str(shape)]
- shape = (shape,)
- shape = tuple(shape)
- name += "_"+"x".join(strshape)
+ name += "_"+"x".join(str(x) for x in shape)
if flags is not None:
name += "_"+"_".join(flags)
+
+ if dtype is not None and shape is not None:
+ base = _concrete_ndptr
else:
- flags = []
- klass = type("ndpointer_%s"%name, (_ndptr,),
+ base = _ndptr
+
+ klass = type("ndpointer_%s"%name, (base,),
{"_dtype_": dtype,
"_shape_" : shape,
"_ndim_" : ndim,
"_flags_" : num})
- _pointer_type_cache[(dtype, shape, ndim, num)] = klass
+ _pointer_type_cache[cache_key] = klass
return klass
-def _get_typecodes():
- """ Return a dictionary mapping __array_interface__ formats to ctypes types """
- ct = ctypes
- simple_types = [
- ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
- ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
- ct.c_float, ct.c_double,
- ]
+if ctypes is not None:
+ def _ctype_ndarray(element_type, shape):
+ """ Create an ndarray of the given element type and shape """
+ for dim in shape[::-1]:
+ element_type = dim * element_type
+ # prevent the type name include np.ctypeslib
+ element_type.__module__ = None
+ return element_type
+
- return {_dtype(ctype).str: ctype for ctype in simple_types}
+ def _get_scalar_type_map():
+ """
+ Return a dictionary mapping native endian scalar dtype to ctypes types
+ """
+ ct = ctypes
+ simple_types = [
+ ct.c_byte, ct.c_short, ct.c_int, ct.c_long, ct.c_longlong,
+ ct.c_ubyte, ct.c_ushort, ct.c_uint, ct.c_ulong, ct.c_ulonglong,
+ ct.c_float, ct.c_double,
+ ct.c_bool,
+ ]
+ return {_dtype(ctype): ctype for ctype in simple_types}
-def _ctype_ndarray(element_type, shape):
- """ Create an ndarray of the given element type and shape """
- for dim in shape[::-1]:
- element_type = element_type * dim
- return element_type
+ _scalar_type_map = _get_scalar_type_map()
-if ctypes is not None:
- _typecodes = _get_typecodes()
+ def _ctype_from_dtype_scalar(dtype):
+ # swapping twice ensure that `=` is promoted to <, >, or |
+ dtype_with_endian = dtype.newbyteorder('S').newbyteorder('S')
+ dtype_native = dtype.newbyteorder('=')
+ try:
+ ctype = _scalar_type_map[dtype_native]
+ except KeyError:
+ raise NotImplementedError(
+ "Converting {!r} to a ctypes type".format(dtype)
+ )
+
+ if dtype_with_endian.byteorder == '>':
+ ctype = ctype.__ctype_be__
+ elif dtype_with_endian.byteorder == '<':
+ ctype = ctype.__ctype_le__
+
+ return ctype
+
+
+ def _ctype_from_dtype_subarray(dtype):
+ element_dtype, shape = dtype.subdtype
+ ctype = _ctype_from_dtype(element_dtype)
+ return _ctype_ndarray(ctype, shape)
+
+
+ def _ctype_from_dtype_structured(dtype):
+ # extract offsets of each field
+ field_data = []
+ for name in dtype.names:
+ field_dtype, offset = dtype.fields[name][:2]
+ field_data.append((offset, name, _ctype_from_dtype(field_dtype)))
+
+ # ctypes doesn't care about field order
+ field_data = sorted(field_data, key=lambda f: f[0])
+
+ if len(field_data) > 1 and all(offset == 0 for offset, name, ctype in field_data):
+ # union, if multiple fields all at address 0
+ size = 0
+ _fields_ = []
+ for offset, name, ctype in field_data:
+ _fields_.append((name, ctype))
+ size = max(size, ctypes.sizeof(ctype))
+
+ # pad to the right size
+ if dtype.itemsize != size:
+ _fields_.append(('', ctypes.c_char * dtype.itemsize))
+
+ # we inserted manual padding, so always `_pack_`
+ return type('union', (ctypes.Union,), dict(
+ _fields_=_fields_,
+ _pack_=1,
+ __module__=None,
+ ))
+ else:
+ last_offset = 0
+ _fields_ = []
+ for offset, name, ctype in field_data:
+ padding = offset - last_offset
+ if padding < 0:
+ raise NotImplementedError("Overlapping fields")
+ if padding > 0:
+ _fields_.append(('', ctypes.c_char * padding))
+
+ _fields_.append((name, ctype))
+ last_offset = offset + ctypes.sizeof(ctype)
+
+
+ padding = dtype.itemsize - last_offset
+ if padding > 0:
+ _fields_.append(('', ctypes.c_char * padding))
+
+ # we inserted manual padding, so always `_pack_`
+ return type('struct', (ctypes.Structure,), dict(
+ _fields_=_fields_,
+ _pack_=1,
+ __module__=None,
+ ))
+
+
+ def _ctype_from_dtype(dtype):
+ if dtype.fields is not None:
+ return _ctype_from_dtype_structured(dtype)
+ elif dtype.subdtype is not None:
+ return _ctype_from_dtype_subarray(dtype)
+ else:
+ return _ctype_from_dtype_scalar(dtype)
+
+
+ def as_ctypes_type(dtype):
+ r"""
+ Convert a dtype into a ctypes type.
+
+ Parameters
+ ----------
+ dtype : dtype
+ The dtype to convert
+
+ Returns
+ -------
+ ctype
+ A ctype scalar, union, array, or struct
+
+ Raises
+ ------
+ NotImplementedError
+ If the conversion is not possible
+
+ Notes
+ -----
+ This function does not losslessly round-trip in either direction.
+
+ ``np.dtype(as_ctypes_type(dt))`` will:
+
+ - insert padding fields
+ - reorder fields to be sorted by offset
+ - discard field titles
+
+ ``as_ctypes_type(np.dtype(ctype))`` will:
+
+ - discard the class names of `ctypes.Structure`\ s and
+ `ctypes.Union`\ s
+ - convert single-element `ctypes.Union`\ s into single-element
+ `ctypes.Structure`\ s
+ - insert padding fields
+
+ """
+ return _ctype_from_dtype(_dtype(dtype))
+
def as_array(obj, shape=None):
"""
@@ -362,6 +522,7 @@ if ctypes is not None:
return array(obj, copy=False)
+
def as_ctypes(obj):
"""Create and return a ctypes object from a numpy array. Actually
anything that exposes the __array_interface__ is accepted."""
@@ -373,7 +534,11 @@ if ctypes is not None:
addr, readonly = ai["data"]
if readonly:
raise TypeError("readonly arrays unsupported")
- tp = _ctype_ndarray(_typecodes[ai["typestr"]], ai["shape"])
- result = tp.from_address(addr)
- result.__keep = ai
+
+ # can't use `_dtype((ai["typestr"], ai["shape"]))` here, as it overflows
+ # dtype.itemsize (gh-14214)
+ ctype_scalar = as_ctypes_type(ai["typestr"])
+ result_type = _ctype_ndarray(ctype_scalar, ai["shape"])
+ result = result_type.from_address(addr)
+ result.__keep = obj
return result
diff --git a/numpy/distutils/__init__.py b/numpy/distutils/__init__.py
index 8dd326920..8dbb63b28 100644
--- a/numpy/distutils/__init__.py
+++ b/numpy/distutils/__init__.py
@@ -1,14 +1,31 @@
-from __future__ import division, absolute_import, print_function
+"""
+An enhanced distutils, providing support for Fortran compilers, for BLAS,
+LAPACK and other common libraries for numerical computing, and more.
+
+Public submodules are::
+
+ misc_util
+ system_info
+ cpu_info
+ log
+ exec_command
+
+For details, please see the *Packaging* and *NumPy Distutils User Guide*
+sections of the NumPy Reference Guide.
-import sys
+For configuring the preference for and location of libraries like BLAS and
+LAPACK, and for setting include paths and similar build options, please see
+``site.cfg.example`` in the root of the NumPy repository or sdist.
+
+"""
+
+from __future__ import division, absolute_import, print_function
-from .__version__ import version as __version__
# Must import local ccompiler ASAP in order to get
# customized CCompiler.spawn effective.
from . import ccompiler
from . import unixccompiler
-from .info import __doc__
from .npy_pkg_config import *
# If numpy is installed, add distutils.test()
@@ -30,7 +47,7 @@ def customized_fcompiler(plat=None, compiler=None):
c.customize()
return c
-def customized_ccompiler(plat=None, compiler=None):
- c = ccompiler.new_compiler(plat=plat, compiler=compiler)
+def customized_ccompiler(plat=None, compiler=None, verbose=1):
+ c = ccompiler.new_compiler(plat=plat, compiler=compiler, verbose=verbose)
c.customize('')
return c
diff --git a/numpy/distutils/__version__.py b/numpy/distutils/__version__.py
deleted file mode 100644
index 969decbba..000000000
--- a/numpy/distutils/__version__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-major = 0
-minor = 4
-micro = 0
-version = '%(major)d.%(minor)d.%(micro)d' % (locals())
diff --git a/numpy/distutils/_shell_utils.py b/numpy/distutils/_shell_utils.py
new file mode 100644
index 000000000..82abd5f4e
--- /dev/null
+++ b/numpy/distutils/_shell_utils.py
@@ -0,0 +1,91 @@
+"""
+Helper functions for interacting with the shell, and consuming shell-style
+parameters provided in config files.
+"""
+import os
+import shlex
+import subprocess
+try:
+ from shlex import quote
+except ImportError:
+ from pipes import quote
+
+__all__ = ['WindowsParser', 'PosixParser', 'NativeParser']
+
+
+class CommandLineParser:
+ """
+ An object that knows how to split and join command-line arguments.
+
+ It must be true that ``argv == split(join(argv))`` for all ``argv``.
+ The reverse neednt be true - `join(split(cmd))` may result in the addition
+ or removal of unnecessary escaping.
+ """
+ @staticmethod
+ def join(argv):
+ """ Join a list of arguments into a command line string """
+ raise NotImplementedError
+
+ @staticmethod
+ def split(cmd):
+ """ Split a command line string into a list of arguments """
+ raise NotImplementedError
+
+
+class WindowsParser:
+ """
+ The parsing behavior used by `subprocess.call("string")` on Windows, which
+ matches the Microsoft C/C++ runtime.
+
+ Note that this is _not_ the behavior of cmd.
+ """
+ @staticmethod
+ def join(argv):
+ # note that list2cmdline is specific to the windows syntax
+ return subprocess.list2cmdline(argv)
+
+ @staticmethod
+ def split(cmd):
+ import ctypes # guarded import for systems without ctypes
+ try:
+ ctypes.windll
+ except AttributeError:
+ raise NotImplementedError
+
+ # Windows has special parsing rules for the executable (no quotes),
+ # that we do not care about - insert a dummy element
+ if not cmd:
+ return []
+ cmd = 'dummy ' + cmd
+
+ CommandLineToArgvW = ctypes.windll.shell32.CommandLineToArgvW
+ CommandLineToArgvW.restype = ctypes.POINTER(ctypes.c_wchar_p)
+ CommandLineToArgvW.argtypes = (ctypes.c_wchar_p, ctypes.POINTER(ctypes.c_int))
+
+ nargs = ctypes.c_int()
+ lpargs = CommandLineToArgvW(cmd, ctypes.byref(nargs))
+ args = [lpargs[i] for i in range(nargs.value)]
+ assert not ctypes.windll.kernel32.LocalFree(lpargs)
+
+ # strip the element we inserted
+ assert args[0] == "dummy"
+ return args[1:]
+
+
+class PosixParser:
+ """
+ The parsing behavior used by `subprocess.call("string", shell=True)` on Posix.
+ """
+ @staticmethod
+ def join(argv):
+ return ' '.join(quote(arg) for arg in argv)
+
+ @staticmethod
+ def split(cmd):
+ return shlex.split(cmd, posix=True)
+
+
+if os.name == 'nt':
+ NativeParser = WindowsParser
+elif os.name == 'posix':
+ NativeParser = PosixParser
diff --git a/numpy/distutils/ccompiler.py b/numpy/distutils/ccompiler.py
index b03fb96b2..643879023 100644
--- a/numpy/distutils/ccompiler.py
+++ b/numpy/distutils/ccompiler.py
@@ -6,6 +6,7 @@ import sys
import types
import shlex
import time
+import subprocess
from copy import copy
from distutils import ccompiler
from distutils.ccompiler import *
@@ -16,9 +17,11 @@ from distutils.version import LooseVersion
from numpy.distutils import log
from numpy.distutils.compat import get_exception
-from numpy.distutils.exec_command import exec_command
+from numpy.distutils.exec_command import (
+ filepath_from_subprocess_output, forward_bytes_to_stdout
+)
from numpy.distutils.misc_util import cyg2win32, is_sequence, mingw32, \
- quote_args, get_num_build_jobs, \
+ get_num_build_jobs, \
_commandline_dep_string
# globals for parallel build management
@@ -136,20 +139,41 @@ def CCompiler_spawn(self, cmd, display=None):
if is_sequence(display):
display = ' '.join(list(display))
log.info(display)
- s, o = exec_command(cmd)
- if s:
- if is_sequence(cmd):
- cmd = ' '.join(list(cmd))
- try:
- print(o)
- except UnicodeError:
- # When installing through pip, `o` can contain non-ascii chars
- pass
- if re.search('Too many open files', o):
- msg = '\nTry rerunning setup command until build succeeds.'
+ try:
+ if self.verbose:
+ subprocess.check_output(cmd)
else:
- msg = ''
- raise DistutilsExecError('Command "%s" failed with exit status %d%s' % (cmd, s, msg))
+ subprocess.check_output(cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ o = exc.output
+ s = exc.returncode
+ except OSError:
+ # OSError doesn't have the same hooks for the exception
+ # output, but exec_command() historically would use an
+ # empty string for EnvironmentError (base class for
+ # OSError)
+ o = b''
+ # status previously used by exec_command() for parent
+ # of OSError
+ s = 127
+ else:
+ # use a convenience return here so that any kind of
+ # caught exception will execute the default code after the
+ # try / except block, which handles various exceptions
+ return None
+
+ if is_sequence(cmd):
+ cmd = ' '.join(list(cmd))
+
+ if self.verbose:
+ forward_bytes_to_stdout(o)
+
+ if re.search(b'Too many open files', o):
+ msg = '\nTry rerunning setup command until build succeeds.'
+ else:
+ msg = ''
+ raise DistutilsExecError('Command "%s" failed with exit status %d%s' %
+ (cmd, s, msg))
replace_method(CCompiler, 'spawn', CCompiler_spawn)
@@ -404,10 +428,8 @@ def _compiler_to_string(compiler):
v = getattr(compiler, key)
mx = max(mx, len(key))
props.append((key, repr(v)))
- lines = []
- format = '%-' + repr(mx+1) + 's = %s'
- for prop in props:
- lines.append(format % prop)
+ fmt = '%-' + repr(mx+1) + 's = %s'
+ lines = [fmt % prop for prop in props]
return '\n'.join(lines)
def CCompiler_show_customization(self):
@@ -620,7 +642,21 @@ def CCompiler_get_version(self, force=False, ok_status=[0]):
version = m.group('version')
return version
- status, output = exec_command(version_cmd, use_tee=0)
+ try:
+ output = subprocess.check_output(version_cmd, stderr=subprocess.STDOUT)
+ except subprocess.CalledProcessError as exc:
+ output = exc.output
+ status = exc.returncode
+ except OSError:
+ # match the historical returns for a parent
+ # exception class caught by exec_command()
+ status = 127
+ output = b''
+ else:
+ # output isn't actually a filepath but we do this
+ # for now to match previous distutils behavior
+ output = filepath_from_subprocess_output(output)
+ status = 0
version = None
if status in ok_status:
@@ -695,10 +731,12 @@ if sys.platform == 'win32':
_distutils_new_compiler = new_compiler
def new_compiler (plat=None,
compiler=None,
- verbose=0,
+ verbose=None,
dry_run=0,
force=0):
# Try first C compilers from numpy.distutils.
+ if verbose is None:
+ verbose = log.get_threshold() <= log.INFO
if plat is None:
plat = os.name
try:
@@ -731,6 +769,7 @@ def new_compiler (plat=None,
raise DistutilsModuleError(("can't compile C/C++ code: unable to find class '%s' " +
"in module '%s'") % (class_name, module_name))
compiler = klass(None, dry_run, force)
+ compiler.verbose = verbose
log.debug('new_compiler returns %s' % (klass))
return compiler
@@ -738,8 +777,13 @@ ccompiler.new_compiler = new_compiler
_distutils_gen_lib_options = gen_lib_options
def gen_lib_options(compiler, library_dirs, runtime_library_dirs, libraries):
- library_dirs = quote_args(library_dirs)
- runtime_library_dirs = quote_args(runtime_library_dirs)
+ # the version of this function provided by CPython allows the following
+ # to return lists, which are unpacked automatically:
+ # - compiler.runtime_library_dir_option
+ # our version extends the behavior to:
+ # - compiler.library_dir_option
+ # - compiler.library_option
+ # - compiler.find_library_file
r = _distutils_gen_lib_options(compiler, library_dirs,
runtime_library_dirs, libraries)
lib_opts = []
@@ -759,68 +803,3 @@ for _cc in ['msvc9', 'msvc', '_msvc', 'bcpp', 'cygwinc', 'emxc', 'unixc']:
if _m is not None:
setattr(_m, 'gen_lib_options', gen_lib_options)
-_distutils_gen_preprocess_options = gen_preprocess_options
-def gen_preprocess_options (macros, include_dirs):
- include_dirs = quote_args(include_dirs)
- return _distutils_gen_preprocess_options(macros, include_dirs)
-ccompiler.gen_preprocess_options = gen_preprocess_options
-
-##Fix distutils.util.split_quoted:
-# NOTE: I removed this fix in revision 4481 (see ticket #619), but it appears
-# that removing this fix causes f2py problems on Windows XP (see ticket #723).
-# Specifically, on WinXP when gfortran is installed in a directory path, which
-# contains spaces, then f2py is unable to find it.
-import string
-_wordchars_re = re.compile(r'[^\\\'\"%s ]*' % string.whitespace)
-_squote_re = re.compile(r"'(?:[^'\\]|\\.)*'")
-_dquote_re = re.compile(r'"(?:[^"\\]|\\.)*"')
-_has_white_re = re.compile(r'\s')
-def split_quoted(s):
- s = s.strip()
- words = []
- pos = 0
-
- while s:
- m = _wordchars_re.match(s, pos)
- end = m.end()
- if end == len(s):
- words.append(s[:end])
- break
-
- if s[end] in string.whitespace: # unescaped, unquoted whitespace: now
- words.append(s[:end]) # we definitely have a word delimiter
- s = s[end:].lstrip()
- pos = 0
-
- elif s[end] == '\\': # preserve whatever is being escaped;
- # will become part of the current word
- s = s[:end] + s[end+1:]
- pos = end+1
-
- else:
- if s[end] == "'": # slurp singly-quoted string
- m = _squote_re.match(s, end)
- elif s[end] == '"': # slurp doubly-quoted string
- m = _dquote_re.match(s, end)
- else:
- raise RuntimeError("this can't happen (bad char '%c')" % s[end])
-
- if m is None:
- raise ValueError("bad string (mismatched %s quotes?)" % s[end])
-
- (beg, end) = m.span()
- if _has_white_re.search(s[beg+1:end-1]):
- s = s[:beg] + s[beg+1:end-1] + s[end:]
- pos = m.end() - 2
- else:
- # Keeping quotes when a quoted word does not contain
- # white-space. XXX: send a patch to distutils
- pos = m.end()
-
- if pos >= len(s):
- words.append(s)
- break
-
- return words
-ccompiler.split_quoted = split_quoted
-##Fix distutils.util.split_quoted:
diff --git a/numpy/distutils/command/autodist.py b/numpy/distutils/command/autodist.py
index d5e78963c..9c98b84d8 100644
--- a/numpy/distutils/command/autodist.py
+++ b/numpy/distutils/command/autodist.py
@@ -3,23 +3,24 @@
"""
from __future__ import division, absolute_import, print_function
+import textwrap
# We put them here since they could be easily reused outside numpy.distutils
def check_inline(cmd):
"""Return the inline identifier (may be empty)."""
cmd._check_compiler()
- body = """
-#ifndef __cplusplus
-static %(inline)s int static_func (void)
-{
- return 0;
-}
-%(inline)s int nostatic_func (void)
-{
- return 0;
-}
-#endif"""
+ body = textwrap.dedent("""
+ #ifndef __cplusplus
+ static %(inline)s int static_func (void)
+ {
+ return 0;
+ }
+ %(inline)s int nostatic_func (void)
+ {
+ return 0;
+ }
+ #endif""")
for kw in ['inline', '__inline__', '__inline']:
st = cmd.try_compile(body % {'inline': kw}, None, None)
@@ -28,15 +29,16 @@ static %(inline)s int static_func (void)
return ''
+
def check_restrict(cmd):
"""Return the restrict identifier (may be empty)."""
cmd._check_compiler()
- body = """
-static int static_func (char * %(restrict)s a)
-{
- return 0;
-}
-"""
+ body = textwrap.dedent("""
+ static int static_func (char * %(restrict)s a)
+ {
+ return 0;
+ }
+ """)
for kw in ['restrict', '__restrict__', '__restrict']:
st = cmd.try_compile(body % {'restrict': kw}, None, None)
@@ -45,52 +47,76 @@ static int static_func (char * %(restrict)s a)
return ''
+
def check_compiler_gcc4(cmd):
"""Return True if the C compiler is GCC 4.x."""
cmd._check_compiler()
- body = """
-int
-main()
-{
-#if (! defined __GNUC__) || (__GNUC__ < 4)
-#error gcc >= 4 required
-#endif
- return 0;
-}
-"""
+ body = textwrap.dedent("""
+ int
+ main()
+ {
+ #if (! defined __GNUC__) || (__GNUC__ < 4)
+ #error gcc >= 4 required
+ #endif
+ return 0;
+ }
+ """)
return cmd.try_compile(body, None, None)
def check_gcc_function_attribute(cmd, attribute, name):
"""Return True if the given function attribute is supported."""
cmd._check_compiler()
- body = """
-#pragma GCC diagnostic error "-Wattributes"
-#pragma clang diagnostic error "-Wattributes"
-
-int %s %s(void*);
-
-int
-main()
-{
- return 0;
-}
-""" % (attribute, name)
+ body = textwrap.dedent("""
+ #pragma GCC diagnostic error "-Wattributes"
+ #pragma clang diagnostic error "-Wattributes"
+
+ int %s %s(void*);
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (attribute, name)
return cmd.try_compile(body, None, None) != 0
+
+def check_gcc_function_attribute_with_intrinsics(cmd, attribute, name, code,
+ include):
+ """Return True if the given function attribute is supported with
+ intrinsics."""
+ cmd._check_compiler()
+ body = textwrap.dedent("""
+ #include<%s>
+ int %s %s(void)
+ {
+ %s;
+ return 0;
+ }
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (include, attribute, name, code)
+ return cmd.try_compile(body, None, None) != 0
+
+
def check_gcc_variable_attribute(cmd, attribute):
"""Return True if the given variable attribute is supported."""
cmd._check_compiler()
- body = """
-#pragma GCC diagnostic error "-Wattributes"
-#pragma clang diagnostic error "-Wattributes"
-
-int %s foo;
-
-int
-main()
-{
- return 0;
-}
-""" % (attribute, )
+ body = textwrap.dedent("""
+ #pragma GCC diagnostic error "-Wattributes"
+ #pragma clang diagnostic error "-Wattributes"
+
+ int %s foo;
+
+ int
+ main()
+ {
+ return 0;
+ }
+ """) % (attribute, )
return cmd.try_compile(body, None, None) != 0
diff --git a/numpy/distutils/command/build.py b/numpy/distutils/command/build.py
index 3d7101582..5a9da1217 100644
--- a/numpy/distutils/command/build.py
+++ b/numpy/distutils/command/build.py
@@ -16,8 +16,8 @@ class build(old_build):
user_options = old_build.user_options + [
('fcompiler=', None,
"specify the Fortran compiler type"),
- ('parallel=', 'j',
- "number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
help_options = old_build.help_options + [
@@ -28,17 +28,12 @@ class build(old_build):
def initialize_options(self):
old_build.initialize_options(self)
self.fcompiler = None
- self.parallel = None
+ self.warn_error = False
def finalize_options(self):
- if self.parallel:
- try:
- self.parallel = int(self.parallel)
- except ValueError:
- raise ValueError("--parallel/-j argument must be an integer")
build_scripts = self.build_scripts
old_build.finalize_options(self)
- plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
if build_scripts is None:
self.build_scripts = os.path.join(self.build_base,
'scripts' + plat_specifier)
diff --git a/numpy/distutils/command/build_clib.py b/numpy/distutils/command/build_clib.py
index 910493a77..13edf0717 100644
--- a/numpy/distutils/command/build_clib.py
+++ b/numpy/distutils/command/build_clib.py
@@ -33,15 +33,18 @@ class build_clib(old_build_clib):
('inplace', 'i', 'Build in-place'),
('parallel=', 'j',
"number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
- boolean_options = old_build_clib.boolean_options + ['inplace']
+ boolean_options = old_build_clib.boolean_options + ['inplace', 'warn-error']
def initialize_options(self):
old_build_clib.initialize_options(self)
self.fcompiler = None
self.inplace = 0
self.parallel = None
+ self.warn_error = None
def finalize_options(self):
if self.parallel:
@@ -50,7 +53,10 @@ class build_clib(old_build_clib):
except ValueError:
raise ValueError("--parallel/-j argument must be an integer")
old_build_clib.finalize_options(self)
- self.set_undefined_options('build', ('parallel', 'parallel'))
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ )
def have_f_sources(self):
for (lib_name, build_info) in self.libraries:
@@ -86,6 +92,10 @@ class build_clib(old_build_clib):
self.compiler.customize(self.distribution,
need_cxx=self.have_cxx_sources())
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
libraries = self.libraries
self.libraries = None
self.compiler.customize_cmd(self)
diff --git a/numpy/distutils/command/build_ext.py b/numpy/distutils/command/build_ext.py
index 18d36480a..cd9b1c6f1 100644
--- a/numpy/distutils/command/build_ext.py
+++ b/numpy/distutils/command/build_ext.py
@@ -33,6 +33,8 @@ class build_ext (old_build_ext):
"specify the Fortran compiler type"),
('parallel=', 'j',
"number of parallel jobs"),
+ ('warn-error', None,
+ "turn all warnings into errors (-Werror)"),
]
help_options = old_build_ext.help_options + [
@@ -40,10 +42,13 @@ class build_ext (old_build_ext):
show_fortran_compilers),
]
+ boolean_options = old_build_ext.boolean_options + ['warn-error']
+
def initialize_options(self):
old_build_ext.initialize_options(self)
self.fcompiler = None
self.parallel = None
+ self.warn_error = None
def finalize_options(self):
if self.parallel:
@@ -69,7 +74,10 @@ class build_ext (old_build_ext):
self.include_dirs.extend(incl_dirs)
old_build_ext.finalize_options(self)
- self.set_undefined_options('build', ('parallel', 'parallel'))
+ self.set_undefined_options('build',
+ ('parallel', 'parallel'),
+ ('warn_error', 'warn_error'),
+ )
def run(self):
if not self.extensions:
@@ -116,6 +124,11 @@ class build_ext (old_build_ext):
force=self.force)
self.compiler.customize(self.distribution)
self.compiler.customize_cmd(self)
+
+ if self.warn_error:
+ self.compiler.compiler.append('-Werror')
+ self.compiler.compiler_so.append('-Werror')
+
self.compiler.show_customization()
# Setup directory for storing generated extra DLL files on Windows
@@ -265,10 +278,10 @@ class build_ext (old_build_ext):
# we blindly assume that both packages need all of the libraries,
# resulting in a larger wheel than is required. This should be fixed,
# but it's so rare that I won't bother to handle it.
- pkg_roots = set(
+ pkg_roots = {
self.get_ext_fullname(ext.name).split('.')[0]
for ext in self.extensions
- )
+ }
for pkg_root in pkg_roots:
shared_lib_dir = os.path.join(pkg_root, '.libs')
if not self.inplace:
@@ -281,8 +294,8 @@ class build_ext (old_build_ext):
runtime_lib = os.path.join(self.extra_dll_dir, fn)
copy_file(runtime_lib, shared_lib_dir)
- def swig_sources(self, sources):
- # Do nothing. Swig sources have beed handled in build_src command.
+ def swig_sources(self, sources, extensions=None):
+ # Do nothing. Swig sources have been handled in build_src command.
return sources
def build_extension(self, ext):
diff --git a/numpy/distutils/command/build_src.py b/numpy/distutils/command/build_src.py
index 9def37822..3e0522c5f 100644
--- a/numpy/distutils/command/build_src.py
+++ b/numpy/distutils/command/build_src.py
@@ -28,20 +28,14 @@ def subst_vars(target, source, d):
"""Substitute any occurrence of @foo@ by d['foo'] from source file into
target."""
var = re.compile('@([a-zA-Z_]+)@')
- fs = open(source, 'r')
- try:
- ft = open(target, 'w')
- try:
+ with open(source, 'r') as fs:
+ with open(target, 'w') as ft:
for l in fs:
m = var.search(l)
if m:
ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))
else:
ft.write(l)
- finally:
- ft.close()
- finally:
- fs.close()
class build_src(build_ext.build_ext):
@@ -59,9 +53,12 @@ class build_src(build_ext.build_ext):
('inplace', 'i',
"ignore build-lib and put compiled extensions into the source " +
"directory alongside your pure Python modules"),
+ ('verbose-cfg', None,
+ "change logging level from WARN to INFO which will show all " +
+ "compiler output")
]
- boolean_options = ['force', 'inplace']
+ boolean_options = ['force', 'inplace', 'verbose-cfg']
help_options = []
@@ -82,6 +79,7 @@ class build_src(build_ext.build_ext):
self.swig_opts = None
self.swig_cpp = None
self.swig = None
+ self.verbose_cfg = None
def finalize_options(self):
self.set_undefined_options('build',
@@ -96,7 +94,7 @@ class build_src(build_ext.build_ext):
self.data_files = self.distribution.data_files or []
if self.build_src is None:
- plat_specifier = ".%s-%s" % (get_platform(), sys.version[0:3])
+ plat_specifier = ".{}-{}.{}".format(get_platform(), *sys.version_info[:2])
self.build_src = os.path.join(self.build_base, 'src'+plat_specifier)
# py_modules_dict is used in build_py.find_package_modules
@@ -204,7 +202,6 @@ class build_src(build_ext.build_ext):
def _build_npy_pkg_config(self, info, gd):
- import shutil
template, install_dir, subst_dict = info
template_dir = os.path.dirname(template)
for k, v in gd.items():
@@ -239,7 +236,6 @@ class build_src(build_ext.build_ext):
if not install_cmd.finalized == 1:
install_cmd.finalize_options()
build_npkg = False
- gd = {}
if self.inplace == 1:
top_prefix = '.'
build_npkg = True
@@ -370,9 +366,16 @@ class build_src(build_ext.build_ext):
# incl_dirs = extension.include_dirs
#if self.build_src not in incl_dirs:
# incl_dirs.append(self.build_src)
- build_dir = os.path.join(*([self.build_src]\
+ build_dir = os.path.join(*([self.build_src]
+name.split('.')[:-1]))
self.mkpath(build_dir)
+
+ if self.verbose_cfg:
+ new_level = log.INFO
+ else:
+ new_level = log.WARN
+ old_level = log.set_threshold(new_level)
+
for func in func_sources:
source = func(extension, build_dir)
if not source:
@@ -383,7 +386,7 @@ class build_src(build_ext.build_ext):
else:
log.info(" adding '%s' to sources." % (source,))
new_sources.append(source)
-
+ log.set_threshold(old_level)
return new_sources
def filter_py_files(self, sources):
@@ -427,9 +430,8 @@ class build_src(build_ext.build_ext):
else:
log.info("conv_template:> %s" % (target_file))
outstr = process_c_file(source)
- fid = open(target_file, 'w')
- fid.write(outstr)
- fid.close()
+ with open(target_file, 'w') as fid:
+ fid.write(outstr)
if _header_ext_match(target_file):
d = os.path.dirname(target_file)
if d not in include_dirs:
@@ -549,7 +551,7 @@ class build_src(build_ext.build_ext):
if is_sequence(extension):
name = extension[0]
else: name = extension.name
- target_dir = os.path.join(*([self.build_src]\
+ target_dir = os.path.join(*([self.build_src]
+name.split('.')[:-1]))
target_file = os.path.join(target_dir, ext_name + 'module.c')
new_sources.append(target_file)
@@ -725,25 +727,23 @@ _has_c_header = re.compile(r'-[*]-\s*c\s*-[*]-', re.I).search
_has_cpp_header = re.compile(r'-[*]-\s*c[+][+]\s*-[*]-', re.I).search
def get_swig_target(source):
- f = open(source, 'r')
- result = None
- line = f.readline()
- if _has_cpp_header(line):
- result = 'c++'
- if _has_c_header(line):
- result = 'c'
- f.close()
+ with open(source, 'r') as f:
+ result = None
+ line = f.readline()
+ if _has_cpp_header(line):
+ result = 'c++'
+ if _has_c_header(line):
+ result = 'c'
return result
def get_swig_modulename(source):
- f = open(source, 'r')
- name = None
- for line in f:
- m = _swig_module_name_match(line)
- if m:
- name = m.group('name')
- break
- f.close()
+ with open(source, 'r') as f:
+ name = None
+ for line in f:
+ m = _swig_module_name_match(line)
+ if m:
+ name = m.group('name')
+ break
return name
def _find_swig_target(target_dir, name):
@@ -762,15 +762,14 @@ _f2py_user_module_name_match = re.compile(r'\s*python\s*module\s*(?P<name>[\w_]*
def get_f2py_modulename(source):
name = None
- f = open(source)
- for line in f:
- m = _f2py_module_name_match(line)
- if m:
- if _f2py_user_module_name_match(line): # skip *__user__* names
- continue
- name = m.group('name')
- break
- f.close()
+ with open(source) as f:
+ for line in f:
+ m = _f2py_module_name_match(line)
+ if m:
+ if _f2py_user_module_name_match(line): # skip *__user__* names
+ continue
+ name = m.group('name')
+ break
return name
##########################################
diff --git a/numpy/distutils/command/config.py b/numpy/distutils/command/config.py
index 6b904d6ef..b9f2fa76e 100644
--- a/numpy/distutils/command/config.py
+++ b/numpy/distutils/command/config.py
@@ -8,6 +8,7 @@ import os, signal
import warnings
import sys
import subprocess
+import textwrap
from distutils.command.config import config as old_config
from distutils.command.config import LANG_EXT
@@ -18,6 +19,7 @@ import distutils
from numpy.distutils.exec_command import filepath_from_subprocess_output
from numpy.distutils.mingw32ccompiler import generate_manifest
from numpy.distutils.command.autodist import (check_gcc_function_attribute,
+ check_gcc_function_attribute_with_intrinsics,
check_gcc_variable_attribute,
check_inline,
check_restrict,
@@ -52,18 +54,18 @@ class config(old_config):
self.compiler.initialize()
except IOError:
e = get_exception()
- msg = """\
-Could not initialize compiler instance: do you have Visual Studio
-installed? If you are trying to build with MinGW, please use "python setup.py
-build -c mingw32" instead. If you have Visual Studio installed, check it is
-correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
-VS 2010 for >= 3.3).
-
-Original exception was: %s, and the Compiler class was %s
-============================================================================""" \
+ msg = textwrap.dedent("""\
+ Could not initialize compiler instance: do you have Visual Studio
+ installed? If you are trying to build with MinGW, please use "python setup.py
+ build -c mingw32" instead. If you have Visual Studio installed, check it is
+ correctly installed, and the right version (VS 2008 for python 2.6, 2.7 and 3.2,
+ VS 2010 for >= 3.3).
+
+ Original exception was: %s, and the Compiler class was %s
+ ============================================================================""") \
% (e, self.compiler.__class__.__name__)
- print ("""\
-============================================================================""")
+ print(textwrap.dedent("""\
+ ============================================================================"""))
raise distutils.errors.DistutilsPlatformError(msg)
# After MSVC is initialized, add an explicit /MANIFEST to linker
@@ -95,7 +97,7 @@ Original exception was: %s, and the Compiler class was %s
try:
ret = mth(*((self,)+args))
except (DistutilsExecError, CompileError):
- msg = str(get_exception())
+ str(get_exception())
self.compiler = save_compiler
raise CompileError
self.compiler = save_compiler
@@ -172,31 +174,31 @@ Original exception was: %s, and the Compiler class was %s
def check_decl(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
- body = """
-int main(void)
-{
-#ifndef %s
- (void) %s;
-#endif
- ;
- return 0;
-}""" % (symbol, symbol)
+ body = textwrap.dedent("""
+ int main(void)
+ {
+ #ifndef %s
+ (void) %s;
+ #endif
+ ;
+ return 0;
+ }""") % (symbol, symbol)
return self.try_compile(body, headers, include_dirs)
def check_macro_true(self, symbol,
headers=None, include_dirs=None):
self._check_compiler()
- body = """
-int main(void)
-{
-#if %s
-#else
-#error false or undefined macro
-#endif
- ;
- return 0;
-}""" % (symbol,)
+ body = textwrap.dedent("""
+ int main(void)
+ {
+ #if %s
+ #else
+ #error false or undefined macro
+ #endif
+ ;
+ return 0;
+ }""") % (symbol,)
return self.try_compile(body, headers, include_dirs)
@@ -207,14 +209,14 @@ int main(void)
self._check_compiler()
# First check the type can be compiled
- body = r"""
-int main(void) {
- if ((%(name)s *) 0)
- return 0;
- if (sizeof (%(name)s))
- return 0;
-}
-""" % {'name': type_name}
+ body = textwrap.dedent(r"""
+ int main(void) {
+ if ((%(name)s *) 0)
+ return 0;
+ if (sizeof (%(name)s))
+ return 0;
+ }
+ """) % {'name': type_name}
st = False
try:
@@ -234,33 +236,33 @@ int main(void) {
self._check_compiler()
# First check the type can be compiled
- body = r"""
-typedef %(type)s npy_check_sizeof_type;
-int main (void)
-{
- static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
- test_array [0] = 0
-
- ;
- return 0;
-}
-"""
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) >= 0)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
self._compile(body % {'type': type_name},
headers, include_dirs, 'c')
self._clean()
if expected:
- body = r"""
-typedef %(type)s npy_check_sizeof_type;
-int main (void)
-{
- static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
- test_array [0] = 0
-
- ;
- return 0;
-}
-"""
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) == %(size)s)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
for size in expected:
try:
self._compile(body % {'type': type_name, 'size': size},
@@ -271,17 +273,17 @@ int main (void)
pass
# this fails to *compile* if size > sizeof(type)
- body = r"""
-typedef %(type)s npy_check_sizeof_type;
-int main (void)
-{
- static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
- test_array [0] = 0
-
- ;
- return 0;
-}
-"""
+ body = textwrap.dedent(r"""
+ typedef %(type)s npy_check_sizeof_type;
+ int main (void)
+ {
+ static int test_array [1 - 2 * !(((long) (sizeof (npy_check_sizeof_type))) <= %(size)s)];
+ test_array [0] = 0
+
+ ;
+ return 0;
+ }
+ """)
# The principle is simple: we first find low and high bounds of size
# for the type, where low/high are looked up on a log scale. Then, we
@@ -424,6 +426,11 @@ int main (void)
def check_gcc_function_attribute(self, attribute, name):
return check_gcc_function_attribute(self, attribute, name)
+ def check_gcc_function_attribute_with_intrinsics(self, attribute, name,
+ code, include):
+ return check_gcc_function_attribute_with_intrinsics(self, attribute,
+ name, code, include)
+
def check_gcc_variable_attribute(self, attribute):
return check_gcc_variable_attribute(self, attribute)
@@ -435,10 +442,10 @@ int main (void)
of the program and its output.
"""
# 2008-11-16, RemoveMe
- warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n" \
- "Usage of get_output is deprecated: please do not \n" \
- "use it anymore, and avoid configuration checks \n" \
- "involving running executable on the target machine.\n" \
+ warnings.warn("\n+++++++++++++++++++++++++++++++++++++++++++++++++\n"
+ "Usage of get_output is deprecated: please do not \n"
+ "use it anymore, and avoid configuration checks \n"
+ "involving running executable on the target machine.\n"
"+++++++++++++++++++++++++++++++++++++++++++++++++\n",
DeprecationWarning, stacklevel=2)
self._check_compiler()
diff --git a/numpy/distutils/command/install.py b/numpy/distutils/command/install.py
index a1dd47755..c74ae9446 100644
--- a/numpy/distutils/command/install.py
+++ b/numpy/distutils/command/install.py
@@ -64,16 +64,15 @@ class install(old_install):
# bdist_rpm fails when INSTALLED_FILES contains
# paths with spaces. Such paths must be enclosed
# with double-quotes.
- f = open(self.record, 'r')
- lines = []
- need_rewrite = False
- for l in f:
- l = l.rstrip()
- if ' ' in l:
- need_rewrite = True
- l = '"%s"' % (l)
- lines.append(l)
- f.close()
+ with open(self.record, 'r') as f:
+ lines = []
+ need_rewrite = False
+ for l in f:
+ l = l.rstrip()
+ if ' ' in l:
+ need_rewrite = True
+ l = '"%s"' % (l)
+ lines.append(l)
if need_rewrite:
self.execute(write_file,
(self.record, lines),
diff --git a/numpy/distutils/command/install_clib.py b/numpy/distutils/command/install_clib.py
index 662aa00bd..6a73f7e33 100644
--- a/numpy/distutils/command/install_clib.py
+++ b/numpy/distutils/command/install_clib.py
@@ -19,6 +19,9 @@ class install_clib(Command):
def run (self):
build_clib_cmd = get_cmd("build_clib")
+ if not build_clib_cmd.build_clib:
+ # can happen if the user specified `--skip-build`
+ build_clib_cmd.finalize_options()
build_dir = build_clib_cmd.build_clib
# We need the compiler to get the library name -> filename association
diff --git a/numpy/distutils/conv_template.py b/numpy/distutils/conv_template.py
index 4a8746236..3bcb7b884 100644
--- a/numpy/distutils/conv_template.py
+++ b/numpy/distutils/conv_template.py
@@ -206,10 +206,8 @@ def parse_loop_header(loophead) :
dlist = []
if nsub is None :
raise ValueError("No substitution variables found")
- for i in range(nsub) :
- tmp = {}
- for name, vals in names :
- tmp[name] = vals[i]
+ for i in range(nsub):
+ tmp = {name: vals[i] for name, vals in names}
dlist.append(tmp)
return dlist
@@ -269,22 +267,21 @@ include_src_re = re.compile(r"(\n|\A)#include\s*['\"]"
def resolve_includes(source):
d = os.path.dirname(source)
- fid = open(source)
- lines = []
- for line in fid:
- m = include_src_re.match(line)
- if m:
- fn = m.group('name')
- if not os.path.isabs(fn):
- fn = os.path.join(d, fn)
- if os.path.isfile(fn):
- print('Including file', fn)
- lines.extend(resolve_includes(fn))
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ print('Including file', fn)
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
else:
lines.append(line)
- else:
- lines.append(line)
- fid.close()
return lines
def process_file(source):
@@ -333,6 +330,7 @@ def main():
except ValueError:
e = get_exception()
raise ValueError("In %s loop at %s" % (file, e))
+
outfile.write(writestr)
if __name__ == "__main__":
diff --git a/numpy/distutils/cpuinfo.py b/numpy/distutils/cpuinfo.py
index 580299347..bc9728335 100644
--- a/numpy/distutils/cpuinfo.py
+++ b/numpy/distutils/cpuinfo.py
@@ -242,16 +242,16 @@ class LinuxCPUInfo(CPUInfoBase):
return self.is_PentiumIV() and self.has_sse3()
def _is_Nocona(self):
- return self.is_Intel() \
- and (self.info[0]['cpu family'] == '6' \
- or self.info[0]['cpu family'] == '15' ) \
- and (self.has_sse3() and not self.has_ssse3())\
- and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None
+ return (self.is_Intel()
+ and (self.info[0]['cpu family'] == '6'
+ or self.info[0]['cpu family'] == '15')
+ and (self.has_sse3() and not self.has_ssse3())
+ and re.match(r'.*?\blm\b', self.info[0]['flags']) is not None)
def _is_Core2(self):
- return self.is_64bit() and self.is_Intel() and \
- re.match(r'.*?Core\(TM\)2\b', \
- self.info[0]['model name']) is not None
+ return (self.is_64bit() and self.is_Intel() and
+ re.match(r'.*?Core\(TM\)2\b',
+ self.info[0]['model name']) is not None)
def _is_Itanium(self):
return re.match(r'.*?Itanium\b',
@@ -632,13 +632,13 @@ class Win32CPUInfo(CPUInfoBase):
def _has_sse(self):
if self.is_Intel():
- return (self.info[0]['Family']==6 and \
- self.info[0]['Model'] in [7, 8, 9, 10, 11]) \
- or self.info[0]['Family']==15
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [7, 8, 9, 10, 11])
+ or self.info[0]['Family']==15)
elif self.is_AMD():
- return (self.info[0]['Family']==6 and \
- self.info[0]['Model'] in [6, 7, 8, 10]) \
- or self.info[0]['Family']==15
+ return ((self.info[0]['Family']==6 and
+ self.info[0]['Model'] in [6, 7, 8, 10])
+ or self.info[0]['Family']==15)
else:
return False
diff --git a/numpy/distutils/exec_command.py b/numpy/distutils/exec_command.py
index af7810d75..712f22666 100644
--- a/numpy/distutils/exec_command.py
+++ b/numpy/distutils/exec_command.py
@@ -57,6 +57,7 @@ import os
import sys
import subprocess
import locale
+import warnings
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
@@ -67,8 +68,10 @@ def filepath_from_subprocess_output(output):
Inherited from `exec_command`, and possibly incorrect.
"""
- output = output.decode(locale.getpreferredencoding(False),
- errors='replace')
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ output = output.decode(mylocale, errors='replace')
output = output.replace('\r\n', '\n')
# Another historical oddity
if output[-1:] == '\n':
@@ -79,7 +82,33 @@ def filepath_from_subprocess_output(output):
output = output.encode('ascii', errors='replace')
return output
+
+def forward_bytes_to_stdout(val):
+ """
+ Forward bytes from a subprocess call to the console, without attempting to
+ decode them.
+
+ The assumption is that the subprocess call already returned bytes in
+ a suitable encoding.
+ """
+ if sys.version_info.major < 3:
+ # python 2 has binary output anyway
+ sys.stdout.write(val)
+ elif hasattr(sys.stdout, 'buffer'):
+ # use the underlying binary output if there is one
+ sys.stdout.buffer.write(val)
+ elif hasattr(sys.stdout, 'encoding'):
+ # round-trip the encoding if necessary
+ sys.stdout.write(val.decode(sys.stdout.encoding))
+ else:
+ # make a best-guess at the encoding
+ sys.stdout.write(val.decode('utf8', errors='replace'))
+
+
def temp_file_name():
+ # 2019-01-30, 1.17
+ warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
+ 'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
fo, name = make_temp_file()
fo.close()
return name
@@ -146,9 +175,7 @@ def find_executable(exe, path=None, _cache={}):
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
- env = {}
- for name in names:
- env[name] = os.environ.get(name)
+ env = {name: os.environ.get(name) for name in names}
return env
def _update_environment( **env ):
@@ -156,24 +183,14 @@ def _update_environment( **env ):
for name, value in env.items():
os.environ[name] = value or ''
-def _supports_fileno(stream):
- """
- Returns True if 'stream' supports the file descriptor and allows fileno().
- """
- if hasattr(stream, 'fileno'):
- try:
- stream.fileno()
- return True
- except IOError:
- return False
- else:
- return False
-
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
_with_python = 1, **env ):
"""
Return (status,output) of executed command.
+ .. deprecated:: 1.17
+ Use subprocess.Popen instead
+
Parameters
----------
command : str
@@ -197,7 +214,10 @@ def exec_command(command, execute_in='', use_shell=None, use_tee=None,
Wild cards will not work for non-posix systems or when use_shell=0.
"""
- log.debug('exec_command(%r,%s)' % (command,\
+ # 2019-01-30, 1.17
+ warnings.warn('exec_command is deprecated since NumPy v1.17, use '
+ 'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
+ log.debug('exec_command(%r,%s)' % (command,
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
@@ -278,9 +298,10 @@ def _exec_command(command, use_shell=None, use_tee = None, **env):
return 127, ''
text, err = proc.communicate()
- text = text.decode(locale.getpreferredencoding(False),
- errors='replace')
-
+ mylocale = locale.getpreferredencoding(False)
+ if mylocale is None:
+ mylocale = 'ascii'
+ text = text.decode(mylocale, errors='replace')
text = text.replace('\r\n', '\n')
# Another historical oddity
if text[-1:] == '\n':
diff --git a/numpy/distutils/extension.py b/numpy/distutils/extension.py
index 935f3eec9..872bd5362 100644
--- a/numpy/distutils/extension.py
+++ b/numpy/distutils/extension.py
@@ -19,8 +19,24 @@ if sys.version_info[0] >= 3:
cxx_ext_re = re.compile(r'.*[.](cpp|cxx|cc)\Z', re.I).match
fortran_pyf_ext_re = re.compile(r'.*[.](f90|f95|f77|for|ftn|f|pyf)\Z', re.I).match
+
class Extension(old_Extension):
- def __init__ (
+ """
+ Parameters
+ ----------
+ name : str
+ Extension name.
+ sources : list of str
+ List of source file locations relative to the top directory of
+ the package.
+ extra_compile_args : list of str
+ Extra command line arguments to pass to the compiler.
+ extra_f77_compile_args : list of str
+ Extra command line arguments to pass to the fortran77 compiler.
+ extra_f90_compile_args : list of str
+ Extra command line arguments to pass to the fortran90 compiler.
+ """
+ def __init__(
self, name, sources,
include_dirs=None,
define_macros=None,
diff --git a/numpy/distutils/fcompiler/__init__.py b/numpy/distutils/fcompiler/__init__.py
index 3bd8057b4..3723470f3 100644
--- a/numpy/distutils/fcompiler/__init__.py
+++ b/numpy/distutils/fcompiler/__init__.py
@@ -37,6 +37,7 @@ from numpy.distutils.misc_util import is_string, all_strings, is_sequence, \
make_temp_file, get_shared_lib_extension
from numpy.distutils.exec_command import find_executable
from numpy.distutils.compat import get_exception
+from numpy.distutils import _shell_utils
from .environment import EnvironmentConfig
@@ -361,7 +362,7 @@ class FCompiler(CCompiler):
set_exe('archiver')
set_exe('ranlib')
- def update_executables(elf):
+ def update_executables(self):
"""Called at the beginning of customisation. Subclasses should
override this if they need to set up the executables dictionary.
@@ -474,13 +475,23 @@ class FCompiler(CCompiler):
fixflags = []
if f77:
+ f77 = _shell_utils.NativeParser.split(f77)
f77flags = self.flag_vars.f77
if f90:
+ f90 = _shell_utils.NativeParser.split(f90)
f90flags = self.flag_vars.f90
freeflags = self.flag_vars.free
# XXX Assuming that free format is default for f90 compiler.
fix = self.command_vars.compiler_fix
+ # NOTE: this and similar examples are probably just
+ # excluding --coverage flag when F90 = gfortran --coverage
+ # instead of putting that flag somewhere more appropriate
+ # this and similar examples where a Fortran compiler
+ # environment variable has been customized by CI or a user
+ # should perhaps eventually be more thoroughly tested and more
+ # robustly handled
if fix:
+ fix = _shell_utils.NativeParser.split(fix)
fixflags = self.flag_vars.fix + f90flags
oflags, aflags, dflags = [], [], []
@@ -506,11 +517,11 @@ class FCompiler(CCompiler):
fflags = self.flag_vars.flags + dflags + oflags + aflags
if f77:
- self.set_commands(compiler_f77=[f77]+f77flags+fflags)
+ self.set_commands(compiler_f77=f77+f77flags+fflags)
if f90:
- self.set_commands(compiler_f90=[f90]+freeflags+f90flags+fflags)
+ self.set_commands(compiler_f90=f90+freeflags+f90flags+fflags)
if fix:
- self.set_commands(compiler_fix=[fix]+fixflags+fflags)
+ self.set_commands(compiler_fix=fix+fixflags+fflags)
#XXX: Do we need LDSHARED->SOSHARED, LDFLAGS->SOFLAGS
diff --git a/numpy/distutils/fcompiler/absoft.py b/numpy/distutils/fcompiler/absoft.py
index 2c3edfe02..d14fee0e1 100644
--- a/numpy/distutils/fcompiler/absoft.py
+++ b/numpy/distutils/fcompiler/absoft.py
@@ -66,7 +66,7 @@ class AbsoftFCompiler(FCompiler):
def library_dir_option(self, dir):
if os.name=='nt':
- return ['-link', '/PATH:"%s"' % (dir)]
+ return ['-link', '/PATH:%s' % (dir)]
return "-L" + dir
def library_option(self, lib):
diff --git a/numpy/distutils/fcompiler/compaq.py b/numpy/distutils/fcompiler/compaq.py
index 07d502706..671b3a55f 100644
--- a/numpy/distutils/fcompiler/compaq.py
+++ b/numpy/distutils/fcompiler/compaq.py
@@ -95,7 +95,7 @@ class CompaqVisualFCompiler(FCompiler):
raise e
except ValueError:
e = get_exception()
- if not "path']" in str(e):
+ if not "'path'" in str(e):
print("Unexpected ValueError in", __file__)
raise e
diff --git a/numpy/distutils/fcompiler/environment.py b/numpy/distutils/fcompiler/environment.py
index 489784580..bb362d483 100644
--- a/numpy/distutils/fcompiler/environment.py
+++ b/numpy/distutils/fcompiler/environment.py
@@ -1,6 +1,7 @@
from __future__ import division, absolute_import, print_function
import os
+import warnings
from distutils.dist import Distribution
__metaclass__ = type
@@ -50,20 +51,30 @@ class EnvironmentConfig(object):
def _get_var(self, name, conf_desc):
hook, envvar, confvar, convert, append = conf_desc
+ if convert is None:
+ convert = lambda x: x
var = self._hook_handler(name, hook)
if envvar is not None:
envvar_contents = os.environ.get(envvar)
if envvar_contents is not None:
- if var and append and os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '0') == '1':
- var = var + [envvar_contents]
+ envvar_contents = convert(envvar_contents)
+ if var and append:
+ if os.environ.get('NPY_DISTUTILS_APPEND_FLAGS', '1') == '1':
+ var.extend(envvar_contents)
+ else:
+ # NPY_DISTUTILS_APPEND_FLAGS was explicitly set to 0
+ # to keep old (overwrite flags rather than append to
+ # them) behavior
+ var = envvar_contents
else:
var = envvar_contents
if confvar is not None and self._conf:
- var = self._conf.get(confvar, (None, var))[1]
- if convert is not None:
- var = convert(var)
+ if confvar in self._conf:
+ source, confvar_contents = self._conf[confvar]
+ var = convert(confvar_contents)
return var
+
def clone(self, hook_handler):
ec = self.__class__(distutils_section=self._distutils_section,
**self._conf_keys)
diff --git a/numpy/distutils/fcompiler/gnu.py b/numpy/distutils/fcompiler/gnu.py
index f151809c7..965c67041 100644
--- a/numpy/distutils/fcompiler/gnu.py
+++ b/numpy/distutils/fcompiler/gnu.py
@@ -265,8 +265,15 @@ class GnuFCompiler(FCompiler):
return []
def runtime_library_dir_option(self, dir):
+ if sys.platform[:3] == 'aix' or sys.platform == 'win32':
+ # Linux/Solaris/Unix support RPATH, Windows and AIX do not
+ raise NotImplementedError
+
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
sep = ',' if sys.platform == 'darwin' else '='
- return '-Wl,-rpath%s"%s"' % (sep, dir)
+ return '-Wl,-rpath%s%s' % (sep, dir)
class Gnu95FCompiler(GnuFCompiler):
@@ -311,6 +318,12 @@ class Gnu95FCompiler(GnuFCompiler):
module_dir_switch = '-J'
module_include_switch = '-I'
+ if sys.platform[:3] == 'aix':
+ executables['linker_so'].append('-lpthread')
+ if platform.architecture()[0][:2] == '64':
+ for key in ['compiler_f77', 'compiler_f90','compiler_fix','linker_so', 'linker_exe']:
+ executables[key].append('-maix64')
+
g2c = 'gfortran'
def _universal_flags(self, cmd):
diff --git a/numpy/distutils/fcompiler/ibm.py b/numpy/distutils/fcompiler/ibm.py
index c4cb2fca7..70d2132e1 100644
--- a/numpy/distutils/fcompiler/ibm.py
+++ b/numpy/distutils/fcompiler/ibm.py
@@ -78,15 +78,14 @@ class IBMFCompiler(FCompiler):
xlf_cfg = '/etc/opt/ibmcmp/xlf/%s/xlf.cfg' % version
fo, new_cfg = make_temp_file(suffix='_xlf.cfg')
log.info('Creating '+new_cfg)
- fi = open(xlf_cfg, 'r')
- crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
- for line in fi:
- m = crt1_match(line)
- if m:
- fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
- else:
- fo.write(line)
- fi.close()
+ with open(xlf_cfg, 'r') as fi:
+ crt1_match = re.compile(r'\s*crt\s*[=]\s*(?P<path>.*)/crt1.o').match
+ for line in fi:
+ m = crt1_match(line)
+ if m:
+ fo.write('crt = %s/bundle1.o\n' % (m.group('path')))
+ else:
+ fo.write(line)
fo.close()
opt.append('-F'+new_cfg)
return opt
diff --git a/numpy/distutils/fcompiler/intel.py b/numpy/distutils/fcompiler/intel.py
index 217eac8fb..51f681274 100644
--- a/numpy/distutils/fcompiler/intel.py
+++ b/numpy/distutils/fcompiler/intel.py
@@ -23,7 +23,10 @@ class BaseIntelFCompiler(FCompiler):
f + '.f', '-o', f + '.o']
def runtime_library_dir_option(self, dir):
- return '-Wl,-rpath="%s"' % dir
+ # TODO: could use -Xlinker here, if it's supported
+ assert "," not in dir
+
+ return '-Wl,-rpath=%s' % dir
class IntelFCompiler(BaseIntelFCompiler):
diff --git a/numpy/distutils/fcompiler/pg.py b/numpy/distutils/fcompiler/pg.py
index 99071800a..9c51947fd 100644
--- a/numpy/distutils/fcompiler/pg.py
+++ b/numpy/distutils/fcompiler/pg.py
@@ -33,7 +33,7 @@ class PGroupFCompiler(FCompiler):
'compiler_f77': ["pgfortran"],
'compiler_fix': ["pgfortran", "-Mfixed"],
'compiler_f90': ["pgfortran"],
- 'linker_so': ["pgfortran", "-shared", "-fpic"],
+ 'linker_so': ["pgfortran"],
'archiver': ["ar", "-cr"],
'ranlib': ["ranlib"]
}
@@ -56,8 +56,12 @@ class PGroupFCompiler(FCompiler):
def get_flags_linker_so(self):
return ["-dynamic", '-undefined', 'dynamic_lookup']
+ else:
+ def get_flags_linker_so(self):
+ return ["-shared", '-fpic']
+
def runtime_library_dir_option(self, dir):
- return '-R"%s"' % dir
+ return '-R%s' % dir
if sys.version_info >= (3, 5):
diff --git a/numpy/distutils/fcompiler/sun.py b/numpy/distutils/fcompiler/sun.py
index d477d3308..561ea854f 100644
--- a/numpy/distutils/fcompiler/sun.py
+++ b/numpy/distutils/fcompiler/sun.py
@@ -44,7 +44,7 @@ class SunFCompiler(FCompiler):
return opt
def runtime_library_dir_option(self, dir):
- return '-R"%s"' % dir
+ return '-R%s' % dir
if __name__ == '__main__':
from distutils import log
diff --git a/numpy/distutils/from_template.py b/numpy/distutils/from_template.py
index 65c60c498..c5c1163c6 100644
--- a/numpy/distutils/from_template.py
+++ b/numpy/distutils/from_template.py
@@ -212,22 +212,21 @@ include_src_re = re.compile(r"(\n|\A)\s*include\s*['\"](?P<name>[\w\d./\\]+[.]sr
def resolve_includes(source):
d = os.path.dirname(source)
- fid = open(source)
- lines = []
- for line in fid:
- m = include_src_re.match(line)
- if m:
- fn = m.group('name')
- if not os.path.isabs(fn):
- fn = os.path.join(d, fn)
- if os.path.isfile(fn):
- print('Including file', fn)
- lines.extend(resolve_includes(fn))
+ with open(source) as fid:
+ lines = []
+ for line in fid:
+ m = include_src_re.match(line)
+ if m:
+ fn = m.group('name')
+ if not os.path.isabs(fn):
+ fn = os.path.join(d, fn)
+ if os.path.isfile(fn):
+ print('Including file', fn)
+ lines.extend(resolve_includes(fn))
+ else:
+ lines.append(line)
else:
lines.append(line)
- else:
- lines.append(line)
- fid.close()
return lines
def process_file(source):
@@ -260,5 +259,6 @@ def main():
writestr = process_str(allstr)
outfile.write(writestr)
+
if __name__ == "__main__":
main()
diff --git a/numpy/distutils/info.py b/numpy/distutils/info.py
deleted file mode 100644
index 2f5310665..000000000
--- a/numpy/distutils/info.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""
-Enhanced distutils with Fortran compilers support and more.
-"""
-from __future__ import division, absolute_import, print_function
-
-postpone_import = True
diff --git a/numpy/distutils/line_endings.py b/numpy/distutils/line_endings.py
index 5ecb104ff..fe8fd1b0f 100644
--- a/numpy/distutils/line_endings.py
+++ b/numpy/distutils/line_endings.py
@@ -11,7 +11,8 @@ def dos2unix(file):
print(file, "Directory!")
return
- data = open(file, "rb").read()
+ with open(file, "rb") as fp:
+ data = fp.read()
if '\0' in data:
print(file, "Binary!")
return
@@ -19,9 +20,8 @@ def dos2unix(file):
newdata = re.sub("\r\n", "\n", data)
if newdata != data:
print('dos2unix:', file)
- f = open(file, "wb")
- f.write(newdata)
- f.close()
+ with open(file, "wb") as f:
+ f.write(newdata)
return file
else:
print(file, 'ok')
@@ -45,7 +45,8 @@ def unix2dos(file):
print(file, "Directory!")
return
- data = open(file, "rb").read()
+ with open(file, "rb") as fp:
+ data = fp.read()
if '\0' in data:
print(file, "Binary!")
return
@@ -53,9 +54,8 @@ def unix2dos(file):
newdata = re.sub("\n", "\r\n", newdata)
if newdata != data:
print('unix2dos:', file)
- f = open(file, "wb")
- f.write(newdata)
- f.close()
+ with open(file, "wb") as f:
+ f.write(newdata)
return file
else:
print(file, 'ok')
diff --git a/numpy/distutils/log.py b/numpy/distutils/log.py
index 37f9fe5dd..ff7de86b1 100644
--- a/numpy/distutils/log.py
+++ b/numpy/distutils/log.py
@@ -67,6 +67,8 @@ def set_threshold(level, force=False):
' %s to %s' % (prev_level, level))
return prev_level
+def get_threshold():
+ return _global_log.threshold
def set_verbosity(v, force=False):
prev_level = _global_log.threshold
diff --git a/numpy/distutils/mingw32ccompiler.py b/numpy/distutils/mingw32ccompiler.py
index e6bbe1996..075858cfe 100644
--- a/numpy/distutils/mingw32ccompiler.py
+++ b/numpy/distutils/mingw32ccompiler.py
@@ -13,6 +13,7 @@ import os
import sys
import subprocess
import re
+import textwrap
# Overwrite certain distutils.ccompiler functions:
import numpy.distutils.ccompiler
@@ -29,7 +30,6 @@ else:
import distutils.cygwinccompiler
from distutils.version import StrictVersion
-from numpy.distutils.ccompiler import gen_preprocess_options, gen_lib_options
from distutils.unixccompiler import UnixCCompiler
from distutils.msvccompiler import get_build_version as get_build_msvc_version
from distutils.errors import (DistutilsExecError, CompileError,
@@ -572,21 +572,21 @@ def msvc_manifest_xml(maj, min):
# embedded in the binary...
# This template was copied directly from the python 2.6 binary (using
# strings.exe from mingw on python.exe).
- template = """\
-<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
- <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
- <security>
- <requestedPrivileges>
- <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
- </requestedPrivileges>
- </security>
- </trustInfo>
- <dependency>
- <dependentAssembly>
- <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
- </dependentAssembly>
- </dependency>
-</assembly>"""
+ template = textwrap.dedent("""\
+ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0">
+ <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3">
+ <security>
+ <requestedPrivileges>
+ <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel>
+ </requestedPrivileges>
+ </security>
+ </trustInfo>
+ <dependency>
+ <dependentAssembly>
+ <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity>
+ </dependentAssembly>
+ </dependency>
+ </assembly>""")
return template % {'fullver': fullver, 'maj': maj, 'min': min}
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 073e841e8..7ba8ad862 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -10,10 +10,10 @@ import tempfile
import subprocess
import shutil
import multiprocessing
+import textwrap
import distutils
from distutils.errors import DistutilsError
-from distutils.msvccompiler import get_build_architecture
try:
from threading import local as tlocal
except ImportError:
@@ -219,15 +219,14 @@ def get_mathlibs(path=None):
raise DistutilsError('_numpyconfig.h not found in numpy include '
'dirs %r' % (dirs,))
- fid = open(config_file)
- mathlibs = []
- s = '#define MATHLIB'
- for line in fid:
- if line.startswith(s):
- value = line[len(s):].strip()
- if value:
- mathlibs.extend(value.split(','))
- fid.close()
+ with open(config_file) as fid:
+ mathlibs = []
+ s = '#define MATHLIB'
+ for line in fid:
+ if line.startswith(s):
+ value = line[len(s):].strip()
+ if value:
+ mathlibs.extend(value.split(','))
return mathlibs
def minrelpath(path):
@@ -444,14 +443,13 @@ def _get_f90_modules(source):
if not f90_ext_match(source):
return []
modules = []
- f = open(source, 'r')
- for line in f:
- m = f90_module_name_match(line)
- if m:
- name = m.group('name')
- modules.append(name)
- # break # XXX can we assume that there is one module per file?
- f.close()
+ with open(source, 'r') as f:
+ for line in f:
+ m = f90_module_name_match(line)
+ if m:
+ name = m.group('name')
+ modules.append(name)
+ # break # XXX can we assume that there is one module per file?
return modules
def is_string(s):
@@ -474,7 +472,7 @@ def is_sequence(seq):
return True
def is_glob_pattern(s):
- return is_string(s) and ('*' in s or '?' is s)
+ return is_string(s) and ('*' in s or '?' in s)
def as_list(seq):
if is_sequence(seq):
@@ -861,7 +859,7 @@ class Configuration(object):
print(message)
def warn(self, message):
- sys.stderr.write('Warning: %s' % (message,))
+ sys.stderr.write('Warning: %s\n' % (message,))
def set_options(self, **options):
"""
@@ -1689,10 +1687,44 @@ class Configuration(object):
and will be installed as foo.ini in the 'lib' subpath.
+ When cross-compiling with numpy distutils, it might be necessary to
+ use modified npy-pkg-config files. Using the default/generated files
+ will link with the host libraries (i.e. libnpymath.a). For
+        cross-compilation you of course need to link with target libraries,
+ while using the host Python installation.
+
+ You can copy out the numpy/core/lib/npy-pkg-config directory, add a
+ pkgdir value to the .ini files and set NPY_PKG_CONFIG_PATH environment
+ variable to point to the directory with the modified npy-pkg-config
+ files.
+
+ Example npymath.ini modified for cross-compilation::
+
+ [meta]
+ Name=npymath
+ Description=Portable, core math library implementing C99 standard
+ Version=0.1
+
+ [variables]
+ pkgname=numpy.core
+ pkgdir=/build/arm-linux-gnueabi/sysroot/usr/lib/python3.7/site-packages/numpy/core
+ prefix=${pkgdir}
+ libdir=${prefix}/lib
+ includedir=${prefix}/include
+
+ [default]
+ Libs=-L${libdir} -lnpymath
+ Cflags=-I${includedir}
+ Requires=mlib
+
+ [msvc]
+ Libs=/LIBPATH:${libdir} npymath.lib
+ Cflags=/INCLUDE:${includedir}
+ Requires=mlib
+
"""
if subst_dict is None:
subst_dict = {}
- basename = os.path.splitext(template)[0]
template = os.path.join(self.package_path, template)
if self.name in self.installed_pkg_config:
@@ -1835,67 +1867,53 @@ class Configuration(object):
def _get_svn_revision(self, path):
"""Return path's SVN revision number.
"""
- revision = None
- m = None
- cwd = os.getcwd()
try:
- os.chdir(path or '.')
- p = subprocess.Popen(['svnversion'], shell=True,
- stdout=subprocess.PIPE, stderr=None,
- close_fds=True)
- sout = p.stdout
- m = re.match(r'(?P<revision>\d+)', sout.read())
- except Exception:
+ output = subprocess.check_output(
+ ['svnversion'], shell=True, cwd=path)
+ except (subprocess.CalledProcessError, OSError):
pass
- os.chdir(cwd)
- if m:
- revision = int(m.group('revision'))
- return revision
+ else:
+ m = re.match(rb'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
if sys.platform=='win32' and os.environ.get('SVN_ASP_DOT_NET_HACK', None):
entries = njoin(path, '_svn', 'entries')
else:
entries = njoin(path, '.svn', 'entries')
if os.path.isfile(entries):
- f = open(entries)
- fstr = f.read()
- f.close()
+ with open(entries) as f:
+ fstr = f.read()
if fstr[:5] == '<?xml': # pre 1.4
m = re.search(r'revision="(?P<revision>\d+)"', fstr)
if m:
- revision = int(m.group('revision'))
+ return int(m.group('revision'))
else: # non-xml entries file --- check to be sure that
m = re.search(r'dir[\n\r]+(?P<revision>\d+)', fstr)
if m:
- revision = int(m.group('revision'))
- return revision
+ return int(m.group('revision'))
+ return None
def _get_hg_revision(self, path):
"""Return path's Mercurial revision number.
"""
- revision = None
- m = None
- cwd = os.getcwd()
try:
- os.chdir(path or '.')
- p = subprocess.Popen(['hg identify --num'], shell=True,
- stdout=subprocess.PIPE, stderr=None,
- close_fds=True)
- sout = p.stdout
- m = re.match(r'(?P<revision>\d+)', sout.read())
- except Exception:
+ output = subprocess.check_output(
+ ['hg identify --num'], shell=True, cwd=path)
+ except (subprocess.CalledProcessError, OSError):
pass
- os.chdir(cwd)
- if m:
- revision = int(m.group('revision'))
- return revision
+ else:
+ m = re.match(rb'(?P<revision>\d+)', output)
+ if m:
+ return int(m.group('revision'))
+
branch_fn = njoin(path, '.hg', 'branch')
branch_cache_fn = njoin(path, '.hg', 'branch.cache')
if os.path.isfile(branch_fn):
branch0 = None
- f = open(branch_fn)
- revision0 = f.read().strip()
- f.close()
+ with open(branch_fn) as f:
+ revision0 = f.read().strip()
branch_map = {}
for line in file(branch_cache_fn, 'r'):
@@ -1908,8 +1926,9 @@ class Configuration(object):
continue
branch_map[branch1] = revision1
- revision = branch_map.get(branch0)
- return revision
+ return branch_map.get(branch0)
+
+ return None
def get_version(self, version_file=None, version_variable=None):
@@ -2007,9 +2026,8 @@ class Configuration(object):
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
- f = open(target, 'w')
- f.write('version = %r\n' % (version))
- f.close()
+ with open(target, 'w') as f:
+ f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
@@ -2048,9 +2066,8 @@ class Configuration(object):
if not os.path.isfile(target):
version = str(revision)
self.info('Creating %s (version=%r)' % (target, version))
- f = open(target, 'w')
- f.write('version = %r\n' % (version))
- f.close()
+ with open(target, 'w') as f:
+ f.write('version = %r\n' % (version))
def rm_file(f=target,p=self.info):
if delete:
@@ -2110,9 +2127,22 @@ def get_numpy_include_dirs():
return include_dirs
def get_npy_pkg_dir():
- """Return the path where to find the npy-pkg-config directory."""
+ """Return the path where to find the npy-pkg-config directory.
+
+ If the NPY_PKG_CONFIG_PATH environment variable is set, the value of that
+ is returned. Otherwise, a path inside the location of the numpy module is
+ returned.
+
+        The NPY_PKG_CONFIG_PATH environment variable is useful when
+        cross-compiling: it allows maintaining customized npy-pkg-config .ini
+        files for the cross-compilation environment and using them when building.
+
+ """
# XXX: import here for bootstrapping reasons
import numpy
+ d = os.environ.get('NPY_PKG_CONFIG_PATH')
+ if d is not None:
+ return d
d = os.path.join(os.path.dirname(numpy.__file__),
'core', 'lib', 'npy-pkg-config')
return d
@@ -2226,7 +2256,6 @@ def is_bootstrapping():
return True
except AttributeError:
return False
- __NUMPY_SETUP__ = False
#########################
@@ -2287,46 +2316,44 @@ def generate_config_py(target):
from numpy.distutils.system_info import system_info
from distutils.dir_util import mkpath
mkpath(os.path.dirname(target))
- f = open(target, 'w')
- f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
- f.write('# It contains system_info results at the time of building this package.\n')
- f.write('__all__ = ["get_info","show"]\n\n')
+ with open(target, 'w') as f:
+ f.write('# This file is generated by numpy\'s %s\n' % (os.path.basename(sys.argv[0])))
+ f.write('# It contains system_info results at the time of building this package.\n')
+ f.write('__all__ = ["get_info","show"]\n\n')
+
+ # For gfortran+msvc combination, extra shared libraries may exist
+ f.write(textwrap.dedent("""
+ import os
+ import sys
+
+ extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
+
+ if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
+ os.environ.setdefault('PATH', '')
+ os.environ['PATH'] += os.pathsep + extra_dll_dir
+
+ """))
+
+ for k, i in system_info.saved_results.items():
+ f.write('%s=%r\n' % (k, i))
+ f.write(textwrap.dedent(r'''
+ def get_info(name):
+ g = globals()
+ return g.get(name, g.get(name + "_info", {}))
+
+ def show():
+ for name,info_dict in globals().items():
+ if name[0] == "_" or type(info_dict) is not type({}): continue
+ print(name + ":")
+ if not info_dict:
+ print(" NOT AVAILABLE")
+ for k,v in info_dict.items():
+ v = str(v)
+ if k == "sources" and len(v) > 200:
+ v = v[:60] + " ...\n... " + v[-60:]
+ print(" %s = %s" % (k,v))
+ '''))
- # For gfortran+msvc combination, extra shared libraries may exist
- f.write("""
-
-import os
-import sys
-
-extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
-
-if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
- os.environ.setdefault('PATH', '')
- os.environ['PATH'] += os.pathsep + extra_dll_dir
-
-""")
-
- for k, i in system_info.saved_results.items():
- f.write('%s=%r\n' % (k, i))
- f.write(r'''
-def get_info(name):
- g = globals()
- return g.get(name, g.get(name + "_info", {}))
-
-def show():
- for name,info_dict in globals().items():
- if name[0] == "_" or type(info_dict) is not type({}): continue
- print(name + ":")
- if not info_dict:
- print(" NOT AVAILABLE")
- for k,v in info_dict.items():
- v = str(v)
- if k == "sources" and len(v) > 200:
- v = v[:60] + " ...\n... " + v[-60:]
- print(" %s = %s" % (k,v))
- ''')
-
- f.close()
return target
def msvc_version(compiler):
@@ -2336,3 +2363,9 @@ def msvc_version(compiler):
raise ValueError("Compiler instance is not msvc (%s)"\
% compiler.compiler_type)
return compiler._MSVCCompiler__version
+
+def get_build_architecture():
+ # Importing distutils.msvccompiler triggers a warning on non-Windows
+ # systems, so delay the import to here.
+ from distutils.msvccompiler import get_build_architecture
+ return get_build_architecture()
diff --git a/numpy/distutils/npy_pkg_config.py b/numpy/distutils/npy_pkg_config.py
index ea16e772d..48584b4c4 100644
--- a/numpy/distutils/npy_pkg_config.py
+++ b/numpy/distutils/npy_pkg_config.py
@@ -222,9 +222,7 @@ def parse_meta(config):
if not config.has_section('meta'):
raise FormatError("No meta section found !")
- d = {}
- for name, value in config.items('meta'):
- d[name] = value
+ d = dict(config.items('meta'))
for k in ['name', 'description', 'version']:
if not k in d:
@@ -428,7 +426,7 @@ if __name__ == '__main__':
if options.define_variable:
m = re.search(r'([\S]+)=([\S]+)', options.define_variable)
if not m:
- raise ValueError("--define-variable option should be of " \
+ raise ValueError("--define-variable option should be of "
"the form --define-variable=foo=bar")
else:
name = m.group(1)
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index 79adcc334..5fd1003ab 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -17,6 +17,7 @@ classes are available:
atlas_3_10_blas_threads_info,
lapack_atlas_3_10_info
lapack_atlas_3_10_threads_info
+ flame_info
blas_info
lapack_info
openblas_info
@@ -92,20 +93,20 @@ src_dirs = /usr/local/src:/opt/src
search_static_first = 0
[fftw]
-fftw_libs = rfftw, fftw
-fftw_opt_libs = rfftw_threaded, fftw_threaded
-# if the above aren't found, look for {s,d}fftw_libs and {s,d}fftw_opt_libs
+libraries = rfftw, fftw
[atlas]
library_dirs = /usr/lib/3dnow:/usr/lib/3dnow/atlas
# for overriding the names of the atlas libraries
-atlas_libs = lapack, f77blas, cblas, atlas
+libraries = lapack, f77blas, cblas, atlas
[x11]
library_dirs = /usr/X11R6/lib
include_dirs = /usr/X11R6/include
----------
+Note that the ``libraries`` key is the default setting for libraries.
+
Authors:
Pearu Peterson <pearu@cens.ioc.ee>, February 2002
David M. Cooke <cookedm@physics.mcmaster.ca>, April 2002
@@ -126,6 +127,9 @@ import os
import re
import copy
import warnings
+import subprocess
+import textwrap
+
from glob import glob
from functools import reduce
if sys.version_info[0] < 3:
@@ -142,7 +146,7 @@ else:
from distutils.errors import DistutilsError
from distutils.dist import Distribution
import distutils.sysconfig
-from distutils import log
+from numpy.distutils import log
from distutils.util import get_platform
from numpy.distutils.exec_command import (
@@ -153,6 +157,7 @@ from numpy.distutils.misc_util import (is_sequence, is_string,
from numpy.distutils.command.config import config as cmd_config
from numpy.distutils.compat import get_exception
from numpy.distutils import customized_ccompiler
+from numpy.distutils import _shell_utils
import distutils.ccompiler
import tempfile
import shutil
@@ -164,6 +169,17 @@ _bits = {'32bit': 32, '64bit': 64}
platform_bits = _bits[platform.architecture()[0]]
+def _c_string_literal(s):
+ """
+ Convert a python string into a literal suitable for inclusion into C code
+ """
+ # only these three characters are forbidden in C strings
+ s = s.replace('\\', r'\\')
+ s = s.replace('"', r'\"')
+ s = s.replace('\n', r'\n')
+ return '"{}"'.format(s)
+
+
def libpaths(paths, bits):
"""Return a list of library paths valid on 32 or 64 bit systems.
@@ -286,27 +302,21 @@ else:
default_x11_include_dirs.extend(['/usr/lib/X11/include',
'/usr/include/X11'])
- import subprocess as sp
- tmp = None
- try:
- # Explicitly open/close file to avoid ResourceWarning when
- # tests are run in debug mode Python 3.
- tmp = open(os.devnull, 'w')
- p = sp.Popen(["gcc", "-print-multiarch"], stdout=sp.PIPE,
- stderr=tmp)
- except (OSError, DistutilsError):
- # OSError if gcc is not installed, or SandboxViolation (DistutilsError
- # subclass) if an old setuptools bug is triggered (see gh-3160).
- pass
- else:
- triplet = str(p.communicate()[0].decode().strip())
- if p.returncode == 0:
- # gcc supports the "-print-multiarch" option
- default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
- default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
- finally:
- if tmp is not None:
- tmp.close()
+ with open(os.devnull, 'w') as tmp:
+ try:
+ p = subprocess.Popen(["gcc", "-print-multiarch"], stdout=subprocess.PIPE,
+ stderr=tmp)
+ except (OSError, DistutilsError):
+ # OSError if gcc is not installed, or SandboxViolation (DistutilsError
+ # subclass) if an old setuptools bug is triggered (see gh-3160).
+ pass
+ else:
+ triplet = str(p.communicate()[0].decode().strip())
+ if p.returncode == 0:
+ # gcc supports the "-print-multiarch" option
+ default_x11_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+ default_lib_dirs += [os.path.join("/usr/lib/", triplet)]
+
if os.path.join(sys.prefix, 'lib') not in default_lib_dirs:
default_lib_dirs.insert(0, os.path.join(sys.prefix, 'lib'))
@@ -376,6 +386,7 @@ def get_info(name, notfound_action=0):
'atlas_3_10_blas_threads': atlas_3_10_blas_threads_info,
'lapack_atlas_3_10': lapack_atlas_3_10_info, # use lapack_opt instead
'lapack_atlas_3_10_threads': lapack_atlas_3_10_threads_info, # ditto
+ 'flame': flame_info, # use lapack_opt instead
'mkl': mkl_info,
# openblas which may or may not have embedded lapack
'openblas': openblas_info, # use blas_opt instead
@@ -437,14 +448,27 @@ class NotFoundError(DistutilsError):
"""Some third-party program or library is not found."""
+class AliasedOptionError(DistutilsError):
+ """
+    Aliased entries in config files should not exist.
+ In section '{section}' we found multiple appearances of options {options}."""
+
+
class AtlasNotFoundError(NotFoundError):
"""
- Atlas (http://math-atlas.sourceforge.net/) libraries not found.
+ Atlas (http://github.com/math-atlas/math-atlas) libraries not found.
Directories to search for the libraries can be specified in the
numpy/distutils/site.cfg file (section [atlas]) or by setting
the ATLAS environment variable."""
+class FlameNotFoundError(NotFoundError):
+ """
+ FLAME (http://www.cs.utexas.edu/~flame/web/) libraries not found.
+ Directories to search for the libraries can be specified in the
+ numpy/distutils/site.cfg file (section [flame])."""
+
+
class LapackNotFoundError(NotFoundError):
"""
Lapack (http://www.netlib.org/lapack/) libraries not found.
@@ -461,6 +485,13 @@ class LapackSrcNotFoundError(LapackNotFoundError):
the LAPACK_SRC environment variable."""
+class BlasOptNotFoundError(NotFoundError):
+ """
+ Optimized (vendor) Blas libraries are not found.
+ Falls back to netlib Blas library which has worse performance.
+ A better performance should be easily gained by switching
+ Blas library."""
+
class BlasNotFoundError(NotFoundError):
"""
Blas (http://www.netlib.org/blas/) libraries not found.
@@ -519,7 +550,6 @@ class system_info(object):
dir_env_var = None
search_static_first = 0 # XXX: disabled by default, may disappear in
# future unless it is proved to be useful.
- verbosity = 1
saved_results = {}
notfounderror = NotFoundError
@@ -527,7 +557,6 @@ class system_info(object):
def __init__(self,
default_lib_dirs=default_lib_dirs,
default_include_dirs=default_include_dirs,
- verbosity=1,
):
self.__class__.info = {}
self.local_prefixes = []
@@ -595,6 +624,39 @@ class system_info(object):
dict_append(info, **extra_info)
self.saved_results[self.__class__.__name__] = info
+ def get_option_single(self, *options):
+ """ Ensure that only one of `options` are found in the section
+
+ Parameters
+ ----------
+ *options : list of str
+ a list of options to be found in the section (``self.section``)
+
+ Returns
+ -------
+ str :
+ the option that is uniquely found in the section
+
+ Raises
+ ------
+ AliasedOptionError :
+ in case more than one of the options are found
+ """
+ found = map(lambda opt: self.cp.has_option(self.section, opt), options)
+ found = list(found)
+ if sum(found) == 1:
+ return options[found.index(True)]
+ elif sum(found) == 0:
+ # nothing is found anyways
+ return options[0]
+
+ # Else we have more than 1 key found
+ if AliasedOptionError.__doc__ is None:
+ raise AliasedOptionError()
+ raise AliasedOptionError(AliasedOptionError.__doc__.format(
+ section=self.section, options='[{}]'.format(', '.join(options))))
+
+
def has_info(self):
return self.__class__.__name__ in self.saved_results
@@ -608,8 +670,9 @@ class system_info(object):
for key in ['extra_compile_args', 'extra_link_args']:
# Get values
opt = self.cp.get(self.section, key)
+ opt = _shell_utils.NativeParser.split(opt)
if opt:
- tmp = {key : [opt]}
+ tmp = {key: opt}
dict_append(info, **tmp)
return info
@@ -639,7 +702,7 @@ class system_info(object):
log.info(' FOUND:')
res = self.saved_results.get(self.__class__.__name__)
- if self.verbosity > 0 and flag:
+ if log.get_threshold() <= log.INFO and flag:
for k, v in res.items():
v = str(v)
if k in ['sources', 'libraries'] and len(v) > 270:
@@ -849,7 +912,7 @@ class system_info(object):
"""Return a list of existing paths composed by all combinations
of items from the arguments.
"""
- return combine_paths(*args, **{'verbosity': self.verbosity})
+ return combine_paths(*args)
class fft_opt_info(system_info):
@@ -884,7 +947,9 @@ class fftw_info(system_info):
"""Returns True on successful version detection, else False"""
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
- libs = self.get_libs(self.section + '_libs', ver_param['libs'])
+
+ opt = self.get_option_single(self.section + '_libs', 'libraries')
+ libs = self.get_libs(opt, ver_param['libs'])
info = self.check_libs(lib_dirs, libs)
if info is not None:
flag = 0
@@ -893,7 +958,6 @@ class fftw_info(system_info):
== len(ver_param['includes']):
dict_append(info, include_dirs=[d])
flag = 1
- incl_dirs = [d]
break
if flag:
dict_append(info, define_macros=ver_param['macros'])
@@ -1045,9 +1109,9 @@ class mkl_info(system_info):
for d in paths:
dirs = glob(os.path.join(d, 'mkl', '*'))
dirs += glob(os.path.join(d, 'mkl*'))
- for d in dirs:
- if os.path.isdir(os.path.join(d, 'lib')):
- return d
+ for sub_dir in dirs:
+ if os.path.isdir(os.path.join(sub_dir, 'lib')):
+ return sub_dir
return None
def __init__(self):
@@ -1070,7 +1134,8 @@ class mkl_info(system_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
incl_dirs = self.get_include_dirs()
- mkl_libs = self.get_libs('mkl_libs', self._lib_mkl)
+ opt = self.get_option_single('mkl_libs', 'libraries')
+ mkl_libs = self.get_libs(opt, self._lib_mkl)
info = self.check_libs2(lib_dirs, mkl_libs)
if info is None:
return
@@ -1117,15 +1182,16 @@ class atlas_info(system_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
- atlas_libs = self.get_libs('atlas_libs',
- self._lib_names + self._lib_atlas)
+ opt = self.get_option_single('atlas_libs', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
lapack_libs = self.get_libs('lapack_libs', self._lib_lapack)
atlas = None
lapack = None
atlas_1 = None
for d in lib_dirs:
- atlas = self.check_libs2(d, atlas_libs, [])
+ # FIXME: lapack_atlas is unused
lapack_atlas = self.check_libs2(d, ['lapack_atlas'], [])
+ atlas = self.check_libs2(d, atlas_libs, [])
if atlas is not None:
lib_dirs2 = [d] + self.combine_paths(d, ['atlas*', 'ATLAS*'])
lapack = self.check_libs2(lib_dirs2, lapack_libs, [])
@@ -1157,11 +1223,11 @@ class atlas_info(system_info):
else:
dict_append(info, **atlas)
dict_append(info, define_macros=[('ATLAS_WITHOUT_LAPACK', None)])
- message = """
-*********************************************************************
- Could not find lapack library within the ATLAS installation.
-*********************************************************************
-"""
+ message = textwrap.dedent("""
+ *********************************************************************
+ Could not find lapack library within the ATLAS installation.
+ *********************************************************************
+ """)
warnings.warn(message, stacklevel=2)
self.set_info(**info)
return
@@ -1184,15 +1250,15 @@ class atlas_info(system_info):
if lapack_lib is not None:
sz = os.stat(lapack_lib)[6]
if sz <= 4000 * 1024:
- message = """
-*********************************************************************
- Lapack library (from ATLAS) is probably incomplete:
- size of %s is %sk (expected >4000k)
-
- Follow the instructions in the KNOWN PROBLEMS section of the file
- numpy/INSTALL.txt.
-*********************************************************************
-""" % (lapack_lib, sz / 1024)
+ message = textwrap.dedent("""
+ *********************************************************************
+ Lapack library (from ATLAS) is probably incomplete:
+ size of %s is %sk (expected >4000k)
+
+ Follow the instructions in the KNOWN PROBLEMS section of the file
+ numpy/INSTALL.txt.
+ *********************************************************************
+ """) % (lapack_lib, sz / 1024)
warnings.warn(message, stacklevel=2)
else:
info['language'] = 'f77'
@@ -1209,8 +1275,8 @@ class atlas_blas_info(atlas_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
- atlas_libs = self.get_libs('atlas_libs',
- self._lib_names + self._lib_atlas)
+ opt = self.get_option_single('atlas_libs', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names + self._lib_atlas)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
@@ -1262,8 +1328,8 @@ class atlas_3_10_blas_info(atlas_3_10_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
info = {}
- atlas_libs = self.get_libs('atlas_libs',
- self._lib_names)
+        opt = self.get_option_single('atlas_libs', 'libraries')
+ atlas_libs = self.get_libs(opt, self._lib_names)
atlas = self.check_libs2(lib_dirs, atlas_libs, [])
if atlas is None:
return
@@ -1314,7 +1380,8 @@ class lapack_info(system_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
- lapack_libs = self.get_libs('lapack_libs', self._lib_names)
+ opt = self.get_option_single('lapack_libs', 'libraries')
+ lapack_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, lapack_libs, [])
if info is None:
return
@@ -1462,23 +1529,23 @@ def get_atlas_version(**config):
try:
s, o = c.get_output(atlas_version_c_text,
libraries=libraries, library_dirs=library_dirs,
- use_tee=(system_info.verbosity > 0))
+ )
if s and re.search(r'undefined reference to `_gfortran', o, re.M):
s, o = c.get_output(atlas_version_c_text,
libraries=libraries + ['gfortran'],
library_dirs=library_dirs,
- use_tee=(system_info.verbosity > 0))
+ )
if not s:
- warnings.warn("""
-*****************************************************
-Linkage with ATLAS requires gfortran. Use
+ warnings.warn(textwrap.dedent("""
+ *****************************************************
+ Linkage with ATLAS requires gfortran. Use
- python setup.py config_fc --fcompiler=gnu95 ...
+ python setup.py config_fc --fcompiler=gnu95 ...
-when building extension libraries that use ATLAS.
-Make sure that -lgfortran is used for C++ extensions.
-*****************************************************
-""", stacklevel=2)
+ when building extension libraries that use ATLAS.
+ Make sure that -lgfortran is used for C++ extensions.
+ *****************************************************
+ """), stacklevel=2)
dict_append(info, language='f90',
define_macros=[('ATLAS_REQUIRES_GFORTRAN', None)])
except Exception: # failed to get version from file -- maybe on Windows
@@ -1496,7 +1563,7 @@ Make sure that -lgfortran is used for C++ extensions.
atlas_version = os.environ.get('ATLAS_VERSION', None)
if atlas_version:
dict_append(info, define_macros=[(
- 'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
])
else:
dict_append(info, define_macros=[('NO_ATLAS_INFO', -1)])
@@ -1517,7 +1584,7 @@ Make sure that -lgfortran is used for C++ extensions.
dict_append(info, define_macros=[('NO_ATLAS_INFO', -2)])
else:
dict_append(info, define_macros=[(
- 'ATLAS_INFO', '"\\"%s\\""' % atlas_version)
+ 'ATLAS_INFO', _c_string_literal(atlas_version))
])
result = _cached_atlas_version[key] = atlas_version, info
return result
@@ -1526,139 +1593,226 @@ Make sure that -lgfortran is used for C++ extensions.
class lapack_opt_info(system_info):
notfounderror = LapackNotFoundError
+ # Default order of LAPACK checks
+ lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack']
- def calc_info(self):
+ def _calc_info_mkl(self):
+ info = get_info('lapack_mkl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- lapack_mkl_info = get_info('lapack_mkl')
- if lapack_mkl_info:
- self.set_info(**lapack_mkl_info)
- return
+ def _calc_info_openblas(self):
+ info = get_info('openblas_lapack')
+ if info:
+ self.set_info(**info)
+ return True
+ info = get_info('openblas_clapack')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- openblas_info = get_info('openblas_lapack')
- if openblas_info:
- self.set_info(**openblas_info)
- return
+ def _calc_info_flame(self):
+ info = get_info('flame')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- openblas_info = get_info('openblas_clapack')
- if openblas_info:
- self.set_info(**openblas_info)
- return
+ def _calc_info_atlas(self):
+ info = get_info('atlas_3_10_threads')
+ if not info:
+ info = get_info('atlas_3_10')
+ if not info:
+ info = get_info('atlas_threads')
+ if not info:
+ info = get_info('atlas')
+ if info:
+ # Figure out if ATLAS has lapack...
+ # If not we need the lapack library, but not BLAS!
+ l = info.get('define_macros', [])
+ if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
+ or ('ATLAS_WITHOUT_LAPACK', None) in l:
+ # Get LAPACK (with possible warnings)
+ # If not found we don't accept anything
+ # since we can't use ATLAS with LAPACK!
+ lapack_info = self._get_info_lapack()
+ if not lapack_info:
+ return False
+ dict_append(info, **lapack_info)
+ self.set_info(**info)
+ return True
+ return False
- atlas_info = get_info('atlas_3_10_threads')
- if not atlas_info:
- atlas_info = get_info('atlas_3_10')
- if not atlas_info:
- atlas_info = get_info('atlas_threads')
- if not atlas_info:
- atlas_info = get_info('atlas')
-
- accelerate_info = get_info('accelerate')
- if accelerate_info and not atlas_info:
- self.set_info(**accelerate_info)
- return
+ def _calc_info_accelerate(self):
+ info = get_info('accelerate')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- need_lapack = 0
- need_blas = 0
- info = {}
- if atlas_info:
- l = atlas_info.get('define_macros', [])
- if ('ATLAS_WITH_LAPACK_ATLAS', None) in l \
- or ('ATLAS_WITHOUT_LAPACK', None) in l:
- need_lapack = 1
- info = atlas_info
+ def _get_info_blas(self):
+ # Default to get the optimized BLAS implementation
+ info = get_info('blas_opt')
+ if not info:
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
+ info_src = get_info('blas_src')
+ if not info_src:
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return {}
+ dict_append(info, libraries=[('fblas_src', info_src)])
+ return info
- else:
- warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2)
- need_blas = 1
- need_lapack = 1
+ def _get_info_lapack(self):
+ info = get_info('lapack')
+ if not info:
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=3)
+ info_src = get_info('lapack_src')
+ if not info_src:
+ warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return {}
+ dict_append(info, libraries=[('flapack_src', info_src)])
+ return info
+
+ def _calc_info_lapack(self):
+ info = self._get_info_lapack()
+ if info:
+ info_blas = self._get_info_blas()
+ dict_append(info, **info_blas)
dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+ self.set_info(**info)
+ return True
+ return False
- if need_lapack:
- lapack_info = get_info('lapack')
- #lapack_info = {} ## uncomment for testing
- if lapack_info:
- dict_append(info, **lapack_info)
- else:
- warnings.warn(LapackNotFoundError.__doc__, stacklevel=2)
- lapack_src_info = get_info('lapack_src')
- if not lapack_src_info:
- warnings.warn(LapackSrcNotFoundError.__doc__, stacklevel=2)
- return
- dict_append(info, libraries=[('flapack_src', lapack_src_info)])
-
- if need_blas:
- blas_info = get_info('blas')
- if blas_info:
- dict_append(info, **blas_info)
- else:
- warnings.warn(BlasNotFoundError.__doc__, stacklevel=2)
- blas_src_info = get_info('blas_src')
- if not blas_src_info:
- warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2)
- return
- dict_append(info, libraries=[('fblas_src', blas_src_info)])
+ def calc_info(self):
+ user_order = os.environ.get('NPY_LAPACK_ORDER', None)
+ if user_order is None:
+ lapack_order = self.lapack_order
+ else:
+ # the user has requested the order of the
+ # check they are all in the available list, a COMMA SEPARATED list
+ user_order = user_order.lower().split(',')
+ non_existing = []
+ lapack_order = []
+ for order in user_order:
+ if order in self.lapack_order:
+ lapack_order.append(order)
+ elif len(order) > 0:
+ non_existing.append(order)
+ if len(non_existing) > 0:
+ raise ValueError("lapack_opt_info user defined "
+ "LAPACK order has unacceptable "
+ "values: {}".format(non_existing))
+
+ for lapack in lapack_order:
+ if getattr(self, '_calc_info_{}'.format(lapack))():
+ return
- self.set_info(**info)
- return
+ if 'lapack' not in lapack_order:
+ # Since the user may request *not* to use any library, we still need
+ # to raise warnings to signal missing packages!
+ warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)
+ warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
+ # Default order of BLAS checks
+ blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'accelerate', 'blas']
- def calc_info(self):
+ def _calc_info_mkl(self):
+ info = get_info('blas_mkl')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- blas_mkl_info = get_info('blas_mkl')
- if blas_mkl_info:
- self.set_info(**blas_mkl_info)
- return
+ def _calc_info_blis(self):
+ info = get_info('blis')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- blis_info = get_info('blis')
- if blis_info:
- self.set_info(**blis_info)
- return
+ def _calc_info_openblas(self):
+ info = get_info('openblas')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- openblas_info = get_info('openblas')
- if openblas_info:
- self.set_info(**openblas_info)
- return
+ def _calc_info_atlas(self):
+ info = get_info('atlas_3_10_blas_threads')
+ if not info:
+ info = get_info('atlas_3_10_blas')
+ if not info:
+ info = get_info('atlas_blas_threads')
+ if not info:
+ info = get_info('atlas_blas')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- atlas_info = get_info('atlas_3_10_blas_threads')
- if not atlas_info:
- atlas_info = get_info('atlas_3_10_blas')
- if not atlas_info:
- atlas_info = get_info('atlas_blas_threads')
- if not atlas_info:
- atlas_info = get_info('atlas_blas')
-
- accelerate_info = get_info('accelerate')
- if accelerate_info and not atlas_info:
- self.set_info(**accelerate_info)
- return
+ def _calc_info_accelerate(self):
+ info = get_info('accelerate')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
- need_blas = 0
+ def _calc_info_blas(self):
+ # Warn about a non-optimized BLAS library
+ warnings.warn(BlasOptNotFoundError.__doc__ or '', stacklevel=3)
info = {}
- if atlas_info:
- info = atlas_info
+ dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+
+ blas = get_info('blas')
+ if blas:
+ dict_append(info, **blas)
else:
- warnings.warn(AtlasNotFoundError.__doc__, stacklevel=2)
- need_blas = 1
- dict_append(info, define_macros=[('NO_ATLAS_INFO', 1)])
+ # Not even BLAS was found!
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=3)
- if need_blas:
- blas_info = get_info('blas')
- if blas_info:
- dict_append(info, **blas_info)
- else:
- warnings.warn(BlasNotFoundError.__doc__, stacklevel=2)
- blas_src_info = get_info('blas_src')
- if not blas_src_info:
- warnings.warn(BlasSrcNotFoundError.__doc__, stacklevel=2)
- return
- dict_append(info, libraries=[('fblas_src', blas_src_info)])
+ blas_src = get_info('blas_src')
+ if not blas_src:
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=3)
+ return False
+ dict_append(info, libraries=[('fblas_src', blas_src)])
self.set_info(**info)
- return
+ return True
+
+ def calc_info(self):
+ user_order = os.environ.get('NPY_BLAS_ORDER', None)
+ if user_order is None:
+ blas_order = self.blas_order
+ else:
+ # the user has requested the order of the
+ # check they are all in the available list
+ user_order = user_order.lower().split(',')
+ non_existing = []
+ blas_order = []
+ for order in user_order:
+ if order in self.blas_order:
+ blas_order.append(order)
+ elif len(order) > 0:
+ non_existing.append(order)
+ if len(non_existing) > 0:
+ raise ValueError("blas_opt_info user defined BLAS order has unacceptable values: {}".format(non_existing))
+
+ for blas in blas_order:
+ if getattr(self, '_calc_info_{}'.format(blas))():
+ return
+
+ if 'blas' not in blas_order:
+ # Since the user may request *not* to use any library, we still need
+ # to raise warnings to signal missing packages!
+ warnings.warn(BlasNotFoundError.__doc__ or '', stacklevel=2)
+ warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
class blas_info(system_info):
@@ -1669,39 +1823,64 @@ class blas_info(system_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
- blas_libs = self.get_libs('blas_libs', self._lib_names)
+ opt = self.get_option_single('blas_libs', 'libraries')
+ blas_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, blas_libs, [])
if info is None:
return
else:
info['include_dirs'] = self.get_include_dirs()
if platform.system() == 'Windows':
- # The check for windows is needed because has_cblas uses the
+ # The check for windows is needed because get_cblas_libs uses the
# same compiler that was used to compile Python and msvc is
# often not installed when mingw is being used. This rough
# treatment is not desirable, but windows is tricky.
info['language'] = 'f77' # XXX: is it generally true?
else:
- lib = self.has_cblas(info)
+ lib = self.get_cblas_libs(info)
if lib is not None:
info['language'] = 'c'
- info['libraries'] = [lib]
+ info['libraries'] = lib
info['define_macros'] = [('HAVE_CBLAS', None)]
self.set_info(**info)
- def has_cblas(self, info):
+ def get_cblas_libs(self, info):
+ """ Check whether we can link with CBLAS interface
+
+ This method will search through several combinations of libraries
+ to check whether CBLAS is present:
+
+ 1. Libraries in ``info['libraries']``, as is
+ 2. As 1. but also explicitly adding ``'cblas'`` as a library
+ 3. As 1. but also explicitly adding ``'blas'`` as a library
+ 4. Check only library ``'cblas'``
+ 5. Check only library ``'blas'``
+
+ Parameters
+ ----------
+ info : dict
+ system information dictionary for compilation and linking
+
+ Returns
+ -------
+ libraries : list of str or None
+ a list of libraries that enables the use of CBLAS interface.
+ Returns None if not found or a compilation error occurs.
+
+ Since 1.17 returns a list.
+ """
# primitive cblas check by looking for the header and trying to link
# cblas or blas
- res = False
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
- s = """#include <cblas.h>
- int main(int argc, const char *argv[])
- {
- double a[4] = {1,2,3,4};
- double b[4] = {5,6,7,8};
- return cblas_ddot(4, a, 1, b, 1) > 10;
- }"""
+ s = textwrap.dedent("""\
+ #include <cblas.h>
+ int main(int argc, const char *argv[])
+ {
+ double a[4] = {1,2,3,4};
+ double b[4] = {5,6,7,8};
+ return cblas_ddot(4, a, 1, b, 1) > 10;
+ }""")
src = os.path.join(tmpdir, 'source.c')
try:
with open(src, 'wt') as f:
@@ -1711,27 +1890,24 @@ class blas_info(system_info):
# check we can compile (find headers)
obj = c.compile([src], output_dir=tmpdir,
include_dirs=self.get_include_dirs())
+ except (distutils.ccompiler.CompileError, distutils.ccompiler.LinkError):
+ return None
- # check we can link (find library)
- # some systems have separate cblas and blas libs. First
- # check for cblas lib, and if not present check for blas lib.
+ # check we can link (find library)
+ # some systems have separate cblas and blas libs.
+ for libs in [info['libraries'], ['cblas'] + info['libraries'],
+ ['blas'] + info['libraries'], ['cblas'], ['blas']]:
try:
c.link_executable(obj, os.path.join(tmpdir, "a.out"),
- libraries=["cblas"],
+ libraries=libs,
library_dirs=info['library_dirs'],
extra_postargs=info.get('extra_link_args', []))
- res = "cblas"
+ return libs
except distutils.ccompiler.LinkError:
- c.link_executable(obj, os.path.join(tmpdir, "a.out"),
- libraries=["blas"],
- library_dirs=info['library_dirs'],
- extra_postargs=info.get('extra_link_args', []))
- res = "blas"
- except distutils.ccompiler.CompileError:
- res = None
+ pass
finally:
shutil.rmtree(tmpdir)
- return res
+ return None
class openblas_info(blas_info):
@@ -1748,9 +1924,9 @@ class openblas_info(blas_info):
lib_dirs = self.get_lib_dirs()
- openblas_libs = self.get_libs('libraries', self._lib_names)
- if openblas_libs == self._lib_names: # backward compat with 1.8.0
- openblas_libs = self.get_libs('openblas_libs', self._lib_names)
+ # Prefer to use libraries over openblas_libs
+ opt = self.get_option_single('openblas_libs', 'libraries')
+ openblas_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, openblas_libs, [])
@@ -1821,12 +1997,13 @@ class openblas_lapack_info(openblas_info):
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
- s = """void zungqr_();
- int main(int argc, const char *argv[])
- {
- zungqr_();
- return 0;
- }"""
+ s = textwrap.dedent("""\
+ void zungqr_();
+ int main(int argc, const char *argv[])
+ {
+ zungqr_();
+ return 0;
+ }""")
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
@@ -1862,10 +2039,8 @@ class blis_info(blas_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
- blis_libs = self.get_libs('libraries', self._lib_names)
- if blis_libs == self._lib_names:
- blis_libs = self.get_libs('blis_libs', self._lib_names)
-
+ opt = self.get_option_single('blis_libs', 'libraries')
+ blis_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs2(lib_dirs, blis_libs, [])
if info is None:
return
@@ -1878,8 +2053,86 @@ class blis_info(blas_info):
include_dirs=incl_dirs)
self.set_info(**info)
+
+class flame_info(system_info):
+ """ Usage of libflame for LAPACK operations
+
+ This requires libflame to be compiled with lapack wrappers:
+
+ ./configure --enable-lapack2flame ...
+
+ Be aware that libflame 5.1.0 has some missing names in the shared library, so
+ if you have problems, try the static flame library.
+ """
+ section = 'flame'
+ _lib_names = ['flame']
+ notfounderror = FlameNotFoundError
+
+ def check_embedded_lapack(self, info):
+ """ libflame does not necessarily have a wrapper for fortran LAPACK, we need to check """
+ c = customized_ccompiler()
+
+ tmpdir = tempfile.mkdtemp()
+ s = textwrap.dedent("""\
+ void zungqr_();
+ int main(int argc, const char *argv[])
+ {
+ zungqr_();
+ return 0;
+ }""")
+ src = os.path.join(tmpdir, 'source.c')
+ out = os.path.join(tmpdir, 'a.out')
+ # Add the additional "extra" arguments
+ extra_args = info.get('extra_link_args', [])
+ try:
+ with open(src, 'wt') as f:
+ f.write(s)
+ obj = c.compile([src], output_dir=tmpdir)
+ try:
+ c.link_executable(obj, out, libraries=info['libraries'],
+ library_dirs=info['library_dirs'],
+ extra_postargs=extra_args)
+ return True
+ except distutils.ccompiler.LinkError:
+ return False
+ finally:
+ shutil.rmtree(tmpdir)
+
+ def calc_info(self):
+ lib_dirs = self.get_lib_dirs()
+ flame_libs = self.get_libs('libraries', self._lib_names)
+
+ info = self.check_libs2(lib_dirs, flame_libs, [])
+ if info is None:
+ return
+
+ if self.check_embedded_lapack(info):
+ # check if the user has supplied all information required
+ self.set_info(**info)
+ else:
+ # Try and get the BLAS lib to see if we can get it to work
+ blas_info = get_info('blas_opt')
+ if not blas_info:
+ # since we already failed once, this ain't going to work either
+ return
+
+ # Now we need to merge the two dictionaries
+ for key in blas_info:
+ if isinstance(blas_info[key], list):
+ info[key] = info.get(key, []) + blas_info[key]
+ elif isinstance(blas_info[key], tuple):
+ info[key] = info.get(key, ()) + blas_info[key]
+ else:
+ info[key] = info.get(key, '') + blas_info[key]
+
+ # Now check again
+ if self.check_embedded_lapack(info):
+ self.set_info(**info)
+
+
class accelerate_info(system_info):
section = 'accelerate'
+ _lib_names = ['accelerate', 'veclib']
notfounderror = BlasNotFoundError
def calc_info(self):
@@ -1888,7 +2141,7 @@ class accelerate_info(system_info):
if libraries:
libraries = [libraries]
else:
- libraries = self.get_libs('libraries', ['accelerate', 'veclib'])
+ libraries = self.get_libs('libraries', self._lib_names)
libraries = [lib.strip().lower() for lib in libraries]
if (sys.platform == 'darwin' and
@@ -1986,6 +2239,7 @@ class blas_src_info(system_info):
class x11_info(system_info):
section = 'x11'
notfounderror = X11NotFoundError
+ _lib_names = ['X11']
def __init__(self):
system_info.__init__(self,
@@ -1997,7 +2251,8 @@ class x11_info(system_info):
return
lib_dirs = self.get_lib_dirs()
include_dirs = self.get_include_dirs()
- x11_libs = self.get_libs('x11_libs', ['X11'])
+ opt = self.get_option_single('x11_libs', 'libraries')
+ x11_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, x11_libs, [])
if info is None:
return
@@ -2062,7 +2317,7 @@ class _numpy_info(system_info):
if vrs is None:
continue
macros = [(self.modulename.upper() + '_VERSION',
- '"\\"%s\\""' % (vrs)),
+ _c_string_literal(vrs)),
(self.modulename.upper(), None)]
break
dict_append(info, define_macros=macros)
@@ -2107,17 +2362,17 @@ class numerix_info(system_info):
if which[0] is None:
which = "numpy", "defaulted"
try:
- import numpy
+ import numpy # noqa: F401
which = "numpy", "defaulted"
except ImportError:
msg1 = str(get_exception())
try:
- import Numeric
+ import Numeric # noqa: F401
which = "numeric", "defaulted"
except ImportError:
msg2 = str(get_exception())
try:
- import numarray
+ import numarray # noqa: F401
which = "numarray", "defaulted"
except ImportError:
msg3 = str(get_exception())
@@ -2267,7 +2522,7 @@ class _pkg_config_info(system_info):
version = self.get_config_output(config_exe, self.version_flag)
if version:
macros.append((self.__class__.__name__.split('.')[-1].upper(),
- '"\\"%s\\""' % (version)))
+ _c_string_literal(version)))
if self.version_macro_name:
macros.append((self.version_macro_name + '_%s'
% (version.replace('.', '_')), None))
@@ -2388,7 +2643,8 @@ class amd_info(system_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
- amd_libs = self.get_libs('amd_libs', self._lib_names)
+ opt = self.get_option_single('amd_libs', 'libraries')
+ amd_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, amd_libs, [])
if info is None:
return
@@ -2419,7 +2675,8 @@ class umfpack_info(system_info):
def calc_info(self):
lib_dirs = self.get_lib_dirs()
- umfpack_libs = self.get_libs('umfpack_libs', self._lib_names)
+ opt = self.get_option_single('umfpack_libs', 'libraries')
+ umfpack_libs = self.get_libs(opt, self._lib_names)
info = self.check_libs(lib_dirs, umfpack_libs, [])
if info is None:
return
@@ -2437,7 +2694,6 @@ class umfpack_info(system_info):
define_macros=[('SCIPY_UMFPACK_H', None)],
swig_opts=['-I' + inc_dir])
- amd = get_info('amd')
dict_append(info, **get_info('amd'))
self.set_info(**info)
@@ -2533,6 +2789,7 @@ def show_all(argv=None):
del show_only[show_only.index(name)]
conf = c()
conf.verbosity = 2
+ # FIXME: r not used
r = conf.get_info()
if show_only:
log.info('Info classes not defined: %s', ','.join(show_only))
diff --git a/numpy/distutils/tests/test_exec_command.py b/numpy/distutils/tests/test_exec_command.py
index 8bd265007..37912f5ba 100644
--- a/numpy/distutils/tests/test_exec_command.py
+++ b/numpy/distutils/tests/test_exec_command.py
@@ -6,7 +6,7 @@ from tempfile import TemporaryFile
from numpy.distutils import exec_command
from numpy.distutils.exec_command import get_pythonexe
-from numpy.testing import tempdir, assert_
+from numpy.testing import tempdir, assert_, assert_warns
# In python 3 stdout, stderr are text (unicode compliant) devices, so to
# emulate them import StringIO from the io module.
@@ -71,27 +71,31 @@ def test_exec_command_stdout():
# Test posix version:
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
- exec_command.exec_command("cd '.'")
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(StringIO()):
with redirect_stderr(TemporaryFile()):
- exec_command.exec_command("cd '.'")
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
def test_exec_command_stderr():
# Test posix version:
with redirect_stdout(TemporaryFile(mode='w+')):
with redirect_stderr(StringIO()):
- exec_command.exec_command("cd '.'")
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
if os.name == 'posix':
# Test general (non-posix) version:
with emulate_nonposix():
with redirect_stdout(TemporaryFile()):
with redirect_stderr(StringIO()):
- exec_command.exec_command("cd '.'")
+ with assert_warns(DeprecationWarning):
+ exec_command.exec_command("cd '.'")
class TestExecCommand(object):
@@ -205,11 +209,12 @@ class TestExecCommand(object):
def test_basic(self):
with redirect_stdout(StringIO()):
with redirect_stderr(StringIO()):
- if os.name == "posix":
- self.check_posix(use_tee=0)
- self.check_posix(use_tee=1)
- elif os.name == "nt":
- self.check_nt(use_tee=0)
- self.check_nt(use_tee=1)
- self.check_execute_in(use_tee=0)
- self.check_execute_in(use_tee=1)
+ with assert_warns(DeprecationWarning):
+ if os.name == "posix":
+ self.check_posix(use_tee=0)
+ self.check_posix(use_tee=1)
+ elif os.name == "nt":
+ self.check_nt(use_tee=0)
+ self.check_nt(use_tee=1)
+ self.check_execute_in(use_tee=0)
+ self.check_execute_in(use_tee=1)
diff --git a/numpy/distutils/tests/test_fcompiler.py b/numpy/distutils/tests/test_fcompiler.py
index 95e44b051..6d245fbd4 100644
--- a/numpy/distutils/tests/test_fcompiler.py
+++ b/numpy/distutils/tests/test_fcompiler.py
@@ -1,6 +1,8 @@
from __future__ import division, absolute_import, print_function
-from numpy.testing import assert_
+import pytest
+
+from numpy.testing import assert_, suppress_warnings
import numpy.distutils.fcompiler
customizable_flags = [
@@ -25,6 +27,7 @@ def test_fcompiler_flags(monkeypatch):
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
+
monkeypatch.delenv(envvar)
assert_(new_flags == [new_flag])
@@ -33,9 +36,9 @@ def test_fcompiler_flags(monkeypatch):
for opt, envvar in customizable_flags:
new_flag = '-dummy-{}-flag'.format(opt)
prev_flags = getattr(flag_vars, opt)
-
monkeypatch.setenv(envvar, new_flag)
new_flags = getattr(flag_vars, opt)
+
monkeypatch.delenv(envvar)
if prev_flags is None:
assert_(new_flags == [new_flag])
diff --git a/numpy/distutils/tests/test_misc_util.py b/numpy/distutils/tests/test_misc_util.py
index 2a3294ddf..3e239cf48 100644
--- a/numpy/distutils/tests/test_misc_util.py
+++ b/numpy/distutils/tests/test_misc_util.py
@@ -79,3 +79,6 @@ def test_installed_npymath_ini():
# Regression test for gh-7707. If npymath.ini wasn't installed, then this
# will give an error.
info = get_info('npymath')
+
+ assert isinstance(info, dict)
+ assert "define_macros" in info
diff --git a/numpy/distutils/tests/test_shell_utils.py b/numpy/distutils/tests/test_shell_utils.py
new file mode 100644
index 000000000..a0344244f
--- /dev/null
+++ b/numpy/distutils/tests/test_shell_utils.py
@@ -0,0 +1,79 @@
+from __future__ import division, absolute_import, print_function
+
+import pytest
+import subprocess
+import os
+import json
+import sys
+
+from numpy.distutils import _shell_utils
+
+argv_cases = [
+ [r'exe'],
+ [r'path/exe'],
+ [r'path\exe'],
+ [r'\\server\path\exe'],
+ [r'path to/exe'],
+ [r'path to\exe'],
+
+ [r'exe', '--flag'],
+ [r'path/exe', '--flag'],
+ [r'path\exe', '--flag'],
+ [r'path to/exe', '--flag'],
+ [r'path to\exe', '--flag'],
+
+ # flags containing literal quotes in their name
+ [r'path to/exe', '--flag-"quoted"'],
+ [r'path to\exe', '--flag-"quoted"'],
+ [r'path to/exe', '"--flag-quoted"'],
+ [r'path to\exe', '"--flag-quoted"'],
+]
+
+
+@pytest.fixture(params=[
+ _shell_utils.WindowsParser,
+ _shell_utils.PosixParser
+])
+def Parser(request):
+ return request.param
+
+
+@pytest.fixture
+def runner(Parser):
+ if Parser != _shell_utils.NativeParser:
+ pytest.skip('Unable to run with non-native parser')
+
+ if Parser == _shell_utils.WindowsParser:
+ return lambda cmd: subprocess.check_output(cmd)
+ elif Parser == _shell_utils.PosixParser:
+ # posix has no non-shell string parsing
+ return lambda cmd: subprocess.check_output(cmd, shell=True)
+ else:
+ raise NotImplementedError
+
+
+@pytest.mark.parametrize('argv', argv_cases)
+def test_join_matches_subprocess(Parser, runner, argv):
+ """
+ Test that join produces strings understood by subprocess
+ """
+ # invoke python to return its arguments as json
+ cmd = [
+ sys.executable, '-c',
+ 'import json, sys; print(json.dumps(sys.argv[1:]))'
+ ]
+ joined = Parser.join(cmd + argv)
+ json_out = runner(joined).decode()
+ assert json.loads(json_out) == argv
+
+
+@pytest.mark.parametrize('argv', argv_cases)
+def test_roundtrip(Parser, argv):
+ """
+ Test that split is the inverse operation of join
+ """
+ try:
+ joined = Parser.join(argv)
+ assert argv == Parser.split(joined)
+ except NotImplementedError:
+ pytest.skip("Not implemented")
diff --git a/numpy/distutils/tests/test_system_info.py b/numpy/distutils/tests/test_system_info.py
index 4aec13c82..3c7638960 100644
--- a/numpy/distutils/tests/test_system_info.py
+++ b/numpy/distutils/tests/test_system_info.py
@@ -7,10 +7,12 @@ from tempfile import mkstemp, mkdtemp
from subprocess import Popen, PIPE
from distutils.errors import DistutilsError
+from numpy.testing import assert_, assert_equal, assert_raises
from numpy.distutils import ccompiler, customized_ccompiler
-from numpy.testing import assert_, assert_equal
from numpy.distutils.system_info import system_info, ConfigParser
+from numpy.distutils.system_info import AliasedOptionError
from numpy.distutils.system_info import default_lib_dirs, default_include_dirs
+from numpy.distutils import _shell_utils
def get_class(name, notfound_action=1):
@@ -21,7 +23,8 @@ def get_class(name, notfound_action=1):
2 - raise error
"""
cl = {'temp1': Temp1Info,
- 'temp2': Temp2Info
+ 'temp2': Temp2Info,
+ 'duplicate_options': DuplicateOptionInfo,
}.get(name.lower(), _system_info)
return cl()
@@ -29,7 +32,7 @@ simple_site = """
[ALL]
library_dirs = {dir1:s}{pathsep:s}{dir2:s}
libraries = {lib1:s},{lib2:s}
-extra_compile_args = -I/fake/directory
+extra_compile_args = -I/fake/directory -I"/path with/spaces" -Os
runtime_library_dirs = {dir1:s}
[temp1]
@@ -40,8 +43,12 @@ runtime_library_dirs = {dir1:s}
[temp2]
library_dirs = {dir2:s}
libraries = {lib2:s}
-extra_link_args = -Wl,-rpath={lib2:s}
+extra_link_args = -Wl,-rpath={lib2_escaped:s}
rpath = {dir2:s}
+
+[duplicate_options]
+mylib_libs = {lib1:s}
+libraries = {lib2:s}
"""
site_cfg = simple_site
@@ -118,6 +125,10 @@ class Temp2Info(_system_info):
"""For testing purposes"""
section = 'temp2'
+class DuplicateOptionInfo(_system_info):
+ """For testing purposes"""
+ section = 'duplicate_options'
+
class TestSystemInfoReading(object):
@@ -137,7 +148,8 @@ class TestSystemInfoReading(object):
'lib1': self._lib1,
'dir2': self._dir2,
'lib2': self._lib2,
- 'pathsep': os.pathsep
+ 'pathsep': os.pathsep,
+ 'lib2_escaped': _shell_utils.NativeParser.join([self._lib2])
})
# Write site.cfg
fd, self._sitecfg = mkstemp()
@@ -158,6 +170,9 @@ class TestSystemInfoReading(object):
self.c_default = site_and_parse(get_class('default'), self._sitecfg)
self.c_temp1 = site_and_parse(get_class('temp1'), self._sitecfg)
self.c_temp2 = site_and_parse(get_class('temp2'), self._sitecfg)
+ self.c_dup_options = site_and_parse(get_class('duplicate_options'),
+ self._sitecfg)
+
def teardown(self):
# Do each removal separately
@@ -181,7 +196,7 @@ class TestSystemInfoReading(object):
assert_equal(tsi.get_libraries(), [self._lib1, self._lib2])
assert_equal(tsi.get_runtime_lib_dirs(), [self._dir1])
extra = tsi.calc_extra_info()
- assert_equal(extra['extra_compile_args'], ['-I/fake/directory'])
+ assert_equal(extra['extra_compile_args'], ['-I/fake/directory', '-I/path with/spaces', '-Os'])
def test_temp1(self):
# Read in all information in the temp1 block
@@ -200,6 +215,13 @@ class TestSystemInfoReading(object):
extra = tsi.calc_extra_info()
assert_equal(extra['extra_link_args'], ['-Wl,-rpath=' + self._lib2])
+ def test_duplicate_options(self):
+ # Ensure that duplicates are raising an AliasedOptionError
+ tsi = self.c_dup_options
+ assert_raises(AliasedOptionError, tsi.get_option_single, "mylib_libs", "libraries")
+ assert_equal(tsi.get_libs("mylib_libs", [self._lib1]), [self._lib1])
+ assert_equal(tsi.get_libs("libraries", [self._lib2]), [self._lib2])
+
@pytest.mark.skipif(not HAVE_COMPILER, reason="Missing compiler")
def test_compile1(self):
# Compile source and link the first source
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index c87a40ccd..1871512bf 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -260,6 +260,45 @@ identical behaviour between arrays and scalars, irrespective of whether the
value is inside an array or not. NumPy scalars also have many of the same
methods arrays do.
+Overflow Errors
+===============
+
+The fixed size of NumPy numeric types may cause overflow errors when a value
+requires more memory than available in the data type. For example,
+`numpy.power` evaluates ``100 * 10 ** 8`` correctly for 64-bit integers,
+but gives 1874919424 (incorrect) for a 32-bit integer.
+
+ >>> np.power(100, 8, dtype=np.int64)
+ 10000000000000000
+ >>> np.power(100, 8, dtype=np.int32)
+ 1874919424
+
+The behaviour of NumPy and Python integer types differs significantly for
+integer overflows and may confuse users expecting NumPy integers to behave
+similar to Python's ``int``. Unlike NumPy, the size of Python's ``int`` is
+flexible. This means Python integers may expand to accommodate any integer and
+will not overflow.
+
+NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
+minimum or maximum values of NumPy integer and floating point values
+respectively ::
+
+ >>> np.iinfo(np.int) # Bounds of the default integer on this system.
+ iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
+ >>> np.iinfo(np.int32) # Bounds of a 32-bit integer
+ iinfo(min=-2147483648, max=2147483647, dtype=int32)
+ >>> np.iinfo(np.int64) # Bounds of a 64-bit integer
+ iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
+
+If 64-bit integers are still too small the result may be cast to a
+floating point number. Floating point numbers offer a larger, but inexact,
+range of possible values.
+
+ >>> np.power(100, 100, dtype=np.int64) # Incorrect even with 64-bit int
+ 0
+ >>> np.power(100, 100, dtype=np.float64)
+ 1e+200
+
Extended Precision
==================
@@ -275,8 +314,8 @@ compiler's ``long double`` available as ``np.longdouble`` (and
``np.clongdouble`` for the complex numbers). You can find out what your
numpy provides with ``np.finfo(np.longdouble)``.
-NumPy does not provide a dtype with more precision than C
-``long double``\\s; in particular, the 128-bit IEEE quad precision
+NumPy does not provide a dtype with more precision than C's
+``long double``\\; in particular, the 128-bit IEEE quad precision
data type (FORTRAN's ``REAL*16``\\) is not available.
For efficient memory alignment, ``np.longdouble`` is usually stored
diff --git a/numpy/doc/broadcasting.py b/numpy/doc/broadcasting.py
index 6c3a4bc75..cb548a0d0 100644
--- a/numpy/doc/broadcasting.py
+++ b/numpy/doc/broadcasting.py
@@ -3,6 +3,12 @@
Broadcasting over arrays
========================
+.. note::
+ See `this article
+ <https://numpy.org/devdocs/user/theory.broadcasting.html>`_
+ for illustrations of broadcasting concepts.
+
+
The term broadcasting describes how numpy treats arrays with different
shapes during arithmetic operations. Subject to certain constraints,
the smaller array is "broadcast" across the larger array so that they
@@ -36,7 +42,7 @@ We can think of the scalar ``b`` being *stretched* during the arithmetic
operation into an array with the same shape as ``a``. The new elements in
``b`` are simply copies of the original scalar. The stretching analogy is
only conceptual. NumPy is smart enough to use the original scalar value
-without actually making copies, so that broadcasting operations are as
+without actually making copies so that broadcasting operations are as
memory and computationally efficient as possible.
The code in the second example is more efficient than that in the first
@@ -46,7 +52,7 @@ because broadcasting moves less memory around during the multiplication
General Broadcasting Rules
==========================
When operating on two arrays, NumPy compares their shapes element-wise.
-It starts with the trailing dimensions, and works its way forward. Two
+It starts with the trailing dimensions and works its way forward. Two
dimensions are compatible when
1) they are equal, or
@@ -55,8 +61,7 @@ dimensions are compatible when
If these conditions are not met, a
``ValueError: operands could not be broadcast together`` exception is
thrown, indicating that the arrays have incompatible shapes. The size of
-the resulting array is the maximum size along each dimension of the input
-arrays.
+the resulting array is the size that is not 1 along each axis of the inputs.
Arrays do not need to have the same *number* of dimensions. For example,
if you have a ``256x256x3`` array of RGB values, and you want to scale
@@ -172,8 +177,5 @@ Here the ``newaxis`` index operator inserts a new axis into ``a``,
making it a two-dimensional ``4x1`` array. Combining the ``4x1`` array
with ``b``, which has shape ``(3,)``, yields a ``4x3`` array.
-See `this article <https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc>`_
-for illustrations of broadcasting concepts.
-
"""
from __future__ import division, absolute_import, print_function
diff --git a/numpy/doc/byteswapping.py b/numpy/doc/byteswapping.py
index f9491ed43..7a749c8d5 100644
--- a/numpy/doc/byteswapping.py
+++ b/numpy/doc/byteswapping.py
@@ -31,16 +31,16 @@ Let's say the two integers were in fact 1 and 770. Because 770 = 256 *
3 + 2, the 4 bytes in memory would contain respectively: 0, 1, 3, 2.
The bytes I have loaded from the file would have these contents:
->>> big_end_str = chr(0) + chr(1) + chr(3) + chr(2)
->>> big_end_str
-'\\x00\\x01\\x03\\x02'
+>>> big_end_buffer = bytearray([0,1,3,2])
+>>> big_end_buffer
+bytearray(b'\\x00\\x01\\x03\\x02')
We might want to use an ``ndarray`` to access these integers. In that
case, we can create an array around this memory, and tell numpy that
there are two integers, and that they are 16 bit and big-endian:
>>> import numpy as np
->>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_str)
+>>> big_end_arr = np.ndarray(shape=(2,),dtype='>i2', buffer=big_end_buffer)
>>> big_end_arr[0]
1
>>> big_end_arr[1]
@@ -53,7 +53,7 @@ integer, the dtype string would be ``<u4``.
In fact, why don't we try that?
->>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_str)
+>>> little_end_u4 = np.ndarray(shape=(1,),dtype='<u4', buffer=big_end_buffer)
>>> little_end_u4[0] == 1 * 256**1 + 3 * 256**2 + 2 * 256**3
True
@@ -97,7 +97,7 @@ Data and dtype endianness don't match, change dtype to match data
We make something where they don't match:
->>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_str)
+>>> wrong_end_dtype_arr = np.ndarray(shape=(2,),dtype='<i2', buffer=big_end_buffer)
>>> wrong_end_dtype_arr[0]
256
@@ -110,7 +110,7 @@ the correct endianness:
Note the array has not changed in memory:
->>> fixed_end_dtype_arr.tobytes() == big_end_str
+>>> fixed_end_dtype_arr.tobytes() == big_end_buffer
True
Data and type endianness don't match, change data to match dtype
@@ -126,7 +126,7 @@ that needs a certain byte ordering.
Now the array *has* changed in memory:
->>> fixed_end_mem_arr.tobytes() == big_end_str
+>>> fixed_end_mem_arr.tobytes() == big_end_buffer
False
Data and dtype endianness match, swap data and dtype
@@ -140,7 +140,7 @@ the previous operations:
>>> swapped_end_arr = big_end_arr.byteswap().newbyteorder()
>>> swapped_end_arr[0]
1
->>> swapped_end_arr.tobytes() == big_end_str
+>>> swapped_end_arr.tobytes() == big_end_buffer
False
An easier way of casting the data to a specific dtype and byte ordering
@@ -149,7 +149,7 @@ can be achieved with the ndarray astype method:
>>> swapped_end_arr = big_end_arr.astype('<i2')
>>> swapped_end_arr[0]
1
->>> swapped_end_arr.tobytes() == big_end_str
+>>> swapped_end_arr.tobytes() == big_end_buffer
False
"""
diff --git a/numpy/doc/constants.py b/numpy/doc/constants.py
index 21c7a3c67..72793e44d 100644
--- a/numpy/doc/constants.py
+++ b/numpy/doc/constants.py
@@ -22,53 +22,51 @@ constants = []
def add_newdoc(module, name, doc):
constants.append((name, doc))
-add_newdoc('numpy', 'Inf',
+add_newdoc('numpy', 'pi',
"""
- IEEE 754 floating point representation of (positive) infinity.
-
- Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
- `inf`. For more details, see `inf`.
+ ``pi = 3.1415926535897932384626433...``
- See Also
- --------
- inf
+ References
+ ----------
+ https://en.wikipedia.org/wiki/Pi
""")
-add_newdoc('numpy', 'Infinity',
+add_newdoc('numpy', 'e',
"""
- IEEE 754 floating point representation of (positive) infinity.
+ Euler's constant, base of natural logarithms, Napier's constant.
- Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
- `inf`. For more details, see `inf`.
+ ``e = 2.71828182845904523536028747135266249775724709369995...``
See Also
--------
- inf
+ exp : Exponential function
+ log : Natural logarithm
+
+ References
+ ----------
+ https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
""")
-add_newdoc('numpy', 'NAN',
+add_newdoc('numpy', 'euler_gamma',
"""
- IEEE 754 floating point representation of Not a Number (NaN).
-
- `NaN` and `NAN` are equivalent definitions of `nan`. Please use
- `nan` instead of `NAN`.
+ ``γ = 0.5772156649015328606065120900824024310421...``
- See Also
- --------
- nan
+ References
+ ----------
+ https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
""")
-add_newdoc('numpy', 'NINF',
+add_newdoc('numpy', 'inf',
"""
- IEEE 754 floating point representation of negative infinity.
+ IEEE 754 floating point representation of (positive) infinity.
Returns
-------
y : float
- A floating point representation of negative infinity.
+ A floating point representation of positive infinity.
See Also
--------
@@ -90,12 +88,96 @@ add_newdoc('numpy', 'NINF',
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
+ `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
+
Examples
--------
- >>> np.NINF
- -inf
- >>> np.log(0)
- -inf
+ >>> np.inf
+ inf
+ >>> np.array([1]) / 0.
+ array([ Inf])
+
+ """)
+
+add_newdoc('numpy', 'nan',
+ """
+ IEEE 754 floating point representation of Not a Number (NaN).
+
+ Returns
+ -------
+ y : A floating point representation of Not a Number.
+
+ See Also
+ --------
+ isnan : Shows which elements are Not a Number.
+
+ isfinite : Shows which elements are finite (not one of
+ Not a Number, positive infinity and negative infinity)
+
+ Notes
+ -----
+ NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
+ (IEEE 754). This means that Not a Number is not equivalent to infinity.
+
+ `NaN` and `NAN` are aliases of `nan`.
+
+ Examples
+ --------
+ >>> np.nan
+ nan
+ >>> np.log(-1)
+ nan
+ >>> np.log([-1, 1, 2])
+ array([ NaN, 0. , 0.69314718])
+
+ """)
+
+add_newdoc('numpy', 'newaxis',
+ """
+ A convenient alias for None, useful for indexing arrays.
+
+ See Also
+ --------
+ `numpy.doc.indexing`
+
+ Examples
+ --------
+ >>> newaxis is None
+ True
+ >>> x = np.arange(3)
+ >>> x
+ array([0, 1, 2])
+ >>> x[:, newaxis]
+ array([[0],
+ [1],
+ [2]])
+ >>> x[:, newaxis, newaxis]
+ array([[[0]],
+ [[1]],
+ [[2]]])
+ >>> x[:, newaxis] * x
+ array([[0, 0, 0],
+ [0, 1, 2],
+ [0, 2, 4]])
+
+ Outer product, same as ``outer(x, y)``:
+
+ >>> y = np.arange(3, 6)
+ >>> x[:, newaxis] * y
+ array([[ 0, 0, 0],
+ [ 3, 4, 5],
+ [ 6, 8, 10]])
+
+ ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
+
+ >>> x[newaxis, :].shape
+ (1, 3)
+ >>> x[newaxis].shape
+ (1, 3)
+ >>> x[None].shape
+ (1, 3)
+ >>> x[:, newaxis].shape
+ (3, 1)
""")
@@ -144,32 +226,6 @@ add_newdoc('numpy', 'NZERO',
""")
-add_newdoc('numpy', 'NaN',
- """
- IEEE 754 floating point representation of Not a Number (NaN).
-
- `NaN` and `NAN` are equivalent definitions of `nan`. Please use
- `nan` instead of `NaN`.
-
- See Also
- --------
- nan
-
- """)
-
-add_newdoc('numpy', 'PINF',
- """
- IEEE 754 floating point representation of (positive) infinity.
-
- Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
- `inf`. For more details, see `inf`.
-
- See Also
- --------
- inf
-
- """)
-
add_newdoc('numpy', 'PZERO',
"""
IEEE 754 floating point representation of positive zero.
@@ -215,31 +271,40 @@ add_newdoc('numpy', 'PZERO',
""")
-add_newdoc('numpy', 'e',
+add_newdoc('numpy', 'NAN',
"""
- Euler's constant, base of natural logarithms, Napier's constant.
+ IEEE 754 floating point representation of Not a Number (NaN).
- ``e = 2.71828182845904523536028747135266249775724709369995...``
+ `NaN` and `NAN` are equivalent definitions of `nan`. Please use
+ `nan` instead of `NAN`.
See Also
--------
- exp : Exponential function
- log : Natural logarithm
+ nan
- References
- ----------
- https://en.wikipedia.org/wiki/E_%28mathematical_constant%29
+ """)
+
+add_newdoc('numpy', 'NaN',
+ """
+ IEEE 754 floating point representation of Not a Number (NaN).
+
+ `NaN` and `NAN` are equivalent definitions of `nan`. Please use
+ `nan` instead of `NaN`.
+
+ See Also
+ --------
+ nan
""")
-add_newdoc('numpy', 'inf',
+add_newdoc('numpy', 'NINF',
"""
- IEEE 754 floating point representation of (positive) infinity.
+ IEEE 754 floating point representation of negative infinity.
Returns
-------
y : float
- A floating point representation of positive infinity.
+ A floating point representation of negative infinity.
See Also
--------
@@ -261,18 +326,16 @@ add_newdoc('numpy', 'inf',
Also that positive infinity is not equivalent to negative infinity. But
infinity is equivalent to positive infinity.
- `Inf`, `Infinity`, `PINF` and `infty` are aliases for `inf`.
-
Examples
--------
- >>> np.inf
- inf
- >>> np.array([1]) / 0.
- array([ Inf])
+ >>> np.NINF
+ -inf
+ >>> np.log(0)
+ -inf
""")
-add_newdoc('numpy', 'infty',
+add_newdoc('numpy', 'PINF',
"""
IEEE 754 floating point representation of (positive) infinity.
@@ -285,108 +348,46 @@ add_newdoc('numpy', 'infty',
""")
-add_newdoc('numpy', 'nan',
+add_newdoc('numpy', 'infty',
"""
- IEEE 754 floating point representation of Not a Number (NaN).
+ IEEE 754 floating point representation of (positive) infinity.
- Returns
- -------
- y : A floating point representation of Not a Number.
+ Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+ `inf`. For more details, see `inf`.
See Also
--------
- isnan : Shows which elements are Not a Number.
-
- isfinite : Shows which elements are finite (not one of
- Not a Number, positive infinity and negative infinity)
-
- Notes
- -----
- NumPy uses the IEEE Standard for Binary Floating-Point for Arithmetic
- (IEEE 754). This means that Not a Number is not equivalent to infinity.
-
- `NaN` and `NAN` are aliases of `nan`.
-
- Examples
- --------
- >>> np.nan
- nan
- >>> np.log(-1)
- nan
- >>> np.log([-1, 1, 2])
- array([ NaN, 0. , 0.69314718])
+ inf
""")
-add_newdoc('numpy', 'newaxis',
+add_newdoc('numpy', 'Inf',
"""
- A convenient alias for None, useful for indexing arrays.
+ IEEE 754 floating point representation of (positive) infinity.
- See Also
- --------
- `numpy.doc.indexing`
+ Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+ `inf`. For more details, see `inf`.
- Examples
+ See Also
--------
- >>> newaxis is None
- True
- >>> x = np.arange(3)
- >>> x
- array([0, 1, 2])
- >>> x[:, newaxis]
- array([[0],
- [1],
- [2]])
- >>> x[:, newaxis, newaxis]
- array([[[0]],
- [[1]],
- [[2]]])
- >>> x[:, newaxis] * x
- array([[0, 0, 0],
- [0, 1, 2],
- [0, 2, 4]])
-
- Outer product, same as ``outer(x, y)``:
-
- >>> y = np.arange(3, 6)
- >>> x[:, newaxis] * y
- array([[ 0, 0, 0],
- [ 3, 4, 5],
- [ 6, 8, 10]])
-
- ``x[newaxis, :]`` is equivalent to ``x[newaxis]`` and ``x[None]``:
-
- >>> x[newaxis, :].shape
- (1, 3)
- >>> x[newaxis].shape
- (1, 3)
- >>> x[None].shape
- (1, 3)
- >>> x[:, newaxis].shape
- (3, 1)
+ inf
""")
-add_newdoc('numpy', 'pi',
+add_newdoc('numpy', 'Infinity',
"""
- ``pi = 3.1415926535897932384626433...``
-
- References
- ----------
- https://en.wikipedia.org/wiki/Pi
-
- """)
+ IEEE 754 floating point representation of (positive) infinity.
-add_newdoc('numpy', 'euler_gamma',
- """
- ``γ = 0.5772156649015328606065120900824024310421...``
+ Use `inf` because `Inf`, `Infinity`, `PINF` and `infty` are aliases for
+ `inf`. For more details, see `inf`.
- References
- ----------
- https://en.wikipedia.org/wiki/Euler-Mascheroni_constant
+ See Also
+ --------
+ inf
""")
+
if __doc__:
constants_str = []
constants.sort()
diff --git a/numpy/doc/dispatch.py b/numpy/doc/dispatch.py
new file mode 100644
index 000000000..c9029941b
--- /dev/null
+++ b/numpy/doc/dispatch.py
@@ -0,0 +1,271 @@
+""".. _dispatch_mechanism:
+
+Numpy's dispatch mechanism, introduced in numpy version v1.16, is the
+recommended approach for writing custom N-dimensional array containers that are
+compatible with the numpy API and provide custom implementations of numpy
+functionality. Applications include `dask <http://dask.pydata.org>`_ arrays, an
+N-dimensional array distributed across multiple nodes, and `cupy
+<https://docs-cupy.chainer.org/en/stable/>`_ arrays, an N-dimensional array on
+a GPU.
+
+To get a feel for writing custom array containers, we'll begin with a simple
+example that has rather narrow utility but illustrates the concepts involved.
+
+>>> import numpy as np
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+...
+
+Our custom array can be instantiated like:
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr
+DiagonalArray(N=5, value=1)
+
+We can convert to a numpy array using :func:`numpy.array` or
+:func:`numpy.asarray`, which will call its ``__array__`` method to obtain a
+standard ``numpy.ndarray``.
+
+>>> np.asarray(arr)
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+If we operate on ``arr`` with a numpy function, numpy will again use the
+``__array__`` interface to convert it to an array and then apply the function
+in the usual way.
+
+>>> np.multiply(arr, 2)
+array([[2., 0., 0., 0., 0.],
+ [0., 2., 0., 0., 0.],
+ [0., 0., 2., 0., 0.],
+ [0., 0., 0., 2., 0.],
+ [0., 0., 0., 0., 2.]])
+
+
+Notice that the return type is a standard ``numpy.ndarray``.
+
+>>> type(np.multiply(arr, 2))
+numpy.ndarray
+
+How can we pass our custom array type through this function? Numpy allows a
+class to indicate that it would like to handle computations in a custom-defined
+way through the interfaces ``__array_ufunc__`` and ``__array_function__``. Let's
+take one at a time, starting with ``__array_ufunc__``. This method covers
+:ref:`ufuncs`, a class of functions that includes, for example,
+:func:`numpy.multiply` and :func:`numpy.sin`.
+
+The ``__array_ufunc__`` receives:
+
+- ``ufunc``, a function like ``numpy.multiply``
+- ``method``, a string, differentiating between ``numpy.multiply(...)`` and
+ variants like ``numpy.multiply.outer``, ``numpy.multiply.accumulate``, and so
+ on. For the common case, ``numpy.multiply(...)``, ``method == '__call__'``.
+- ``inputs``, which could be a mixture of different types
+- ``kwargs``, keyword arguments passed to the function
+
+For this example we will only handle the method ``__call__``.
+
+>>> from numbers import Number
+>>> class DiagonalArray:
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+...
+
+Now our custom array type passes through numpy functions.
+
+>>> arr = DiagonalArray(5, 1)
+>>> np.multiply(arr, 3)
+DiagonalArray(N=5, value=3)
+>>> np.add(arr, 3)
+DiagonalArray(N=5, value=4)
+>>> np.sin(arr)
+DiagonalArray(N=5, value=0.8414709848078965)
+
+At this point ``arr + 3`` does not work.
+
+>>> arr + 3
+TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
+
+To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
+and so on to dispatch to the corresponding ufunc. We can achieve this
+conveniently by inheriting from the mixin
+:class:`~numpy.lib.mixins.NDArrayOperatorsMixin`.
+
+>>> import numpy.lib.mixins
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+...
+
+>>> arr = DiagonalArray(5, 1)
+>>> arr + 3
+DiagonalArray(N=5, value=4)
+>>> arr > 0
+DiagonalArray(N=5, value=True)
+
+Now let's tackle ``__array_function__``. We'll create a dict that maps numpy
+functions to our custom variants.
+
+>>> HANDLED_FUNCTIONS = {}
+>>> class DiagonalArray(numpy.lib.mixins.NDArrayOperatorsMixin):
+... def __init__(self, N, value):
+... self._N = N
+... self._i = value
+... def __repr__(self):
+... return f"{self.__class__.__name__}(N={self._N}, value={self._i})"
+... def __array__(self):
+... return self._i * np.eye(self._N)
+... def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+... if method == '__call__':
+... N = None
+... scalars = []
+... for input in inputs:
+... # In this case we accept only scalar numbers or DiagonalArrays.
+... if isinstance(input, Number):
+... scalars.append(input)
+... elif isinstance(input, self.__class__):
+... scalars.append(input._i)
+... if N is not None:
+... if N != self._N:
+... raise TypeError("inconsistent sizes")
+... else:
+... N = self._N
+... else:
+... return NotImplemented
+... return self.__class__(N, ufunc(*scalars, **kwargs))
+... else:
+... return NotImplemented
+... def __array_function__(self, func, types, args, kwargs):
+... if func not in HANDLED_FUNCTIONS:
+... return NotImplemented
+... # Note: this allows subclasses that don't override
+... # __array_function__ to handle DiagonalArray objects.
+... if not all(issubclass(t, self.__class__) for t in types):
+... return NotImplemented
+... return HANDLED_FUNCTIONS[func](*args, **kwargs)
+...
+
+A convenient pattern is to define a decorator ``implements`` that can be used
+to add functions to ``HANDLED_FUNCTIONS``.
+
+>>> def implements(np_function):
+... "Register an __array_function__ implementation for DiagonalArray objects."
+... def decorator(func):
+... HANDLED_FUNCTIONS[np_function] = func
+... return func
+... return decorator
+...
+
+Now we write implementations of numpy functions for ``DiagonalArray``.
+For completeness, to support the usage ``arr.sum()`` add a method ``sum`` that
+calls ``numpy.sum(self)``, and the same for ``mean``.
+
+>>> @implements(np.sum)
+... def sum(arr):
+... "Implementation of np.sum for DiagonalArray objects"
+... return arr._i * arr._N
+...
+>>> @implements(np.mean)
+... def mean(arr):
+... "Implementation of np.mean for DiagonalArray objects"
+... return arr._i / arr._N
+...
+>>> arr = DiagonalArray(5, 1)
+>>> np.sum(arr)
+5
+>>> np.mean(arr)
+0.2
+
+If the user tries to use any numpy functions not included in
+``HANDLED_FUNCTIONS``, a ``TypeError`` will be raised by numpy, indicating that
+this operation is not supported. For example, concatenating two
+``DiagonalArrays`` does not produce another diagonal array, so it is not
+supported.
+
+>>> np.concatenate([arr, arr])
+TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
+
+Additionally, our implementations of ``sum`` and ``mean`` do not accept the
+optional arguments that numpy's implementation does.
+
+>>> np.sum(arr, axis=0)
+TypeError: sum() got an unexpected keyword argument 'axis'
+
+The user always has the option of converting to a normal ``numpy.ndarray`` with
+:func:`numpy.asarray` and using standard numpy from there.
+
+>>> np.concatenate([np.asarray(arr), np.asarray(arr)])
+array([[1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.],
+ [1., 0., 0., 0., 0.],
+ [0., 1., 0., 0., 0.],
+ [0., 0., 1., 0., 0.],
+ [0., 0., 0., 1., 0.],
+ [0., 0., 0., 0., 1.]])
+
+Refer to the `dask source code <https://github.com/dask/dask>`_ and
+`cupy source code <https://github.com/cupy/cupy>`_ for more fully-worked
+examples of custom array containers.
+
+See also `NEP 18 <http://www.numpy.org/neps/nep-0018-array-function-protocol.html>`_.
+"""
diff --git a/numpy/doc/glossary.py b/numpy/doc/glossary.py
index a3b9423a8..7d1c9a1d5 100644
--- a/numpy/doc/glossary.py
+++ b/numpy/doc/glossary.py
@@ -159,7 +159,7 @@ Glossary
field
In a :term:`structured data type`, each sub-type is called a `field`.
- The `field` has a name (a string), a type (any valid :term:`dtype`, and
+ The `field` has a name (a string), a type (any valid dtype), and
an optional `title`. See :ref:`arrays.dtypes`
Fortran order
@@ -209,6 +209,9 @@ Glossary
Key 1: b
Key 2: c
+ itemsize
+ The size of the dtype element in bytes.
+
list
A Python container that can hold any number of objects or items.
The items do not have to be of the same type, and can even be
@@ -270,13 +273,11 @@ Glossary
masked_array(data = [-- 2.0 --],
mask = [ True False True],
fill_value = 1e+20)
- <BLANKLINE>
>>> x + [1, 2, 3]
masked_array(data = [-- 4.0 --],
mask = [ True False True],
fill_value = 1e+20)
- <BLANKLINE>
Masked arrays are often used when operating on arrays containing
@@ -347,31 +348,31 @@ Glossary
Painting the city red!
slice
- Used to select only certain elements from a sequence::
+ Used to select only certain elements from a sequence:
- >>> x = range(5)
- >>> x
- [0, 1, 2, 3, 4]
+ >>> x = range(5)
+ >>> x
+ [0, 1, 2, 3, 4]
- >>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
- [1, 2]
+ >>> x[1:3] # slice from 1 to 3 (excluding 3 itself)
+ [1, 2]
- >>> x[1:5:2] # slice from 1 to 5, but skipping every second element
- [1, 3]
+ >>> x[1:5:2] # slice from 1 to 5, but skipping every second element
+ [1, 3]
- >>> x[::-1] # slice a sequence in reverse
- [4, 3, 2, 1, 0]
+ >>> x[::-1] # slice a sequence in reverse
+ [4, 3, 2, 1, 0]
Arrays may have more than one dimension, each which can be sliced
- individually::
+ individually:
- >>> x = np.array([[1, 2], [3, 4]])
- >>> x
- array([[1, 2],
- [3, 4]])
+ >>> x = np.array([[1, 2], [3, 4]])
+ >>> x
+ array([[1, 2],
+ [3, 4]])
- >>> x[:, 1]
- array([2, 4])
+ >>> x[:, 1]
+ array([2, 4])
structure
See :term:`structured data type`
@@ -379,6 +380,20 @@ Glossary
structured data type
A data type composed of other datatypes
+ subarray data type
+ A :term:`structured data type` may contain a :term:`ndarray` with its
+ own dtype and shape:
+
+ >>> dt = np.dtype([('a', np.int32), ('b', np.float32, (3,))])
+ >>> np.zeros(3, dtype=dt)
+ array([(0, [0., 0., 0.]), (0, [0., 0., 0.]), (0, [0., 0., 0.])],
+ dtype=[('a', '<i4'), ('b', '<f4', (3,))])
+
+ title
+ In addition to field names, structured array fields may have an
+ associated :ref:`title <titles>` which is an alias to the name and is
+ commonly used for plotting.
+
tuple
A sequence that may contain a variable number of types of any
kind. A tuple is immutable, i.e., once constructed it cannot be
@@ -415,8 +430,19 @@ Glossary
'alpha'
ufunc
- Universal function. A fast element-wise array operation. Examples include
- ``add``, ``sin`` and ``logical_or``.
+ Universal function. A fast element-wise, :term:`vectorized
+ <vectorization>` array operation. Examples include ``add``, ``sin`` and
+ ``logical_or``.
+
+ vectorization
+ Optimizing a looping block by specialized code. In a traditional sense,
+ vectorization performs the same operation on multiple elements with
+ fixed strides between them via specialized hardware. Compilers know how
+ to take advantage of well-constructed loops to implement such
+ optimizations. NumPy uses :ref:`vectorization <whatis-vectorization>`
+ to mean any optimization via specialized code performing the same
+ operations on multiple elements, typically achieving speedups by
+ avoiding some of the overhead in looking up and converting the elements.
view
An array that does not own its data, but refers to another array's
diff --git a/numpy/doc/indexing.py b/numpy/doc/indexing.py
index 087a688bc..676015668 100644
--- a/numpy/doc/indexing.py
+++ b/numpy/doc/indexing.py
@@ -1,4 +1,5 @@
-"""==============
+"""
+==============
Array indexing
==============
@@ -93,7 +94,9 @@ well. A few examples illustrates best: ::
[21, 24, 27]])
Note that slices of arrays do not copy the internal array data but
-only produce new views of the original data.
+only produce new views of the original data. This is different from
+list or tuple slicing and an explicit ``copy()`` is recommended if
+the original data is not required anymore.
It is possible to index arrays with other arrays for the purposes of
selecting lists of values out of arrays into new arrays. There are
@@ -105,7 +108,7 @@ arrays and thus greatly improve performance.
It is possible to use special features to effectively increase the
number of dimensions in an array through indexing so the resulting
-array aquires the shape needed for use in an expression or with a
+array acquires the shape needed for use in an expression or with a
specific function.
Index arrays
@@ -293,6 +296,13 @@ to produce a resultant array of shape (3,2).
Likewise, slicing can be combined with broadcasted boolean indices: ::
+ >>> b = y > 20
+ >>> b
+ array([[False, False, False, False, False, False, False],
+ [False, False, False, False, False, False, False],
+ [False, False, False, False, False, False, False],
+ [ True, True, True, True, True, True, True],
+ [ True, True, True, True, True, True, True]])
>>> y[b[:,5],1:3]
array([[22, 23],
[29, 30]])
diff --git a/numpy/doc/structured_arrays.py b/numpy/doc/structured_arrays.py
index ab97c5df6..1343d2adc 100644
--- a/numpy/doc/structured_arrays.py
+++ b/numpy/doc/structured_arrays.py
@@ -13,8 +13,8 @@ datatypes organized as a sequence of named :term:`fields <field>`. For example,
>>> x = np.array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
>>> x
- array([('Rex', 9, 81.0), ('Fido', 3, 27.0)],
- dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')])
+ array([('Rex', 9, 81.), ('Fido', 3, 27.)],
+ dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
Here ``x`` is a one-dimensional array of length two whose datatype is a
structure with three fields: 1. A string of length 10 or less named 'name', 2.
@@ -32,37 +32,35 @@ with the field name::
array([9, 3], dtype=int32)
>>> x['age'] = 5
>>> x
- array([('Rex', 5, 81.0), ('Fido', 5, 27.0)],
- dtype=[('name', 'S10'), ('age', '<i4'), ('weight', '<f4')])
-
-Structured arrays are designed for low-level manipulation of structured data,
-for example, for interpreting binary blobs. Structured datatypes are
-designed to mimic 'structs' in the C language, making them also useful for
-interfacing with C code. For these purposes, numpy supports specialized
-features such as subarrays and nested datatypes, and allows manual control over
-the memory layout of the structure.
-
-For simple manipulation of tabular data other pydata projects, such as pandas,
-xarray, or DataArray, provide higher-level interfaces that may be more
-suitable. These projects may also give better performance for tabular data
-analysis because the C-struct-like memory layout of structured arrays can lead
-to poor cache behavior.
+ array([('Rex', 5, 81.), ('Fido', 5, 27.)],
+ dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
+
+Structured datatypes are designed to be able to mimic 'structs' in the C
+language, and share a similar memory layout. They are meant for interfacing with
+C code and for low-level manipulation of structured buffers, for example for
+interpreting binary blobs. For these purposes they support specialized features
+such as subarrays, nested datatypes, and unions, and allow control over the
+memory layout of the structure.
+
+Users looking to manipulate tabular data, such as stored in csv files, may find
+other pydata projects more suitable, such as xarray, pandas, or DataArray.
+These provide a high-level interface for tabular data analysis and are better
+optimized for that use. For instance, the C-struct-like memory layout of
+structured arrays in numpy can lead to poor cache behavior in comparison.
.. _defining-structured-types:
Structured Datatypes
====================
-To use structured arrays one first needs to define a structured datatype.
-
A structured datatype can be thought of as a sequence of bytes of a certain
length (the structure's :term:`itemsize`) which is interpreted as a collection
of fields. Each field has a name, a datatype, and a byte offset within the
structure. The datatype of a field may be any numpy datatype including other
-structured datatypes, and it may also be a :term:`sub-array` which behaves like
-an ndarray of a specified shape. The offsets of the fields are arbitrary, and
-fields may even overlap. These offsets are usually determined automatically by
-numpy, but can also be specified.
+structured datatypes, and it may also be a :term:`subarray data type` which
+behaves like an ndarray of a specified shape. The offsets of the fields are
+arbitrary, and fields may even overlap. These offsets are usually determined
+automatically by numpy, but can also be specified.
Structured Datatype Creation
----------------------------
@@ -81,14 +79,14 @@ summary they are:
convertible to a datatype, and ``shape`` is a tuple of integers specifying
subarray shape.
- >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2,2))])
- dtype=[('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
+ >>> np.dtype([('x', 'f4'), ('y', np.float32), ('z', 'f4', (2, 2))])
+ dtype([('x', '<f4'), ('y', '<f4'), ('z', '<f4', (2, 2))])
If ``fieldname`` is the empty string ``''``, the field will be given a
default name of the form ``f#``, where ``#`` is the integer index of the
field, counting from 0 from the left::
- >>> np.dtype([('x', 'f4'),('', 'i4'),('z', 'i8')])
+ >>> np.dtype([('x', 'f4'), ('', 'i4'), ('z', 'i8')])
dtype([('x', '<f4'), ('f1', '<i4'), ('z', '<i8')])
The byte offsets of the fields within the structure and the total
@@ -102,10 +100,10 @@ summary they are:
automatically, and the field names are given the default names ``f0``,
``f1``, etc. ::
- >>> np.dtype('i8,f4,S3')
+ >>> np.dtype('i8, f4, S3')
dtype([('f0', '<i8'), ('f1', '<f4'), ('f2', 'S3')])
- >>> np.dtype('3int8, float32, (2,3)float64')
- dtype([('f0', 'i1', 3), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
+ >>> np.dtype('3int8, float32, (2, 3)float64')
+ dtype([('f0', 'i1', (3,)), ('f1', '<f4'), ('f2', '<f8', (2, 3))])
3. A dictionary of field parameter arrays
@@ -123,10 +121,10 @@ summary they are:
enough to contain all the fields.
::
- >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4','f4']})
+ >>> np.dtype({'names': ['col1', 'col2'], 'formats': ['i4', 'f4']})
dtype([('col1', '<i4'), ('col2', '<f4')])
>>> np.dtype({'names': ['col1', 'col2'],
- ... 'formats': ['i4','f4'],
+ ... 'formats': ['i4', 'f4'],
... 'offsets': [0, 4],
... 'itemsize': 12})
dtype({'names':['col1','col2'], 'formats':['<i4','<f4'], 'offsets':[0,4], 'itemsize':12})
@@ -151,8 +149,8 @@ summary they are:
because older numpy code may use it. The keys of the dictionary are the
field names and the values are tuples specifying type and offset::
- >>> np.dtype=({'col1': ('i1',0), 'col2': ('f4',1)})
- dtype([(('col1'), 'i1'), (('col2'), '>f4')])
+ >>> np.dtype({'col1': ('i1', 0), 'col2': ('f4', 1)})
+ dtype([('col1', 'i1'), ('col2', '<f4')])
This form is discouraged because Python dictionaries do not preserve order
in Python versions before Python 3.6, and the order of the fields in a
@@ -180,7 +178,9 @@ values are tuples containing the dtype and byte offset of each field. ::
mappingproxy({'x': (dtype('int64'), 0), 'y': (dtype('float32'), 8)})
Both the ``names`` and ``fields`` attributes will equal ``None`` for
-unstructured arrays.
+unstructured arrays. The recommended way to test if a dtype is structured is
+with `if dt.names is not None` rather than `if dt.names`, to account for dtypes
+with 0 fields.
The string representation of a structured datatype is shown in the "list of
tuples" form if possible, otherwise numpy falls back to using the more general
@@ -202,7 +202,7 @@ are contiguous in memory. ::
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
- >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2'))
+ >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2'))
offsets: [0, 1, 2, 6, 7, 15]
itemsize: 17
@@ -215,7 +215,7 @@ in bytes for simple datatypes, see :c:member:`PyArray_Descr.alignment`. The
structure will also have trailing padding added so that its itemsize is a
multiple of the largest field's alignment. ::
- >>> print_offsets(np.dtype('u1,u1,i4,u1,i8,u2', align=True))
+ >>> print_offsets(np.dtype('u1, u1, i4, u1, i8, u2', align=True))
offsets: [0, 1, 4, 8, 16, 24]
itemsize: 32
@@ -231,7 +231,7 @@ each field's offset is a multiple of its size and that the itemsize is a
multiple of the largest field size, and raise an exception if not.
If the offsets of the fields and itemsize of a structured array satisfy the
-alignment conditions, the array will have the ``ALIGNED`` :ref:`flag
+alignment conditions, the array will have the ``ALIGNED`` :attr:`flag
<numpy.ndarray.flags>` set.
A convenience function :func:`numpy.lib.recfunctions.repack_fields` converts an
@@ -255,6 +255,7 @@ string, which will be the field's title and field name respectively. For
example::
>>> np.dtype([(('my title', 'name'), 'f4')])
+ dtype([(('my title', 'name'), '<f4')])
When using the first form of dictionary-based specification, the titles may be
supplied as an extra ``'titles'`` key as described above. When using the second
@@ -263,8 +264,9 @@ providing a 3-element tuple ``(datatype, offset, title)`` instead of the usual
2-element tuple::
>>> np.dtype({'name': ('i4', 0, 'my title')})
+ dtype([(('my title', 'name'), '<i4')])
-The ``dtype.fields`` dictionary will contain :term:`titles` as keys, if any
+The ``dtype.fields`` dictionary will contain titles as keys, if any
titles are used. This means effectively that a field with a title will be
represented twice in the fields dictionary. The tuple values for these fields
will also have a third element, the field title. Because of this, and because
@@ -275,6 +277,8 @@ in::
>>> for name in d.names:
... print(d.fields[name][:2])
+ (dtype('int64'), 0)
+ (dtype('float32'), 8)
Union types
-----------
@@ -305,8 +309,8 @@ in the array, and not a list or array as these will trigger numpy's
broadcasting rules. The tuple's elements are assigned to the successive fields
of the array, from left to right::
- >>> x = np.array([(1,2,3),(4,5,6)], dtype='i8,f4,f8')
- >>> x[1] = (7,8,9)
+ >>> x = np.array([(1, 2, 3), (4, 5, 6)], dtype='i8, f4, f8')
+ >>> x[1] = (7, 8, 9)
>>> x
array([(1, 2., 3.), (7, 8., 9.)],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '<f8')])
@@ -318,14 +322,14 @@ A scalar assigned to a structured element will be assigned to all fields. This
happens when a scalar is assigned to a structured array, or when an
unstructured array is assigned to a structured array::
- >>> x = np.zeros(2, dtype='i8,f4,?,S1')
+ >>> x = np.zeros(2, dtype='i8, f4, ?, S1')
>>> x[:] = 3
>>> x
- array([(3, 3.0, True, b'3'), (3, 3.0, True, b'3')],
+ array([(3, 3., True, b'3'), (3, 3., True, b'3')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
>>> x[:] = np.arange(2)
>>> x
- array([(0, 0.0, False, b'0'), (1, 1.0, True, b'1')],
+ array([(0, 0., False, b'0'), (1, 1., True, b'1')],
dtype=[('f0', '<i8'), ('f1', '<f4'), ('f2', '?'), ('f3', 'S1')])
Structured arrays can also be assigned to unstructured arrays, but only if the
@@ -335,10 +339,9 @@ structured datatype has just a single field::
>>> onefield = np.zeros(2, dtype=[('A', 'i4')])
>>> nostruct = np.zeros(2, dtype='i4')
>>> nostruct[:] = twofield
- ValueError: Can't cast from structure to non-structure, except if the structure only has a single field.
- >>> nostruct[:] = onefield
- >>> nostruct
- array([0, 0], dtype=int32)
+ Traceback (most recent call last):
+ ...
+ TypeError: Cannot cast scalar from dtype([('A', '<i4'), ('B', '<i4')]) to dtype('int32') according to the rule 'unsafe'
Assignment from other Structured Arrays
```````````````````````````````````````
@@ -355,7 +358,7 @@ included in any of the fields are unaffected. ::
>>> b = np.ones(3, dtype=[('x', 'f4'), ('y', 'S3'), ('z', 'O')])
>>> b[:] = a
>>> b
- array([(0.0, b'0.0', b''), (0.0, b'0.0', b''), (0.0, b'0.0', b'')],
+ array([(0., b'0.0', b''), (0., b'0.0', b''), (0., b'0.0', b'')],
dtype=[('x', '<f4'), ('y', 'S3'), ('z', 'O')])
@@ -374,7 +377,7 @@ Accessing Individual Fields
Individual fields of a structured array may be accessed and modified by indexing
the array with the field name. ::
- >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
+ >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> x['foo']
array([1, 3])
>>> x['foo'] = 10
@@ -386,9 +389,9 @@ The resulting array is a view into the original array. It shares the same
memory locations and writing to the view will modify the original array. ::
>>> y = x['bar']
- >>> y[:] = 10
+ >>> y[:] = 11
>>> x
- array([(10, 5.), (10, 5.)],
+ array([(10, 11.), (10, 11.)],
dtype=[('foo', '<i8'), ('bar', '<f4')])
This view has the same dtype and itemsize as the indexed field, so it is
@@ -397,6 +400,15 @@ typically a non-structured array, except in the case of nested structures.
>>> y.dtype, y.shape, y.strides
(dtype('float32'), (2,), (12,))
+If the accessed field is a subarray, the dimensions of the subarray
+are appended to the shape of the result::
+
+ >>> x = np.zeros((2, 2), dtype=[('a', np.int32), ('b', np.float64, (3, 3))])
+ >>> x['a'].shape
+ (2, 2)
+ >>> x['b'].shape
+ (2, 2, 3, 3)
+
Accessing Multiple Fields
```````````````````````````
@@ -404,11 +416,10 @@ One can index and assign to a structured array with a multi-field index, where
the index is a list of field names.
.. warning::
- The behavior of multi-field indexes will change from Numpy 1.15 to Numpy
- 1.16.
+ The behavior of multi-field indexes changed from Numpy 1.15 to Numpy 1.16.
-In Numpy 1.16, the result of indexing with a multi-field index will be a view
-into the original array, as follows::
+The result of indexing with a multi-field index is a view into the original
+array, as follows::
>>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
>>> a[['a', 'c']]
@@ -417,40 +428,71 @@ into the original array, as follows::
Assignment to the view modifies the original array. The view's fields will be
in the order they were indexed. Note that unlike for single-field indexing, the
-view's dtype has the same itemsize as the original array, and has fields at the
-same offsets as in the original array, and unindexed fields are merely missing.
-
-In Numpy 1.15, indexing an array with a multi-field index returns a copy of
-the result above for 1.16, but with fields packed together in memory as if
-passed through :func:`numpy.lib.recfunctions.repack_fields`. This is the
-behavior since Numpy 1.7.
+dtype of the view has the same itemsize as the original array, and has fields
+at the same offsets as in the original array, and unindexed fields are merely
+missing.
.. warning::
- The new behavior in Numpy 1.16 leads to extra "padding" bytes at the
- location of unindexed fields. You will need to update any code which depends
- on the data having a "packed" layout. For instance code such as::
-
- >>> a[['a','c']].view('i8') # will fail in Numpy 1.16
- ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
-
- will need to be changed. This code has raised a ``FutureWarning`` since
- Numpy 1.12.
-
- The following is a recommended fix, which will behave identically in Numpy
- 1.15 and Numpy 1.16::
-
- >>> from numpy.lib.recfunctions import repack_fields
- >>> repack_fields(a[['a','c']]).view('i8') # supported 1.15 and 1.16
- array([0, 0, 0])
-
-Assigning to an array with a multi-field index will behave the same in Numpy
-1.15 and Numpy 1.16. In both versions the assignment will modify the original
-array::
+ In Numpy 1.15, indexing an array with a multi-field index returned a copy of
+ the result above, but with fields packed together in memory as if
+ passed through :func:`numpy.lib.recfunctions.repack_fields`.
+
+ The new behavior as of Numpy 1.16 leads to extra "padding" bytes at the
+ location of unindexed fields compared to 1.15. You will need to update any
+ code which depends on the data having a "packed" layout. For instance code
+ such as::
+
+ >>> a[['a', 'c']].view('i8') # Fails in Numpy 1.16
+ Traceback (most recent call last):
+ File "<stdin>", line 1, in <module>
+ ValueError: When changing to a smaller dtype, its size must be a divisor of the size of original dtype
+
+ will need to be changed. This code has raised a ``FutureWarning`` since
+ Numpy 1.12, and similar code has raised ``FutureWarning`` since 1.7.
+
+ In 1.16 a number of functions have been introduced in the
+ :mod:`numpy.lib.recfunctions` module to help users account for this
+ change. These are
+ :func:`numpy.lib.recfunctions.repack_fields`.
+ :func:`numpy.lib.recfunctions.structured_to_unstructured`,
+ :func:`numpy.lib.recfunctions.unstructured_to_structured`,
+ :func:`numpy.lib.recfunctions.apply_along_fields`,
+ :func:`numpy.lib.recfunctions.assign_fields_by_name`, and
+ :func:`numpy.lib.recfunctions.require_fields`.
+
+ The function :func:`numpy.lib.recfunctions.repack_fields` can always be
+ used to reproduce the old behavior, as it will return a packed copy of the
+ structured array. The code above, for example, can be replaced with:
+
+ >>> from numpy.lib.recfunctions import repack_fields
+ >>> repack_fields(a[['a', 'c']]).view('i8') # supported in 1.16
+ array([0, 0, 0])
+
+ Furthermore, numpy now provides a new function
+ :func:`numpy.lib.recfunctions.structured_to_unstructured` which is a safer
+ and more efficient alternative for users who wish to convert structured
+ arrays to unstructured arrays, as the view above is often indeded to do.
+ This function allows safe conversion to an unstructured type taking into
+ account padding, often avoids a copy, and also casts the datatypes
+ as needed, unlike the view. Code such as:
+
+ >>> b = np.zeros(3, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
+ >>> b[['x', 'z']].view('f4')
+ array([0., 0., 0., 0., 0., 0., 0., 0., 0.], dtype=float32)
+
+ can be made safer by replacing with:
+
+ >>> from numpy.lib.recfunctions import structured_to_unstructured
+ >>> structured_to_unstructured(b[['x', 'z']])
+ array([0, 0, 0])
+
+
+Assignment to an array with a multi-field index modifies the original array::
>>> a[['a', 'c']] = (2, 3)
>>> a
- array([(2, 0, 3.0), (2, 0, 3.0), (2, 0, 3.0)],
- dtype=[('a', '<i8'), ('b', '<i4'), ('c', '<f8')])
+ array([(2, 0, 3.), (2, 0, 3.), (2, 0, 3.)],
+ dtype=[('a', '<i4'), ('b', '<i4'), ('c', '<f4')])
This obeys the structured array assignment rules described above. For example,
this means that one can swap the values of two fields using appropriate
@@ -464,19 +506,19 @@ Indexing with an Integer to get a Structured Scalar
Indexing a single element of a structured array (with an integer index) returns
a structured scalar::
- >>> x = np.array([(1, 2., 3.)], dtype='i,f,f')
+ >>> x = np.array([(1, 2., 3.)], dtype='i, f, f')
>>> scalar = x[0]
>>> scalar
(1, 2., 3.)
>>> type(scalar)
- numpy.void
+ <class 'numpy.void'>
Unlike other numpy scalars, structured scalars are mutable and act like views
into the original array, such that modifying the scalar will modify the
original array. Structured scalars also support access and assignment by field
name::
- >>> x = np.array([(1,2),(3,4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
+ >>> x = np.array([(1, 2), (3, 4)], dtype=[('foo', 'i8'), ('bar', 'f4')])
>>> s = x[0]
>>> s['bar'] = 100
>>> x
@@ -485,7 +527,7 @@ name::
Similarly to tuples, structured scalars can also be indexed with an integer::
- >>> scalar = np.array([(1, 2., 3.)], dtype='i,f,f')[0]
+ >>> scalar = np.array([(1, 2., 3.)], dtype='i, f, f')[0]
>>> scalar[0]
1
>>> scalar[1] = 4
@@ -496,7 +538,7 @@ numpy's integer types. Structured scalars may be converted to a tuple by
calling :func:`ndarray.item`::
>>> scalar.item(), type(scalar.item())
- ((1, 2.0, 3.0), tuple)
+ ((1, 4.0, 3.0), <class 'tuple'>)
Viewing Structured Arrays Containing Objects
--------------------------------------------
@@ -540,24 +582,24 @@ structured scalars obtained from the array.
The simplest way to create a record array is with :func:`numpy.rec.array`::
- >>> recordarr = np.rec.array([(1,2.,'Hello'),(2,3.,"World")],
+ >>> recordarr = np.rec.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'S10')])
>>> recordarr.bar
array([ 2., 3.], dtype=float32)
>>> recordarr[1:2]
- rec.array([(2, 3.0, 'World')],
+ rec.array([(2, 3., b'World')],
dtype=[('foo', '<i4'), ('bar', '<f4'), ('baz', 'S10')])
>>> recordarr[1:2].foo
array([2], dtype=int32)
>>> recordarr.foo[1:2]
array([2], dtype=int32)
>>> recordarr[1].baz
- 'World'
+ b'World'
:func:`numpy.rec.array` can convert a wide variety of arguments into record
arrays, including structured arrays::
- >>> arr = array([(1,2.,'Hello'),(2,3.,"World")],
+ >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'), ('bar', 'f4'), ('baz', 'S10')])
>>> recordarr = np.rec.array(arr)
@@ -566,11 +608,11 @@ creating record arrays, see :ref:`record array creation routines
<routines.array-creation.rec>`.
A record array representation of a structured array can be obtained using the
-appropriate :ref:`view`::
+appropriate `view <numpy-ndarray-view>`_::
- >>> arr = np.array([(1,2.,'Hello'),(2,3.,"World")],
+ >>> arr = np.array([(1, 2., 'Hello'), (2, 3., "World")],
... dtype=[('foo', 'i4'),('bar', 'f4'), ('baz', 'a10')])
- >>> recordarr = arr.view(dtype=dtype((np.record, arr.dtype)),
+ >>> recordarr = arr.view(dtype=np.dtype((np.record, arr.dtype)),
... type=np.recarray)
For convenience, viewing an ndarray as type :class:`np.recarray` will
@@ -590,12 +632,12 @@ recordarr was not a structured type::
Record array fields accessed by index or by attribute are returned as a record
array if the field has a structured type but as a plain ndarray otherwise. ::
- >>> recordarr = np.rec.array([('Hello', (1,2)),("World", (3,4))],
+ >>> recordarr = np.rec.array([('Hello', (1, 2)), ("World", (3, 4))],
... dtype=[('foo', 'S6'),('bar', [('A', int), ('B', int)])])
>>> type(recordarr.foo)
- <type 'numpy.ndarray'>
+ <class 'numpy.ndarray'>
>>> type(recordarr.bar)
- <class 'numpy.core.records.recarray'>
+ <class 'numpy.recarray'>
Note that if a field has the same name as an ndarray attribute, the ndarray
attribute takes precedence. Such fields will be inaccessible by attribute but
diff --git a/numpy/doc/subclassing.py b/numpy/doc/subclassing.py
index 4b983893a..d0685328e 100644
--- a/numpy/doc/subclassing.py
+++ b/numpy/doc/subclassing.py
@@ -118,7 +118,8 @@ For example, consider the following Python code:
def __new__(cls, *args):
print('Cls in __new__:', cls)
print('Args in __new__:', args)
- return object.__new__(cls, *args)
+ # The `object` type __new__ method takes a single argument.
+ return object.__new__(cls)
def __init__(self, *args):
print('type(self) in __init__:', type(self))
diff --git a/numpy/doc/ufuncs.py b/numpy/doc/ufuncs.py
index a112e559c..df2c455ec 100644
--- a/numpy/doc/ufuncs.py
+++ b/numpy/doc/ufuncs.py
@@ -13,9 +13,9 @@ example is the addition operator: ::
>>> np.array([0,2,3,4]) + np.array([1,1,-1,2])
array([1, 3, 2, 6])
-The unfunc module lists all the available ufuncs in numpy. Documentation on
+The ufunc module lists all the available ufuncs in numpy. Documentation on
the specific ufuncs may be found in those modules. This documentation is
-intended to address the more general aspects of unfuncs common to most of
+intended to address the more general aspects of ufuncs common to most of
them. All of the ufuncs that make use of Python operators (e.g., +, -, etc.)
have equivalent functions defined (e.g. add() for +)
diff --git a/numpy/dual.py b/numpy/dual.py
index 3a16a8ec5..651e845bb 100644
--- a/numpy/dual.py
+++ b/numpy/dual.py
@@ -51,14 +51,14 @@ _restore_dict = {}
def register_func(name, func):
if name not in __all__:
- raise ValueError("%s not a dual function." % name)
+ raise ValueError("{} not a dual function.".format(name))
f = sys._getframe(0).f_globals
_restore_dict[name] = f[name]
f[name] = func
def restore_func(name):
if name not in __all__:
- raise ValueError("%s not a dual function." % name)
+ raise ValueError("{} not a dual function.".format(name))
try:
val = _restore_dict[name]
except KeyError:
diff --git a/numpy/f2py/__init__.py b/numpy/f2py/__init__.py
index 23a4b7c41..42e3632fd 100644
--- a/numpy/f2py/__init__.py
+++ b/numpy/f2py/__init__.py
@@ -28,12 +28,16 @@ def compile(source,
extension='.f'
):
"""
- Build extension module from processing source with f2py.
+ Build extension module from a Fortran 77 source string with f2py.
Parameters
----------
- source : str
+ source : str or bytes
Fortran source of module / subroutine to compile
+
+ .. versionchanged:: 1.16.0
+ Accept str as well as bytes
+
modulename : str, optional
The name of the compiled python module
extra_args : str or list, optional
@@ -55,6 +59,16 @@ def compile(source,
.. versionadded:: 1.11.0
+ Returns
+ -------
+ result : int
+ 0 on success
+
+ Examples
+ --------
+ .. include:: compile_session.dat
+ :literal:
+
"""
import tempfile
import shlex
@@ -67,9 +81,11 @@ def compile(source,
else:
fname = source_fn
+ if not isinstance(source, str):
+ source = str(source, 'utf-8')
try:
with open(fname, 'w') as f:
- f.write(str(source))
+ f.write(source)
args = ['-c', '-m', modulename, f.name]
@@ -93,6 +109,7 @@ def compile(source,
output = ''
else:
status = 0
+ output = output.decode()
if verbose:
print(output)
finally:
diff --git a/numpy/f2py/__main__.py b/numpy/f2py/__main__.py
index 6eff41099..708f7f362 100644
--- a/numpy/f2py/__main__.py
+++ b/numpy/f2py/__main__.py
@@ -1,6 +1,6 @@
# See http://cens.ioc.ee/projects/f2py2e/
from __future__ import division, print_function
-from f2py2e import main
+from numpy.f2py.f2py2e import main
main()
diff --git a/numpy/f2py/capi_maps.py b/numpy/f2py/capi_maps.py
index 8e63d3cff..c41dd77c6 100644
--- a/numpy/f2py/capi_maps.py
+++ b/numpy/f2py/capi_maps.py
@@ -718,10 +718,7 @@ def modsign2map(m):
def cb_sign2map(a, var, index=None):
ret = {'varname': a}
- if index is None or 1: # disable 7712 patch
- ret['varname_i'] = ret['varname']
- else:
- ret['varname_i'] = ret['varname'] + '_' + str(index)
+ ret['varname_i'] = ret['varname']
ret['ctype'] = getctype(var)
if ret['ctype'] in c2capi_map:
ret['atype'] = c2capi_map[ret['ctype']]
diff --git a/numpy/f2py/cfuncs.py b/numpy/f2py/cfuncs.py
index d59b6301c..ccb7b3a32 100644
--- a/numpy/f2py/cfuncs.py
+++ b/numpy/f2py/cfuncs.py
@@ -542,7 +542,7 @@ cppmacros[
'ARRSIZE'] = '#define ARRSIZE(dims,rank) (_PyArray_multiply_list(dims,rank))'
cppmacros['OLDPYNUM'] = """\
#ifdef OLDPYNUM
-#error You need to install Numeric Python version 13 or higher. Get it from http:/sourceforge.net/project/?group_id=1369
+#error You need to install NumPy version 13 or higher. See https://scipy.org/install.html
#endif
"""
################# C functions ###############
@@ -1049,8 +1049,10 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg
CFUNCSMESS(\"create_cb_arglist\\n\");
tot=opt=ext=siz=0;
/* Get the total number of arguments */
- if (PyFunction_Check(fun))
+ if (PyFunction_Check(fun)) {
tmp_fun = fun;
+ Py_INCREF(tmp_fun);
+ }
else {
di = 1;
if (PyObject_HasAttrString(fun,\"im_func\")) {
@@ -1062,6 +1064,7 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg
tmp_fun = PyObject_GetAttrString(tmp,\"im_func\");
else {
tmp_fun = fun; /* built-in function */
+ Py_INCREF(tmp_fun);
tot = maxnofargs;
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
@@ -1073,6 +1076,7 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg
if (xa != NULL)
tot += PyTuple_Size((PyObject *)xa);
tmp_fun = fun;
+ Py_INCREF(tmp_fun);
}
else if (F2PyCapsule_Check(fun)) {
tot = maxnofargs;
@@ -1083,6 +1087,7 @@ static int create_cb_arglist(PyObject* fun,PyTupleObject* xa,const int maxnofarg
goto capi_fail;
}
tmp_fun = fun;
+ Py_INCREF(tmp_fun);
}
}
if (tmp_fun==NULL) {
@@ -1091,13 +1096,19 @@ goto capi_fail;
}
#if PY_VERSION_HEX >= 0x03000000
if (PyObject_HasAttrString(tmp_fun,\"__code__\")) {
- if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"__code__\"),\"co_argcount\")) {
#else
if (PyObject_HasAttrString(tmp_fun,\"func_code\")) {
- if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\"))
+ if (PyObject_HasAttrString(tmp = PyObject_GetAttrString(tmp_fun,\"func_code\"),\"co_argcount\")) {
#endif
- tot = PyInt_AsLong(PyObject_GetAttrString(tmp,\"co_argcount\")) - di;
- Py_XDECREF(tmp);
+ PyObject *tmp_argcount = PyObject_GetAttrString(tmp,\"co_argcount\");
+ Py_DECREF(tmp);
+ if (tmp_argcount == NULL) {
+ goto capi_fail;
+ }
+ tot = PyInt_AsLong(tmp_argcount) - di;
+ Py_DECREF(tmp_argcount);
+ }
}
/* Get the number of optional arguments */
#if PY_VERSION_HEX >= 0x03000000
@@ -1136,10 +1147,12 @@ goto capi_fail;
PyTuple_SET_ITEM(*args,i,tmp);
}
CFUNCSMESS(\"create_cb_arglist-end\\n\");
+ Py_DECREF(tmp_fun);
return 1;
capi_fail:
if ((PyErr_Occurred())==NULL)
PyErr_SetString(#modulename#_error,errmess);
+ Py_XDECREF(tmp_fun);
return 0;
}
"""
diff --git a/numpy/f2py/common_rules.py b/numpy/f2py/common_rules.py
index 1940d4211..f61d8810a 100644
--- a/numpy/f2py/common_rules.py
+++ b/numpy/f2py/common_rules.py
@@ -31,11 +31,9 @@ from .crackfortran import rmbadname
def findcommonblocks(block, top=1):
ret = []
if hascommon(block):
- for n in block['common'].keys():
- vars = {}
- for v in block['common'][n]:
- vars[v] = block['vars'][v]
- ret.append((n, block['common'][n], vars))
+ for key, value in block['common'].items():
+ vars_ = {v: block['vars'][v] for v in value}
+ ret.append((key, value, vars_))
elif hasbody(block):
for b in block['body']:
ret = ret + findcommonblocks(b, 0)
@@ -126,8 +124,9 @@ def buildhooks(m):
cadd('\t%s(f2pyinit%s,F2PYINIT%s)(f2py_setup_%s);'
% (F_FUNC, lower_name, name.upper(), name))
cadd('}\n')
- iadd('\tF2PyDict_SetItemString(d, \"%s\", PyFortranObject_New(f2py_%s_def,f2py_init_%s));' % (
- name, name, name))
+ iadd('\ttmp = PyFortranObject_New(f2py_%s_def,f2py_init_%s);' % (name, name))
+ iadd('\tF2PyDict_SetItemString(d, \"%s\", tmp);' % name)
+ iadd('\tPy_DECREF(tmp);')
tname = name.replace('_', '\\_')
dadd('\\subsection{Common block \\texttt{%s}}\n' % (tname))
dadd('\\begin{description}')
diff --git a/numpy/f2py/crackfortran.py b/numpy/f2py/crackfortran.py
index 361203a57..2aaf5d7c6 100755
--- a/numpy/f2py/crackfortran.py
+++ b/numpy/f2py/crackfortran.py
@@ -1849,10 +1849,8 @@ def postcrack2(block, tab='', param_map=None):
if not f90modulevars:
return block
if isinstance(block, list):
- ret = []
- for g in block:
- g = postcrack2(g, tab=tab + '\t', param_map=param_map)
- ret.append(g)
+ ret = [postcrack2(g, tab=tab + '\t', param_map=param_map)
+ for g in block]
return ret
setmesstext(block)
outmess('%sBlock: %s\n' % (tab, block['name']), 0)
@@ -1870,10 +1868,8 @@ def postcrack2(block, tab='', param_map=None):
val = kind['kind']
if val in param_map:
kind['kind'] = param_map[val]
- new_body = []
- for b in block['body']:
- b = postcrack2(b, tab=tab + '\t', param_map=param_map)
- new_body.append(b)
+ new_body = [postcrack2(b, tab=tab + '\t', param_map=param_map)
+ for b in block['body']]
block['body'] = new_body
return block
@@ -2403,7 +2399,7 @@ def _selected_real_kind_func(p, r=0, radix=0):
if p < 16:
return 8
machine = platform.machine().lower()
- if machine.startswith(('aarch64', 'power', 'ppc64', 's390x')):
+ if machine.startswith(('aarch64', 'power', 'ppc', 'riscv', 's390x', 'sparc')):
if p <= 20:
return 16
else:
@@ -2516,7 +2512,7 @@ def _eval_scalar(value, params):
value = value.split('_')[0]
try:
value = str(eval(value, {}, params))
- except (NameError, SyntaxError):
+ except (NameError, SyntaxError, TypeError):
return value
except Exception as msg:
errmess('"%s" in evaluating %r '
@@ -3211,10 +3207,8 @@ def vars2fortran(block, vars, args, tab='', as_interface=False):
vardef = '%s(kind=%s)' % (vardef, selector['kind'])
c = ' '
if 'attrspec' in vars[a]:
- attr = []
- for l in vars[a]['attrspec']:
- if l not in ['external']:
- attr.append(l)
+ attr = [l for l in vars[a]['attrspec']
+ if l not in ['external']]
if attr:
vardef = '%s, %s' % (vardef, ','.join(attr))
c = ','
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index 8750ed0b3..110337f92 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -269,7 +269,8 @@ def scaninputline(inputline):
options["f2py_wrapper_output"] = l
elif f == 1:
try:
- open(l).close()
+ with open(l):
+ pass
files.append(l)
except IOError as detail:
errmess('IOError: %s. Skipping file "%s".\n' %
@@ -333,9 +334,8 @@ def callcrackfortran(files, options):
if options['signsfile'][-6:] == 'stdout':
sys.stdout.write(pyf)
else:
- f = open(options['signsfile'], 'w')
- f.write(pyf)
- f.close()
+ with open(options['signsfile'], 'w') as f:
+ f.write(pyf)
if options["coutput"] is None:
for mod in postlist:
mod["coutput"] = "%smodule.c" % mod["name"]
@@ -396,8 +396,25 @@ def dict_append(d_out, d_in):
def run_main(comline_list):
- """Run f2py as if string.join(comline_list,' ') is used as a command line.
- In case of using -h flag, return None.
+ """
+ Equivalent to running::
+
+ f2py <args>
+
+ where ``<args>=string.join(<list>,' ')``, but in Python. Unless
+ ``-h`` is used, this function returns a dictionary containing
+ information on generated modules and their dependencies on source
+ files. For example, the command ``f2py -m scalar scalar.f`` can be
+ executed from Python as follows
+
+ You cannot build extension modules with this function, that is,
+ using ``-c`` is not allowed. Use ``compile`` command instead
+
+ Examples
+ --------
+ .. include:: run_main_session.dat
+ :literal:
+
"""
crackfortran.reset_global_f2py_vars()
f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
diff --git a/numpy/f2py/info.py b/numpy/f2py/info.py
deleted file mode 100644
index c895c5de2..000000000
--- a/numpy/f2py/info.py
+++ /dev/null
@@ -1,6 +0,0 @@
-"""Fortran to Python Interface Generator.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-postpone_import = True
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 23d36b2c2..f2f713bde 100644..100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -202,7 +202,7 @@ PyMODINIT_FUNC PyInit_#modulename#(void) {
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
-\tPyObject *m,*d, *s;
+\tPyObject *m,*d, *s, *tmp;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
@@ -215,6 +215,7 @@ PyMODINIT_FUNC init#modulename#(void) {
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R""" + """evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
+\tPy_DECREF(s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
@@ -222,10 +223,19 @@ PyMODINIT_FUNC init#modulename#(void) {
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
-\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
-\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
-\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
+\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
+\t/*
+\t * Store the error object inside the dict, so that it could get deallocated.
+\t * (in practice, this is a module, so it likely will not and cannot.)
+\t */
+\tPyDict_SetItemString(d, \"_#modulename#_error\", #modulename#_error);
+\tPy_DECREF(#modulename#_error);
+\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++) {
+\t\ttmp = PyFortranObject_NewAsAttr(&f2py_routine_defs[i]);
+\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name, tmp);
+\t\tPy_DECREF(tmp);
+\t}
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
@@ -235,7 +245,6 @@ PyMODINIT_FUNC init#modulename#(void) {
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
-
\treturn RETVAL;
}
#ifdef __cplusplus
@@ -436,12 +445,16 @@ rout_rules = [
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
- PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
+ tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
+ PyObject_SetAttrString(o,"_cpointer", tmp);
+ Py_DECREF(tmp);
#if PY_VERSION_HEX >= 0x03000000
- PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
+ s = PyUnicode_FromString("#name#");
#else
- PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
+ s = PyString_FromString("#name#");
#endif
+ PyObject_SetAttrString(o,"__name__", s);
+ Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
@@ -474,12 +487,16 @@ rout_rules = [
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
- PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
+ tmp = F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL);
+ PyObject_SetAttrString(o,"_cpointer", tmp);
+ Py_DECREF(tmp);
#if PY_VERSION_HEX >= 0x03000000
- PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
+ s = PyUnicode_FromString("#name#");
#else
- PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
+ s = PyString_FromString("#name#");
#endif
+ PyObject_SetAttrString(o,"__name__", s);
+ Py_DECREF(s);
}
'''},
'need': {l_not(l_or(ismoduleroutine, isdummyroutine)): ['F_WRAPPEDFUNC', 'F_FUNC']},
@@ -791,10 +808,13 @@ if (#varname#_capi==Py_None) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
- if (capi_tmp)
+ if (capi_tmp) {
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
- else
+ Py_DECREF(capi_tmp);
+ }
+ else {
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
+ }
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
@@ -1257,82 +1277,77 @@ def buildmodule(m, um):
fn = os.path.join(options['buildpath'], vrd['coutput'])
ret['csrc'] = fn
- f = open(fn, 'w')
- f.write(ar['modulebody'].replace('\t', 2 * ' '))
- f.close()
+ with open(fn, 'w') as f:
+ f.write(ar['modulebody'].replace('\t', 2 * ' '))
outmess('\tWrote C/API module "%s" to file "%s"\n' % (m['name'], fn))
if options['dorestdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.rest')
- f = open(fn, 'w')
- f.write('.. -*- rest -*-\n')
- f.write('\n'.join(ar['restdoc']))
- f.close()
+ with open(fn, 'w') as f:
+ f.write('.. -*- rest -*-\n')
+ f.write('\n'.join(ar['restdoc']))
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n' %
(options['buildpath'], vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(
options['buildpath'], vrd['modulename'] + 'module.tex')
ret['ltx'] = fn
- f = open(fn, 'w')
- f.write(
- '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
- if 'shortlatex' not in options:
+ with open(fn, 'w') as f:
f.write(
- '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
- f.write('\n'.join(ar['latexdoc']))
- if 'shortlatex' not in options:
- f.write('\\end{document}')
- f.close()
+ '%% This file is auto-generated with f2py (version:%s)\n' % (f2py_version))
+ if 'shortlatex' not in options:
+ f.write(
+ '\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
+ f.write('\n'.join(ar['latexdoc']))
+ if 'shortlatex' not in options:
+ f.write('\\end{document}')
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n' %
(options['buildpath'], vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
- f = open(wn, 'w')
- f.write('C -*- fortran -*-\n')
- f.write(
- 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
- f.write(
- 'C It contains Fortran 77 wrappers to fortran functions.\n')
- lines = []
- for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
- if l and l[0] == ' ':
- while len(l) >= 66:
- lines.append(l[:66] + '\n &')
- l = l[66:]
- lines.append(l + '\n')
- else:
- lines.append(l + '\n')
- lines = ''.join(lines).replace('\n &\n', '\n')
- f.write(lines)
- f.close()
+ with open(wn, 'w') as f:
+ f.write('C -*- fortran -*-\n')
+ f.write(
+ 'C This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
+ f.write(
+ 'C It contains Fortran 77 wrappers to fortran functions.\n')
+ lines = []
+ for l in ('\n\n'.join(funcwrappers) + '\n').split('\n'):
+ if l and l[0] == ' ':
+ while len(l) >= 66:
+ lines.append(l[:66] + '\n &')
+ l = l[66:]
+ lines.append(l + '\n')
+ else:
+ lines.append(l + '\n')
+ lines = ''.join(lines).replace('\n &\n', '\n')
+ f.write(lines)
outmess('\tFortran 77 wrappers are saved to "%s"\n' % (wn))
if funcwrappers2:
wn = os.path.join(
options['buildpath'], '%s-f2pywrappers2.f90' % (vrd['modulename']))
ret['fsrc'] = wn
- f = open(wn, 'w')
- f.write('! -*- f90 -*-\n')
- f.write(
- '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
- f.write(
- '! It contains Fortran 90 wrappers to fortran functions.\n')
- lines = []
- for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
- if len(l) > 72 and l[0] == ' ':
- lines.append(l[:72] + '&\n &')
- l = l[72:]
- while len(l) > 66:
- lines.append(l[:66] + '&\n &')
- l = l[66:]
- lines.append(l + '\n')
- else:
- lines.append(l + '\n')
- lines = ''.join(lines).replace('\n &\n', '\n')
- f.write(lines)
- f.close()
+ with open(wn, 'w') as f:
+ f.write('! -*- f90 -*-\n')
+ f.write(
+ '! This file is autogenerated with f2py (version:%s)\n' % (f2py_version))
+ f.write(
+ '! It contains Fortran 90 wrappers to fortran functions.\n')
+ lines = []
+ for l in ('\n\n'.join(funcwrappers2) + '\n').split('\n'):
+ if len(l) > 72 and l[0] == ' ':
+ lines.append(l[:72] + '&\n &')
+ l = l[72:]
+ while len(l) > 66:
+ lines.append(l[:66] + '&\n &')
+ l = l[66:]
+ lines.append(l + '\n')
+ else:
+ lines.append(l + '\n')
+ lines = ''.join(lines).replace('\n &\n', '\n')
+ f.write(lines)
outmess('\tFortran 90 wrappers are saved to "%s"\n' % (wn))
return ret
diff --git a/numpy/f2py/setup.py b/numpy/f2py/setup.py
index c0c50ce54..a8c1401aa 100644
--- a/numpy/f2py/setup.py
+++ b/numpy/f2py/setup.py
@@ -3,7 +3,7 @@
setup.py for installing F2PY
Usage:
- python setup.py install
+ pip install .
Copyright 2001-2005 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
diff --git a/numpy/f2py/src/fortranobject.c b/numpy/f2py/src/fortranobject.c
index 78b06f066..8aa55555d 100644
--- a/numpy/f2py/src/fortranobject.c
+++ b/numpy/f2py/src/fortranobject.c
@@ -39,19 +39,33 @@ PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) {
int i;
PyFortranObject *fp = NULL;
PyObject *v = NULL;
- if (init!=NULL) /* Initialize F90 module objects */
+ if (init!=NULL) { /* Initialize F90 module objects */
(*(init))();
- if ((fp = PyObject_New(PyFortranObject, &PyFortran_Type))==NULL) return NULL;
- if ((fp->dict = PyDict_New())==NULL) return NULL;
+ }
+ fp = PyObject_New(PyFortranObject, &PyFortran_Type);
+ if (fp == NULL) {
+ return NULL;
+ }
+ if ((fp->dict = PyDict_New()) == NULL) {
+ Py_DECREF(fp);
+ return NULL;
+ }
fp->len = 0;
- while (defs[fp->len].name != NULL) fp->len++;
- if (fp->len == 0) goto fail;
+ while (defs[fp->len].name != NULL) {
+ fp->len++;
+ }
+ if (fp->len == 0) {
+ goto fail;
+ }
fp->defs = defs;
- for (i=0;i<fp->len;i++)
+ for (i=0;i<fp->len;i++) {
if (fp->defs[i].rank == -1) { /* Is Fortran routine */
v = PyFortranObject_NewAsAttr(&(fp->defs[i]));
- if (v==NULL) return NULL;
+ if (v==NULL) {
+ goto fail;
+ }
PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
+ Py_XDECREF(v);
} else
if ((fp->defs[i].data)!=NULL) { /* Is Fortran variable or array (not allocatable) */
if (fp->defs[i].type == NPY_STRING) {
@@ -65,13 +79,16 @@ PyFortranObject_New(FortranDataDef* defs, f2py_void_func init) {
fp->defs[i].type, NULL, fp->defs[i].data, 0, NPY_ARRAY_FARRAY,
NULL);
}
- if (v==NULL) return NULL;
+ if (v==NULL) {
+ goto fail;
+ }
PyDict_SetItemString(fp->dict,fp->defs[i].name,v);
+ Py_XDECREF(v);
}
- Py_XDECREF(v);
+ }
return (PyObject *)fp;
fail:
- Py_XDECREF(v);
+ Py_XDECREF(fp);
return NULL;
}
@@ -80,7 +97,10 @@ PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module
PyFortranObject *fp = NULL;
fp = PyObject_New(PyFortranObject, &PyFortran_Type);
if (fp == NULL) return NULL;
- if ((fp->dict = PyDict_New())==NULL) return NULL;
+ if ((fp->dict = PyDict_New())==NULL) {
+ PyObject_Del(fp);
+ return NULL;
+ }
fp->len = 1;
fp->defs = defs;
return (PyObject *)fp;
@@ -91,7 +111,7 @@ PyFortranObject_NewAsAttr(FortranDataDef* defs) { /* used for calling F90 module
static void
fortran_dealloc(PyFortranObject *fp) {
Py_XDECREF(fp->dict);
- PyMem_Del(fp);
+ PyObject_Del(fp);
}
@@ -135,7 +155,7 @@ format_def(char *buf, Py_ssize_t size, FortranDataDef def)
if (def.data == NULL) {
static const char notalloc[] = ", not allocated";
- if (size < sizeof(notalloc)) {
+ if ((size_t) size < sizeof(notalloc)) {
return -1;
}
memcpy(p, notalloc, sizeof(notalloc));
diff --git a/numpy/f2py/src/test/foomodule.c b/numpy/f2py/src/test/foomodule.c
index d7ecc2519..caf3590d4 100644
--- a/numpy/f2py/src/test/foomodule.c
+++ b/numpy/f2py/src/test/foomodule.c
@@ -115,9 +115,7 @@ static PyMethodDef foo_module_methods[] = {
void initfoo() {
int i;
- PyObject *m, *d, *s;
- PyTypeObject *t;
- PyObject *f;
+ PyObject *m, *d, *s, *tmp;
import_array();
m = Py_InitModule("foo", foo_module_methods);
@@ -127,11 +125,17 @@ void initfoo() {
PyDict_SetItemString(d, "__doc__", s);
/* Fortran objects: */
- PyDict_SetItemString(d, "mod", PyFortranObject_New(f2py_mod_def,f2py_init_mod));
- PyDict_SetItemString(d, "foodata", PyFortranObject_New(f2py_foodata_def,f2py_init_foodata));
- for(i=0;f2py_routines_def[i].name!=NULL;i++)
- PyDict_SetItemString(d, f2py_routines_def[i].name,
- PyFortranObject_NewAsAttr(&f2py_routines_def[i]));
+ tmp = PyFortranObject_New(f2py_mod_def,f2py_init_mod);
+ PyDict_SetItemString(d, "mod", tmp);
+ Py_DECREF(tmp);
+ tmp = PyFortranObject_New(f2py_foodata_def,f2py_init_foodata);
+ PyDict_SetItemString(d, "foodata", tmp);
+ Py_DECREF(tmp);
+ for(i=0;f2py_routines_def[i].name!=NULL;i++) {
+ tmp = PyFortranObject_NewAsAttr(&f2py_routines_def[i]);
+ PyDict_SetItemString(d, f2py_routines_def[i].name, tmp);
+ Py_DECREF(tmp);
+ }
Py_DECREF(s);
diff --git a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
index 7f46303b0..978db4e69 100644
--- a/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
+++ b/numpy/f2py/tests/src/array_from_pyobj/wrapmodule.c
@@ -49,9 +49,18 @@ static PyObject *f2py_rout_wrap_call(PyObject *capi_self,
return NULL;
rank = PySequence_Length(dims_capi);
dims = malloc(rank*sizeof(npy_intp));
- for (i=0;i<rank;++i)
- dims[i] = (npy_intp)PyInt_AsLong(PySequence_GetItem(dims_capi,i));
-
+ for (i=0;i<rank;++i) {
+ PyObject *tmp;
+ tmp = PySequence_GetItem(dims_capi, i);
+ if (tmp == NULL) {
+ goto fail;
+ }
+ dims[i] = (npy_intp)PyInt_AsLong(tmp);
+ Py_DECREF(tmp);
+ if (dims[i] == -1 && PyErr_Occurred()) {
+ goto fail;
+ }
+ }
capi_arr_tmp = array_from_pyobj(type_num,dims,rank,intent|F2PY_INTENT_OUT,arr_capi);
if (capi_arr_tmp == NULL) {
free(dims);
@@ -60,6 +69,10 @@ static PyObject *f2py_rout_wrap_call(PyObject *capi_self,
capi_buildvalue = Py_BuildValue("N",capi_arr_tmp);
free(dims);
return capi_buildvalue;
+
+fail:
+ free(dims);
+ return NULL;
}
static char doc_f2py_rout_wrap_attrs[] = "\
@@ -97,7 +110,7 @@ static PyObject *f2py_rout_wrap_attrs(PyObject *capi_self,
PyTuple_SetItem(dimensions,i,PyInt_FromLong(PyArray_DIM(arr,i)));
PyTuple_SetItem(strides,i,PyInt_FromLong(PyArray_STRIDE(arr,i)));
}
- return Py_BuildValue("siOOO(cciii)ii",s,PyArray_NDIM(arr),
+ return Py_BuildValue("siNNO(cciii)ii",s,PyArray_NDIM(arr),
dimensions,strides,
(PyArray_BASE(arr)==NULL?Py_None:PyArray_BASE(arr)),
PyArray_DESCR(arr)->kind,
@@ -154,61 +167,69 @@ PyMODINIT_FUNC inittest_array_from_pyobj_ext(void) {
PyDict_SetItemString(d, "__doc__", s);
wrap_error = PyErr_NewException ("wrap.error", NULL, NULL);
Py_DECREF(s);
- PyDict_SetItemString(d, "F2PY_INTENT_IN", PyInt_FromLong(F2PY_INTENT_IN));
- PyDict_SetItemString(d, "F2PY_INTENT_INOUT", PyInt_FromLong(F2PY_INTENT_INOUT));
- PyDict_SetItemString(d, "F2PY_INTENT_OUT", PyInt_FromLong(F2PY_INTENT_OUT));
- PyDict_SetItemString(d, "F2PY_INTENT_HIDE", PyInt_FromLong(F2PY_INTENT_HIDE));
- PyDict_SetItemString(d, "F2PY_INTENT_CACHE", PyInt_FromLong(F2PY_INTENT_CACHE));
- PyDict_SetItemString(d, "F2PY_INTENT_COPY", PyInt_FromLong(F2PY_INTENT_COPY));
- PyDict_SetItemString(d, "F2PY_INTENT_C", PyInt_FromLong(F2PY_INTENT_C));
- PyDict_SetItemString(d, "F2PY_OPTIONAL", PyInt_FromLong(F2PY_OPTIONAL));
- PyDict_SetItemString(d, "F2PY_INTENT_INPLACE", PyInt_FromLong(F2PY_INTENT_INPLACE));
- PyDict_SetItemString(d, "NPY_BOOL", PyInt_FromLong(NPY_BOOL));
- PyDict_SetItemString(d, "NPY_BYTE", PyInt_FromLong(NPY_BYTE));
- PyDict_SetItemString(d, "NPY_UBYTE", PyInt_FromLong(NPY_UBYTE));
- PyDict_SetItemString(d, "NPY_SHORT", PyInt_FromLong(NPY_SHORT));
- PyDict_SetItemString(d, "NPY_USHORT", PyInt_FromLong(NPY_USHORT));
- PyDict_SetItemString(d, "NPY_INT", PyInt_FromLong(NPY_INT));
- PyDict_SetItemString(d, "NPY_UINT", PyInt_FromLong(NPY_UINT));
- PyDict_SetItemString(d, "NPY_INTP", PyInt_FromLong(NPY_INTP));
- PyDict_SetItemString(d, "NPY_UINTP", PyInt_FromLong(NPY_UINTP));
- PyDict_SetItemString(d, "NPY_LONG", PyInt_FromLong(NPY_LONG));
- PyDict_SetItemString(d, "NPY_ULONG", PyInt_FromLong(NPY_ULONG));
- PyDict_SetItemString(d, "NPY_LONGLONG", PyInt_FromLong(NPY_LONGLONG));
- PyDict_SetItemString(d, "NPY_ULONGLONG", PyInt_FromLong(NPY_ULONGLONG));
- PyDict_SetItemString(d, "NPY_FLOAT", PyInt_FromLong(NPY_FLOAT));
- PyDict_SetItemString(d, "NPY_DOUBLE", PyInt_FromLong(NPY_DOUBLE));
- PyDict_SetItemString(d, "NPY_LONGDOUBLE", PyInt_FromLong(NPY_LONGDOUBLE));
- PyDict_SetItemString(d, "NPY_CFLOAT", PyInt_FromLong(NPY_CFLOAT));
- PyDict_SetItemString(d, "NPY_CDOUBLE", PyInt_FromLong(NPY_CDOUBLE));
- PyDict_SetItemString(d, "NPY_CLONGDOUBLE", PyInt_FromLong(NPY_CLONGDOUBLE));
- PyDict_SetItemString(d, "NPY_OBJECT", PyInt_FromLong(NPY_OBJECT));
- PyDict_SetItemString(d, "NPY_STRING", PyInt_FromLong(NPY_STRING));
- PyDict_SetItemString(d, "NPY_UNICODE", PyInt_FromLong(NPY_UNICODE));
- PyDict_SetItemString(d, "NPY_VOID", PyInt_FromLong(NPY_VOID));
- PyDict_SetItemString(d, "NPY_NTYPES", PyInt_FromLong(NPY_NTYPES));
- PyDict_SetItemString(d, "NPY_NOTYPE", PyInt_FromLong(NPY_NOTYPE));
- PyDict_SetItemString(d, "NPY_USERDEF", PyInt_FromLong(NPY_USERDEF));
-
- PyDict_SetItemString(d, "CONTIGUOUS", PyInt_FromLong(NPY_ARRAY_C_CONTIGUOUS));
- PyDict_SetItemString(d, "FORTRAN", PyInt_FromLong(NPY_ARRAY_F_CONTIGUOUS));
- PyDict_SetItemString(d, "OWNDATA", PyInt_FromLong(NPY_ARRAY_OWNDATA));
- PyDict_SetItemString(d, "FORCECAST", PyInt_FromLong(NPY_ARRAY_FORCECAST));
- PyDict_SetItemString(d, "ENSURECOPY", PyInt_FromLong(NPY_ARRAY_ENSURECOPY));
- PyDict_SetItemString(d, "ENSUREARRAY", PyInt_FromLong(NPY_ARRAY_ENSUREARRAY));
- PyDict_SetItemString(d, "ALIGNED", PyInt_FromLong(NPY_ARRAY_ALIGNED));
- PyDict_SetItemString(d, "WRITEABLE", PyInt_FromLong(NPY_ARRAY_WRITEABLE));
- PyDict_SetItemString(d, "UPDATEIFCOPY", PyInt_FromLong(NPY_ARRAY_UPDATEIFCOPY));
- PyDict_SetItemString(d, "WRITEBACKIFCOPY", PyInt_FromLong(NPY_ARRAY_WRITEBACKIFCOPY));
-
- PyDict_SetItemString(d, "BEHAVED", PyInt_FromLong(NPY_ARRAY_BEHAVED));
- PyDict_SetItemString(d, "BEHAVED_NS", PyInt_FromLong(NPY_ARRAY_BEHAVED_NS));
- PyDict_SetItemString(d, "CARRAY", PyInt_FromLong(NPY_ARRAY_CARRAY));
- PyDict_SetItemString(d, "FARRAY", PyInt_FromLong(NPY_ARRAY_FARRAY));
- PyDict_SetItemString(d, "CARRAY_RO", PyInt_FromLong(NPY_ARRAY_CARRAY_RO));
- PyDict_SetItemString(d, "FARRAY_RO", PyInt_FromLong(NPY_ARRAY_FARRAY_RO));
- PyDict_SetItemString(d, "DEFAULT", PyInt_FromLong(NPY_ARRAY_DEFAULT));
- PyDict_SetItemString(d, "UPDATE_ALL", PyInt_FromLong(NPY_ARRAY_UPDATE_ALL));
+
+#define ADDCONST(NAME, CONST) \
+ s = PyInt_FromLong(CONST); \
+ PyDict_SetItemString(d, NAME, s); \
+ Py_DECREF(s)
+
+ ADDCONST("F2PY_INTENT_IN", F2PY_INTENT_IN);
+ ADDCONST("F2PY_INTENT_INOUT", F2PY_INTENT_INOUT);
+ ADDCONST("F2PY_INTENT_OUT", F2PY_INTENT_OUT);
+ ADDCONST("F2PY_INTENT_HIDE", F2PY_INTENT_HIDE);
+ ADDCONST("F2PY_INTENT_CACHE", F2PY_INTENT_CACHE);
+ ADDCONST("F2PY_INTENT_COPY", F2PY_INTENT_COPY);
+ ADDCONST("F2PY_INTENT_C", F2PY_INTENT_C);
+ ADDCONST("F2PY_OPTIONAL", F2PY_OPTIONAL);
+ ADDCONST("F2PY_INTENT_INPLACE", F2PY_INTENT_INPLACE);
+ ADDCONST("NPY_BOOL", NPY_BOOL);
+ ADDCONST("NPY_BYTE", NPY_BYTE);
+ ADDCONST("NPY_UBYTE", NPY_UBYTE);
+ ADDCONST("NPY_SHORT", NPY_SHORT);
+ ADDCONST("NPY_USHORT", NPY_USHORT);
+ ADDCONST("NPY_INT", NPY_INT);
+ ADDCONST("NPY_UINT", NPY_UINT);
+ ADDCONST("NPY_INTP", NPY_INTP);
+ ADDCONST("NPY_UINTP", NPY_UINTP);
+ ADDCONST("NPY_LONG", NPY_LONG);
+ ADDCONST("NPY_ULONG", NPY_ULONG);
+ ADDCONST("NPY_LONGLONG", NPY_LONGLONG);
+ ADDCONST("NPY_ULONGLONG", NPY_ULONGLONG);
+ ADDCONST("NPY_FLOAT", NPY_FLOAT);
+ ADDCONST("NPY_DOUBLE", NPY_DOUBLE);
+ ADDCONST("NPY_LONGDOUBLE", NPY_LONGDOUBLE);
+ ADDCONST("NPY_CFLOAT", NPY_CFLOAT);
+ ADDCONST("NPY_CDOUBLE", NPY_CDOUBLE);
+ ADDCONST("NPY_CLONGDOUBLE", NPY_CLONGDOUBLE);
+ ADDCONST("NPY_OBJECT", NPY_OBJECT);
+ ADDCONST("NPY_STRING", NPY_STRING);
+ ADDCONST("NPY_UNICODE", NPY_UNICODE);
+ ADDCONST("NPY_VOID", NPY_VOID);
+ ADDCONST("NPY_NTYPES", NPY_NTYPES);
+ ADDCONST("NPY_NOTYPE", NPY_NOTYPE);
+ ADDCONST("NPY_USERDEF", NPY_USERDEF);
+
+ ADDCONST("CONTIGUOUS", NPY_ARRAY_C_CONTIGUOUS);
+ ADDCONST("FORTRAN", NPY_ARRAY_F_CONTIGUOUS);
+ ADDCONST("OWNDATA", NPY_ARRAY_OWNDATA);
+ ADDCONST("FORCECAST", NPY_ARRAY_FORCECAST);
+ ADDCONST("ENSURECOPY", NPY_ARRAY_ENSURECOPY);
+ ADDCONST("ENSUREARRAY", NPY_ARRAY_ENSUREARRAY);
+ ADDCONST("ALIGNED", NPY_ARRAY_ALIGNED);
+ ADDCONST("WRITEABLE", NPY_ARRAY_WRITEABLE);
+ ADDCONST("UPDATEIFCOPY", NPY_ARRAY_UPDATEIFCOPY);
+ ADDCONST("WRITEBACKIFCOPY", NPY_ARRAY_WRITEBACKIFCOPY);
+
+ ADDCONST("BEHAVED", NPY_ARRAY_BEHAVED);
+ ADDCONST("BEHAVED_NS", NPY_ARRAY_BEHAVED_NS);
+ ADDCONST("CARRAY", NPY_ARRAY_CARRAY);
+ ADDCONST("FARRAY", NPY_ARRAY_FARRAY);
+ ADDCONST("CARRAY_RO", NPY_ARRAY_CARRAY_RO);
+ ADDCONST("FARRAY_RO", NPY_ARRAY_FARRAY_RO);
+ ADDCONST("DEFAULT", NPY_ARRAY_DEFAULT);
+ ADDCONST("UPDATE_ALL", NPY_ARRAY_UPDATE_ALL);
+
+#undef ADDCONST
if (PyErr_Occurred())
Py_FatalError("can't initialize module wrap");
diff --git a/numpy/f2py/tests/test_block_docstring.py b/numpy/f2py/tests/test_block_docstring.py
index b2981aa82..4f1678980 100644
--- a/numpy/f2py/tests/test_block_docstring.py
+++ b/numpy/f2py/tests/test_block_docstring.py
@@ -1,11 +1,10 @@
from __future__ import division, absolute_import, print_function
-import textwrap
import sys
import pytest
from . import util
-from numpy.testing import assert_equal
+from numpy.testing import assert_equal, IS_PYPY
class TestBlockDocString(util.F2PyTest):
code = """
@@ -19,6 +18,7 @@ class TestBlockDocString(util.F2PyTest):
@pytest.mark.skipif(sys.platform=='win32',
reason='Fails with MinGW64 Gfortran (Issue #9673)')
+ @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
def test_block_docstring(self):
expected = "'i'-array(2,3)\n"
assert_equal(self.module.block.__doc__, expected)
diff --git a/numpy/f2py/tests/test_callback.py b/numpy/f2py/tests/test_callback.py
index 824ef7b0c..21c29ba5f 100644
--- a/numpy/f2py/tests/test_callback.py
+++ b/numpy/f2py/tests/test_callback.py
@@ -68,7 +68,7 @@ cf2py intent(out) a
@pytest.mark.slow
def test_docstring(self):
- expected = """
+ expected = textwrap.dedent("""\
a = t(fun,[fun_extra_args])
Wrapper for ``t``.
@@ -93,8 +93,8 @@ cf2py intent(out) a
def fun(): return a
Return objects:
a : int
- """
- assert_equal(self.module.t.__doc__, textwrap.dedent(expected).lstrip())
+ """)
+ assert_equal(self.module.t.__doc__, expected)
def check_function(self, name):
t = getattr(self.module, name)
diff --git a/numpy/f2py/tests/test_compile_function.py b/numpy/f2py/tests/test_compile_function.py
index 74e0804e2..40ea7997f 100644
--- a/numpy/f2py/tests/test_compile_function.py
+++ b/numpy/f2py/tests/test_compile_function.py
@@ -29,6 +29,7 @@ def setup_module():
@pytest.mark.parametrize(
"extra_args", [['--noopt', '--debug'], '--noopt --debug', '']
)
+@pytest.mark.leaks_references(reason="Imported module seems never deleted.")
def test_f2py_init_compile(extra_args):
# flush through the f2py __init__ compile() function code path as a
# crude test for input handling following migration from
@@ -81,6 +82,9 @@ def test_f2py_init_compile(extra_args):
return_check = import_module(modname)
calc_result = return_check.foo()
assert_equal(calc_result, 15)
+ # Removal from sys.modules, is not as such necessary. Even with
+ # removal, the module (dict) stays alive.
+ del sys.modules[modname]
def test_f2py_init_compile_failure():
@@ -106,3 +110,20 @@ def test_f2py_init_compile_bad_cmd():
assert_equal(ret_val, 127)
finally:
sys.executable = temp
+
+
+@pytest.mark.parametrize('fsource',
+ ['program test_f2py\nend program test_f2py',
+ b'program test_f2py\nend program test_f2py',])
+def test_compile_from_strings(tmpdir, fsource):
+ # Make sure we can compile str and bytes gh-12796
+ cwd = os.getcwd()
+ try:
+ os.chdir(str(tmpdir))
+ ret_val = numpy.f2py.compile(
+ fsource,
+ modulename='test_compile_from_strings',
+ extension='.f90')
+ assert_equal(ret_val, 0)
+ finally:
+ os.chdir(cwd)
diff --git a/numpy/f2py/tests/test_mixed.py b/numpy/f2py/tests/test_mixed.py
index 28268ecc0..0337538ff 100644
--- a/numpy/f2py/tests/test_mixed.py
+++ b/numpy/f2py/tests/test_mixed.py
@@ -25,7 +25,7 @@ class TestMixed(util.F2PyTest):
@pytest.mark.slow
def test_docstring(self):
- expected = """
+ expected = textwrap.dedent("""\
a = bar11()
Wrapper for ``bar11``.
@@ -33,6 +33,5 @@ class TestMixed(util.F2PyTest):
Returns
-------
a : int
- """
- assert_equal(self.module.bar11.__doc__,
- textwrap.dedent(expected).lstrip())
+ """)
+ assert_equal(self.module.bar11.__doc__, expected)
diff --git a/numpy/f2py/tests/test_parameter.py b/numpy/f2py/tests/test_parameter.py
index a0bbd9460..6a378687a 100644
--- a/numpy/f2py/tests/test_parameter.py
+++ b/numpy/f2py/tests/test_parameter.py
@@ -1,7 +1,6 @@
from __future__ import division, absolute_import, print_function
import os
-import math
import pytest
import numpy as np
diff --git a/numpy/f2py/tests/test_quoted_character.py b/numpy/f2py/tests/test_quoted_character.py
index 38e380802..c9a1c36f5 100644
--- a/numpy/f2py/tests/test_quoted_character.py
+++ b/numpy/f2py/tests/test_quoted_character.py
@@ -4,13 +4,9 @@
from __future__ import division, absolute_import, print_function
import sys
-import os
-import uuid
from importlib import import_module
import pytest
-import numpy.f2py
-
from numpy.testing import assert_equal
from . import util
diff --git a/numpy/f2py/tests/test_regression.py b/numpy/f2py/tests/test_regression.py
index d695de61b..3adae635d 100644
--- a/numpy/f2py/tests/test_regression.py
+++ b/numpy/f2py/tests/test_regression.py
@@ -1,7 +1,6 @@
from __future__ import division, absolute_import, print_function
import os
-import math
import pytest
import numpy as np
diff --git a/numpy/f2py/tests/util.py b/numpy/f2py/tests/util.py
index 73fc27b96..77cb612d0 100644
--- a/numpy/f2py/tests/util.py
+++ b/numpy/f2py/tests/util.py
@@ -15,9 +15,7 @@ import shutil
import atexit
import textwrap
import re
-import random
import pytest
-import numpy.f2py
from numpy.compat import asbytes, asstr
from numpy.testing import temppath
@@ -26,13 +24,14 @@ from importlib import import_module
try:
from hashlib import md5
except ImportError:
- from md5 import new as md5
+ from md5 import new as md5 # noqa: F401
#
# Maintaining a temporary module directory
#
_module_dir = None
+_module_num = 5403
def _cleanup():
@@ -61,13 +60,14 @@ def get_module_dir():
def get_temp_module_name():
# Assume single-threaded, and the module dir usable only by this thread
+ global _module_num
d = get_module_dir()
- for j in range(5403, 9999999):
- name = "_test_ext_module_%d" % j
- fn = os.path.join(d, name)
- if name not in sys.modules and not os.path.isfile(fn + '.py'):
- return name
- raise RuntimeError("Failed to create a temporary module name")
+ name = "_test_ext_module_%d" % _module_num
+ _module_num += 1
+ if name in sys.modules:
+ # this should not be possible, but check anyway
+ raise RuntimeError("Temporary module name already in use.")
+ return name
def _memoize(func):
@@ -182,27 +182,27 @@ def _get_compiler_status():
# XXX: this is really ugly. But I don't know how to invoke Distutils
# in a safer way...
- code = """
-import os
-import sys
-sys.path = %(syspath)s
-
-def configuration(parent_name='',top_path=None):
- global config
- from numpy.distutils.misc_util import Configuration
- config = Configuration('', parent_name, top_path)
- return config
-
-from numpy.distutils.core import setup
-setup(configuration=configuration)
-
-config_cmd = config.get_config_cmd()
-have_c = config_cmd.try_compile('void foo() {}')
-print('COMPILERS:%%d,%%d,%%d' %% (have_c,
- config.have_f77c(),
- config.have_f90c()))
-sys.exit(99)
-"""
+ code = textwrap.dedent("""\
+ import os
+ import sys
+ sys.path = %(syspath)s
+
+ def configuration(parent_name='',top_path=None):
+ global config
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('', parent_name, top_path)
+ return config
+
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
+
+ config_cmd = config.get_config_cmd()
+ have_c = config_cmd.try_compile('void foo() {}')
+ print('COMPILERS:%%d,%%d,%%d' %% (have_c,
+ config.have_f77c(),
+ config.have_f90c()))
+ sys.exit(99)
+ """)
code = code % dict(syspath=repr(sys.path))
with temppath(suffix='.py') as script:
@@ -261,21 +261,21 @@ def build_module_distutils(source_files, config_code, module_name, **kw):
# Build script
config_code = textwrap.dedent(config_code).replace("\n", "\n ")
- code = """\
-import os
-import sys
-sys.path = %(syspath)s
-
-def configuration(parent_name='',top_path=None):
- from numpy.distutils.misc_util import Configuration
- config = Configuration('', parent_name, top_path)
- %(config_code)s
- return config
-
-if __name__ == "__main__":
- from numpy.distutils.core import setup
- setup(configuration=configuration)
-""" % dict(config_code=config_code, syspath=repr(sys.path))
+ code = textwrap.dedent("""\
+ import os
+ import sys
+ sys.path = %(syspath)s
+
+ def configuration(parent_name='',top_path=None):
+ from numpy.distutils.misc_util import Configuration
+ config = Configuration('', parent_name, top_path)
+ %(config_code)s
+ return config
+
+ if __name__ == "__main__":
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
+ """) % dict(config_code=config_code, syspath=repr(sys.path))
script = os.path.join(d, get_temp_module_name() + '.py')
dst_sources.append(script)
diff --git a/numpy/fft/README.md b/numpy/fft/README.md
new file mode 100644
index 000000000..f79188139
--- /dev/null
+++ b/numpy/fft/README.md
@@ -0,0 +1,48 @@
+PocketFFT
+---------
+
+This is a heavily modified implementation of FFTPack [1,2], with the following
+advantages:
+
+- strictly C99 compliant
+- more accurate twiddle factor computation
+- very fast plan generation
+- worst case complexity for transform sizes with large prime factors is
+ `N*log(N)`, because Bluestein's algorithm [3] is used for these cases.
+
+
+Some code details
+-----------------
+
+Twiddle factor computation:
+
+- making use of symmetries to reduce number of sin/cos evaluations
+- all angles are reduced to the range `[0; pi/4]` for higher accuracy
+- an adapted implementation of `sincospi()` is used, which actually computes
+ `sin(x)` and `(cos(x)-1)`.
+- if `n` sin/cos pairs are required, the adjusted `sincospi()` is only called
+ `2*sqrt(n)` times; the remaining values are obtained by evaluating the
+ angle addition theorems in a numerically accurate way.
+
+Parallel invocation:
+
+- Plans only contain read-only data; all temporary arrays are allocated and
+ deallocated during an individual FFT execution. This means that a single plan
+ can be used in several threads at the same time.
+
+Efficient codelets are available for the factors:
+
+- 2, 3, 4, 5, 7, 11 for complex-valued FFTs
+- 2, 3, 4, 5 for real-valued FFTs
+
+Larger prime factors are handled by somewhat less efficient, generic routines.
+
+For lengths with very large prime factors, Bluestein's algorithm is used, and
+instead of an FFT of length `n`, a convolution of length `n2 >= 2*n-1`
+is performed, where `n2` is chosen to be highly composite.
+
+
+[1] Swarztrauber, P. 1982, Vectorizing the Fast Fourier Transforms
+ (New York: Academic Press), 51
+[2] https://www.netlib.org/fftpack/
+[3] https://en.wikipedia.org/wiki/Chirp_Z-transform
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index 44243b483..fe95d8b17 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -1,9 +1,191 @@
-from __future__ import division, absolute_import, print_function
+"""
+Discrete Fourier Transform (:mod:`numpy.fft`)
+=============================================
+
+.. currentmodule:: numpy.fft
+
+Standard FFTs
+-------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fft Discrete Fourier transform.
+ ifft Inverse discrete Fourier transform.
+ fft2 Discrete Fourier transform in two dimensions.
+ ifft2 Inverse discrete Fourier transform in two dimensions.
+ fftn Discrete Fourier transform in N-dimensions.
+ ifftn Inverse discrete Fourier transform in N dimensions.
+
+Real FFTs
+---------
+
+.. autosummary::
+ :toctree: generated/
+
+ rfft Real discrete Fourier transform.
+ irfft Inverse real discrete Fourier transform.
+ rfft2 Real discrete Fourier transform in two dimensions.
+ irfft2 Inverse real discrete Fourier transform in two dimensions.
+ rfftn Real discrete Fourier transform in N dimensions.
+ irfftn Inverse real discrete Fourier transform in N dimensions.
+
+Hermitian FFTs
+--------------
+
+.. autosummary::
+ :toctree: generated/
+
+ hfft Hermitian discrete Fourier transform.
+ ihfft Inverse Hermitian discrete Fourier transform.
+
+Helper routines
+---------------
+
+.. autosummary::
+ :toctree: generated/
+
+ fftfreq Discrete Fourier Transform sample frequencies.
+ rfftfreq DFT sample frequencies (for usage with rfft, irfft).
+ fftshift Shift zero-frequency component to center of spectrum.
+ ifftshift Inverse of fftshift.
+
+
+Background information
+----------------------
+
+Fourier analysis is fundamentally a method for expressing a function as a
+sum of periodic components, and for recovering the function from those
+components. When both the function and its Fourier transform are
+replaced with discretized counterparts, it is called the discrete Fourier
+transform (DFT). The DFT has become a mainstay of numerical computing in
+part because of a very fast algorithm for computing it, called the Fast
+Fourier Transform (FFT), which was known to Gauss (1805) and was brought
+to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
+provide an accessible introduction to Fourier analysis and its
+applications.
+
+Because the discrete Fourier transform separates its input into
+components that contribute at discrete frequencies, it has a great number
+of applications in digital signal processing, e.g., for filtering, and in
+this context the discretized input to the transform is customarily
+referred to as a *signal*, which exists in the *time domain*. The output
+is called a *spectrum* or *transform* and exists in the *frequency
+domain*.
+
+Implementation details
+----------------------
+
+There are many ways to define the DFT, varying in the sign of the
+exponent, normalization, etc. In this implementation, the DFT is defined
+as
+
+.. math::
+ A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
+ \\qquad k = 0,\\ldots,n-1.
+
+The DFT is in general defined for complex inputs and outputs, and a
+single-frequency component at linear frequency :math:`f` is
+represented by a complex exponential
+:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
+is the sampling interval.
-# To get sub-modules
-from .info import __doc__
+The values in the result follow so-called "standard" order: If ``A =
+fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
+the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
+contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
+negative-frequency terms, in order of decreasingly negative frequency.
+For an even number of input points, ``A[n/2]`` represents both positive and
+negative Nyquist frequency, and is also purely real for real input. For
+an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
+frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
+The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
+of corresponding elements in the output. The routine
+``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
+zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
+that shift.
+
+When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
+is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
+The phase spectrum is obtained by ``np.angle(A)``.
+
+The inverse DFT is defined as
+
+.. math::
+ a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
+ \\qquad m = 0,\\ldots,n-1.
+
+It differs from the forward transform by the sign of the exponential
+argument and the default normalization by :math:`1/n`.
+
+Normalization
+-------------
+The default normalization has the direct transforms unscaled and the inverse
+transforms scaled by :math:`1/n`. It is possible to obtain unitary
+transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
+`None`) so that both direct and inverse transforms will be scaled by
+:math:`1/\\sqrt{n}`.
+
+Real and Hermitian transforms
+-----------------------------
+
+When the input is purely real, its transform is Hermitian, i.e., the
+component at frequency :math:`f_k` is the complex conjugate of the
+component at frequency :math:`-f_k`, which means that for real
+inputs there is no information in the negative frequency components that
+is not already available from the positive frequency components.
+The family of `rfft` functions is
+designed to operate on real inputs, and exploits this symmetry by
+computing only the positive frequency components, up to and including the
+Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
+output points. The inverses of this family assume the same symmetry of
+their input, and for an output of ``n`` points use ``n/2+1`` input points.
+
+Correspondingly, when the spectrum is purely real, the signal is
+Hermitian. The `hfft` family of functions exploits this symmetry by
+using ``n/2+1`` complex points in the input (time) domain for ``n`` real
+points in the frequency domain.
+
+In higher dimensions, FFTs are used, e.g., for image analysis and
+filtering. The computational efficiency of the FFT means that it can
+also be a faster way to compute large convolutions, using the property
+that a convolution in the time domain is equivalent to a point-by-point
+multiplication in the frequency domain.
+
+Higher dimensions
+-----------------
+
+In two dimensions, the DFT is defined as
+
+.. math::
+ A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
+ a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
+ \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
+
+which extends in the obvious way to higher dimensions, and the inverses
+in higher dimensions also extend in the same way.
+
+References
+----------
+
+.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
+ machine calculation of complex Fourier series," *Math. Comput.*
+ 19: 297-301.
+
+.. [NR] Press, W., Teukolsky, S., Vetterling, W.T., and Flannery, B.P.,
+ 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
+ 12-13. Cambridge Univ. Press, Cambridge, UK.
+
+Examples
+--------
+
+For examples, see the various functions.
+
+"""
+
+from __future__ import division, absolute_import, print_function
-from .fftpack import *
+from ._pocketfft import *
from .helper import *
from numpy._pytesttester import PytestTester
diff --git a/numpy/fft/_pocketfft.c b/numpy/fft/_pocketfft.c
new file mode 100644
index 000000000..d75b9983c
--- /dev/null
+++ b/numpy/fft/_pocketfft.c
@@ -0,0 +1,2406 @@
+/*
+ * This file is part of pocketfft.
+ * Licensed under a 3-clause BSD style license - see LICENSE.md
+ */
+
+/*
+ * Main implementation file.
+ *
+ * Copyright (C) 2004-2018 Max-Planck-Society
+ * \author Martin Reinecke
+ */
+
+#include <math.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include "npy_config.h"
+#define restrict NPY_RESTRICT
+
+#define RALLOC(type,num) \
+ ((type *)malloc((num)*sizeof(type)))
+#define DEALLOC(ptr) \
+ do { free(ptr); (ptr)=NULL; } while(0)
+
+#define SWAP(a,b,type) \
+ do { type tmp_=(a); (a)=(b); (b)=tmp_; } while(0)
+
+#ifdef __GNUC__
+#define NOINLINE __attribute__((noinline))
+#define WARN_UNUSED_RESULT __attribute__ ((warn_unused_result))
+#else
+#define NOINLINE
+#define WARN_UNUSED_RESULT
+#endif
+
+struct cfft_plan_i;
+typedef struct cfft_plan_i * cfft_plan;
+struct rfft_plan_i;
+typedef struct rfft_plan_i * rfft_plan;
+
+// adapted from https://stackoverflow.com/questions/42792939/
+// CAUTION: this function only works for arguments in the range [-0.25; 0.25]!
+static void my_sincosm1pi (double a, double *restrict res)
+ {
+ double s = a * a;
+ /* Approximate cos(pi*x)-1 for x in [-0.25,0.25] */
+ double r = -1.0369917389758117e-4;
+ r = fma (r, s, 1.9294935641298806e-3);
+ r = fma (r, s, -2.5806887942825395e-2);
+ r = fma (r, s, 2.3533063028328211e-1);
+ r = fma (r, s, -1.3352627688538006e+0);
+ r = fma (r, s, 4.0587121264167623e+0);
+ r = fma (r, s, -4.9348022005446790e+0);
+ double c = r*s;
+ /* Approximate sin(pi*x) for x in [-0.25,0.25] */
+ r = 4.6151442520157035e-4;
+ r = fma (r, s, -7.3700183130883555e-3);
+ r = fma (r, s, 8.2145868949323936e-2);
+ r = fma (r, s, -5.9926452893214921e-1);
+ r = fma (r, s, 2.5501640398732688e+0);
+ r = fma (r, s, -5.1677127800499516e+0);
+ s = s * a;
+ r = r * s;
+ s = fma (a, 3.1415926535897931e+0, r);
+ res[0] = c;
+ res[1] = s;
+ }
+
+NOINLINE static void calc_first_octant(size_t den, double * restrict res)
+ {
+ size_t n = (den+4)>>3;
+ if (n==0) return;
+ res[0]=1.; res[1]=0.;
+ if (n==1) return;
+ size_t l1=(size_t)sqrt(n);
+ for (size_t i=1; i<l1; ++i)
+ my_sincosm1pi((2.*i)/den,&res[2*i]);
+ size_t start=l1;
+ while(start<n)
+ {
+ double cs[2];
+ my_sincosm1pi((2.*start)/den,cs);
+ res[2*start] = cs[0]+1.;
+ res[2*start+1] = cs[1];
+ size_t end = l1;
+ if (start+end>n) end = n-start;
+ for (size_t i=1; i<end; ++i)
+ {
+ double csx[2]={res[2*i], res[2*i+1]};
+ res[2*(start+i)] = ((cs[0]*csx[0] - cs[1]*csx[1] + cs[0]) + csx[0]) + 1.;
+ res[2*(start+i)+1] = (cs[0]*csx[1] + cs[1]*csx[0]) + cs[1] + csx[1];
+ }
+ start += l1;
+ }
+ for (size_t i=1; i<l1; ++i)
+ res[2*i] += 1.;
+ }
+
+NOINLINE static void calc_first_quadrant(size_t n, double * restrict res)
+ {
+ double * restrict p = res+n;
+ calc_first_octant(n<<1, p);
+ size_t ndone=(n+2)>>2;
+ size_t i=0, idx1=0, idx2=2*ndone-2;
+ for (; i+1<ndone; i+=2, idx1+=2, idx2-=2)
+ {
+ res[idx1] = p[2*i];
+ res[idx1+1] = p[2*i+1];
+ res[idx2] = p[2*i+3];
+ res[idx2+1] = p[2*i+2];
+ }
+ if (i!=ndone)
+ {
+ res[idx1 ] = p[2*i];
+ res[idx1+1] = p[2*i+1];
+ }
+ }
+
+NOINLINE static void calc_first_half(size_t n, double * restrict res)
+ {
+ int ndone=(n+1)>>1;
+ double * p = res+n-1;
+ calc_first_octant(n<<2, p);
+ int i4=0, in=n, i=0;
+ for (; i4<=in-i4; ++i, i4+=4) // octant 0
+ {
+ res[2*i] = p[2*i4]; res[2*i+1] = p[2*i4+1];
+ }
+ for (; i4-in <= 0; ++i, i4+=4) // octant 1
+ {
+ int xm = in-i4;
+ res[2*i] = p[2*xm+1]; res[2*i+1] = p[2*xm];
+ }
+ for (; i4<=3*in-i4; ++i, i4+=4) // octant 2
+ {
+ int xm = i4-in;
+ res[2*i] = -p[2*xm+1]; res[2*i+1] = p[2*xm];
+ }
+ for (; i<ndone; ++i, i4+=4) // octant 3
+ {
+ int xm = 2*in-i4;
+ res[2*i] = -p[2*xm]; res[2*i+1] = p[2*xm+1];
+ }
+ }
+
+NOINLINE static void fill_first_quadrant(size_t n, double * restrict res)
+ {
+ const double hsqt2 = 0.707106781186547524400844362104849;
+ size_t quart = n>>2;
+ if ((n&7)==0)
+ res[quart] = res[quart+1] = hsqt2;
+ for (size_t i=2, j=2*quart-2; i<quart; i+=2, j-=2)
+ {
+ res[j ] = res[i+1];
+ res[j+1] = res[i ];
+ }
+ }
+
+NOINLINE static void fill_first_half(size_t n, double * restrict res)
+ {
+ size_t half = n>>1;
+ if ((n&3)==0)
+ for (size_t i=0; i<half; i+=2)
+ {
+ res[i+half] = -res[i+1];
+ res[i+half+1] = res[i ];
+ }
+ else
+ for (size_t i=2, j=2*half-2; i<half; i+=2, j-=2)
+ {
+ res[j ] = -res[i ];
+ res[j+1] = res[i+1];
+ }
+ }
+
+NOINLINE static void fill_second_half(size_t n, double * restrict res)
+ {
+ if ((n&1)==0)
+ for (size_t i=0; i<n; ++i)
+ res[i+n] = -res[i];
+ else
+ for (size_t i=2, j=2*n-2; i<n; i+=2, j-=2)
+ {
+ res[j ] = res[i ];
+ res[j+1] = -res[i+1];
+ }
+ }
+
+NOINLINE static void sincos_2pibyn_half(size_t n, double * restrict res)
+ {
+ if ((n&3)==0)
+ {
+ calc_first_octant(n, res);
+ fill_first_quadrant(n, res);
+ fill_first_half(n, res);
+ }
+ else if ((n&1)==0)
+ {
+ calc_first_quadrant(n, res);
+ fill_first_half(n, res);
+ }
+ else
+ calc_first_half(n, res);
+ }
+
+NOINLINE static void sincos_2pibyn(size_t n, double * restrict res)
+ {
+ sincos_2pibyn_half(n, res);
+ fill_second_half(n, res);
+ }
+
+NOINLINE static size_t largest_prime_factor (size_t n)
+ {
+ size_t res=1;
+ size_t tmp;
+ while (((tmp=(n>>1))<<1)==n)
+ { res=2; n=tmp; }
+
+ size_t limit=(size_t)sqrt(n+0.01);
+ for (size_t x=3; x<=limit; x+=2)
+ while (((tmp=(n/x))*x)==n)
+ {
+ res=x;
+ n=tmp;
+ limit=(size_t)sqrt(n+0.01);
+ }
+ if (n>1) res=n;
+
+ return res;
+ }
+
+NOINLINE static double cost_guess (size_t n)
+ {
+ const double lfp=1.1; // penalty for non-hardcoded larger factors
+ size_t ni=n;
+ double result=0.;
+ size_t tmp;
+ while (((tmp=(n>>1))<<1)==n)
+ { result+=2; n=tmp; }
+
+ size_t limit=(size_t)sqrt(n+0.01);
+ for (size_t x=3; x<=limit; x+=2)
+ while ((tmp=(n/x))*x==n)
+ {
+ result+= (x<=5) ? x : lfp*x; // penalize larger prime factors
+ n=tmp;
+ limit=(size_t)sqrt(n+0.01);
+ }
+ if (n>1) result+=(n<=5) ? n : lfp*n;
+
+ return result*ni;
+ }
+
+/* returns the smallest composite of 2, 3, 5, 7 and 11 which is >= n */
+NOINLINE static size_t good_size(size_t n)
+ {
+ if (n<=6) return n;
+
+ size_t bestfac=2*n;
+ for (size_t f2=1; f2<bestfac; f2*=2)
+ for (size_t f23=f2; f23<bestfac; f23*=3)
+ for (size_t f235=f23; f235<bestfac; f235*=5)
+ for (size_t f2357=f235; f2357<bestfac; f2357*=7)
+ for (size_t f235711=f2357; f235711<bestfac; f235711*=11)
+ if (f235711>=n) bestfac=f235711;
+ return bestfac;
+ }
+
+typedef struct cmplx {
+ double r,i;
+} cmplx;
+
+#define NFCT 25
+typedef struct cfftp_fctdata
+ {
+ size_t fct;
+ cmplx *tw, *tws;
+ } cfftp_fctdata;
+
+typedef struct cfftp_plan_i
+ {
+ size_t length, nfct;
+ cmplx *mem;
+ cfftp_fctdata fct[NFCT];
+ } cfftp_plan_i;
+typedef struct cfftp_plan_i * cfftp_plan;
+
+#define PMC(a,b,c,d) { a.r=c.r+d.r; a.i=c.i+d.i; b.r=c.r-d.r; b.i=c.i-d.i; }
+#define ADDC(a,b,c) { a.r=b.r+c.r; a.i=b.i+c.i; }
+#define SCALEC(a,b) { a.r*=b; a.i*=b; }
+#define ROT90(a) { double tmp_=a.r; a.r=-a.i; a.i=tmp_; }
+#define ROTM90(a) { double tmp_=-a.r; a.r=a.i; a.i=tmp_; }
+#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))]
+#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))]
+#define WA(x,i) wa[(i)-1+(x)*(ido-1)]
+/* a = b*c */
+#define A_EQ_B_MUL_C(a,b,c) { a.r=b.r*c.r-b.i*c.i; a.i=b.r*c.i+b.i*c.r; }
+/* a = conj(b)*c*/
+#define A_EQ_CB_MUL_C(a,b,c) { a.r=b.r*c.r+b.i*c.i; a.i=b.r*c.i-b.i*c.r; }
+
+#define PMSIGNC(a,b,c,d) { a.r=c.r+sign*d.r; a.i=c.i+sign*d.i; b.r=c.r-sign*d.r; b.i=c.i-sign*d.i; }
+/* a = b*c */
+#define MULPMSIGNC(a,b,c) { a.r=b.r*c.r-sign*b.i*c.i; a.i=b.r*c.i+sign*b.i*c.r; }
+/* a *= b */
+#define MULPMSIGNCEQ(a,b) { double xtmp=a.r; a.r=b.r*a.r-sign*b.i*a.i; a.i=b.r*a.i+sign*b.i*xtmp; }
+
+NOINLINE static void pass2b (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=2;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
+ for (size_t i=1; i<ido; ++i)
+ {
+ cmplx t;
+ PMC (CH(i,k,0),t,CC(i,0,k),CC(i,1,k))
+ A_EQ_B_MUL_C (CH(i,k,1),WA(0,i),t)
+ }
+ }
+ }
+
+NOINLINE static void pass2f (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=2;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ PMC (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(0,1,k))
+ for (size_t i=1; i<ido; ++i)
+ {
+ cmplx t;
+ PMC (CH(i,k,0),t,CC(i,0,k),CC(i,1,k))
+ A_EQ_CB_MUL_C (CH(i,k,1),WA(0,i),t)
+ }
+ }
+ }
+
+#define PREP3(idx) \
+ cmplx t0 = CC(idx,0,k), t1, t2; \
+ PMC (t1,t2,CC(idx,1,k),CC(idx,2,k)) \
+ CH(idx,k,0).r=t0.r+t1.r; \
+ CH(idx,k,0).i=t0.i+t1.i;
+#define PARTSTEP3a(u1,u2,twr,twi) \
+ { \
+ cmplx ca,cb; \
+ ca.r=t0.r+twr*t1.r; \
+ ca.i=t0.i+twr*t1.i; \
+ cb.i=twi*t2.r; \
+ cb.r=-(twi*t2.i); \
+ PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) \
+ }
+
+#define PARTSTEP3b(u1,u2,twr,twi) \
+ { \
+ cmplx ca,cb,da,db; \
+ ca.r=t0.r+twr*t1.r; \
+ ca.i=t0.i+twr*t1.i; \
+ cb.i=twi*t2.r; \
+ cb.r=-(twi*t2.i); \
+ PMC(da,db,ca,cb) \
+ A_EQ_B_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
+ A_EQ_B_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
+ }
+NOINLINE static void pass3b (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=3;
+ const double tw1r=-0.5, tw1i= 0.86602540378443864676;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ PREP3(0)
+ PARTSTEP3a(1,2,tw1r,tw1i)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ PREP3(0)
+ PARTSTEP3a(1,2,tw1r,tw1i)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ PREP3(i)
+ PARTSTEP3b(1,2,tw1r,tw1i)
+ }
+ }
+ }
+#define PARTSTEP3f(u1,u2,twr,twi) \
+ { \
+ cmplx ca,cb,da,db; \
+ ca.r=t0.r+twr*t1.r; \
+ ca.i=t0.i+twr*t1.i; \
+ cb.i=twi*t2.r; \
+ cb.r=-(twi*t2.i); \
+ PMC(da,db,ca,cb) \
+ A_EQ_CB_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
+ A_EQ_CB_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
+ }
+NOINLINE static void pass3f (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=3;
+ const double tw1r=-0.5, tw1i= -0.86602540378443864676;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ PREP3(0)
+ PARTSTEP3a(1,2,tw1r,tw1i)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ PREP3(0)
+ PARTSTEP3a(1,2,tw1r,tw1i)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ PREP3(i)
+ PARTSTEP3f(1,2,tw1r,tw1i)
+ }
+ }
+ }
+
+NOINLINE static void pass4b (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=4;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ cmplx t1, t2, t3, t4;
+ PMC(t2,t1,CC(0,0,k),CC(0,2,k))
+ PMC(t3,t4,CC(0,1,k),CC(0,3,k))
+ ROT90(t4)
+ PMC(CH(0,k,0),CH(0,k,2),t2,t3)
+ PMC(CH(0,k,1),CH(0,k,3),t1,t4)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ cmplx t1, t2, t3, t4;
+ PMC(t2,t1,CC(0,0,k),CC(0,2,k))
+ PMC(t3,t4,CC(0,1,k),CC(0,3,k))
+ ROT90(t4)
+ PMC(CH(0,k,0),CH(0,k,2),t2,t3)
+ PMC(CH(0,k,1),CH(0,k,3),t1,t4)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ cmplx c2, c3, c4, t1, t2, t3, t4;
+ cmplx cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k);
+ PMC(t2,t1,cc0,cc2)
+ PMC(t3,t4,cc1,cc3)
+ ROT90(t4)
+ cmplx wa0=WA(0,i), wa1=WA(1,i),wa2=WA(2,i);
+ PMC(CH(i,k,0),c3,t2,t3)
+ PMC(c2,c4,t1,t4)
+ A_EQ_B_MUL_C (CH(i,k,1),wa0,c2)
+ A_EQ_B_MUL_C (CH(i,k,2),wa1,c3)
+ A_EQ_B_MUL_C (CH(i,k,3),wa2,c4)
+ }
+ }
+ }
+NOINLINE static void pass4f (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=4;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ cmplx t1, t2, t3, t4;
+ PMC(t2,t1,CC(0,0,k),CC(0,2,k))
+ PMC(t3,t4,CC(0,1,k),CC(0,3,k))
+ ROTM90(t4)
+ PMC(CH(0,k,0),CH(0,k,2),t2,t3)
+ PMC(CH(0,k,1),CH(0,k,3),t1,t4)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ cmplx t1, t2, t3, t4;
+ PMC(t2,t1,CC(0,0,k),CC(0,2,k))
+ PMC(t3,t4,CC(0,1,k),CC(0,3,k))
+ ROTM90(t4)
+ PMC(CH(0,k,0),CH(0,k,2),t2,t3)
+ PMC (CH(0,k,1),CH(0,k,3),t1,t4)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ cmplx c2, c3, c4, t1, t2, t3, t4;
+ cmplx cc0=CC(i,0,k), cc1=CC(i,1,k),cc2=CC(i,2,k),cc3=CC(i,3,k);
+ PMC(t2,t1,cc0,cc2)
+ PMC(t3,t4,cc1,cc3)
+ ROTM90(t4)
+ cmplx wa0=WA(0,i), wa1=WA(1,i),wa2=WA(2,i);
+ PMC(CH(i,k,0),c3,t2,t3)
+ PMC(c2,c4,t1,t4)
+ A_EQ_CB_MUL_C (CH(i,k,1),wa0,c2)
+ A_EQ_CB_MUL_C (CH(i,k,2),wa1,c3)
+ A_EQ_CB_MUL_C (CH(i,k,3),wa2,c4)
+ }
+ }
+ }
+
+#define PREP5(idx) \
+ cmplx t0 = CC(idx,0,k), t1, t2, t3, t4; \
+ PMC (t1,t4,CC(idx,1,k),CC(idx,4,k)) \
+ PMC (t2,t3,CC(idx,2,k),CC(idx,3,k)) \
+ CH(idx,k,0).r=t0.r+t1.r+t2.r; \
+ CH(idx,k,0).i=t0.i+t1.i+t2.i;
+
+#define PARTSTEP5a(u1,u2,twar,twbr,twai,twbi) \
+ { \
+ cmplx ca,cb; \
+ ca.r=t0.r+twar*t1.r+twbr*t2.r; \
+ ca.i=t0.i+twar*t1.i+twbr*t2.i; \
+ cb.i=twai*t4.r twbi*t3.r; \
+ cb.r=-(twai*t4.i twbi*t3.i); \
+ PMC(CH(0,k,u1),CH(0,k,u2),ca,cb) \
+ }
+
+#define PARTSTEP5b(u1,u2,twar,twbr,twai,twbi) \
+ { \
+ cmplx ca,cb,da,db; \
+ ca.r=t0.r+twar*t1.r+twbr*t2.r; \
+ ca.i=t0.i+twar*t1.i+twbr*t2.i; \
+ cb.i=twai*t4.r twbi*t3.r; \
+ cb.r=-(twai*t4.i twbi*t3.i); \
+ PMC(da,db,ca,cb) \
+ A_EQ_B_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
+ A_EQ_B_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
+ }
+NOINLINE static void pass5b (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=5;
+ const double tw1r= 0.3090169943749474241,
+ tw1i= 0.95105651629515357212,
+ tw2r= -0.8090169943749474241,
+ tw2i= 0.58778525229247312917;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ PREP5(0)
+ PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
+ PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ PREP5(0)
+ PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
+ PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ PREP5(i)
+ PARTSTEP5b(1,4,tw1r,tw2r,+tw1i,+tw2i)
+ PARTSTEP5b(2,3,tw2r,tw1r,+tw2i,-tw1i)
+ }
+ }
+ }
+#define PARTSTEP5f(u1,u2,twar,twbr,twai,twbi) \
+ { \
+ cmplx ca,cb,da,db; \
+ ca.r=t0.r+twar*t1.r+twbr*t2.r; \
+ ca.i=t0.i+twar*t1.i+twbr*t2.i; \
+ cb.i=twai*t4.r twbi*t3.r; \
+ cb.r=-(twai*t4.i twbi*t3.i); \
+ PMC(da,db,ca,cb) \
+ A_EQ_CB_MUL_C (CH(i,k,u1),WA(u1-1,i),da) \
+ A_EQ_CB_MUL_C (CH(i,k,u2),WA(u2-1,i),db) \
+ }
+NOINLINE static void pass5f (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa)
+ {
+ const size_t cdim=5;
+ const double tw1r= 0.3090169943749474241,
+ tw1i= -0.95105651629515357212,
+ tw2r= -0.8090169943749474241,
+ tw2i= -0.58778525229247312917;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ PREP5(0)
+ PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
+ PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ PREP5(0)
+ PARTSTEP5a(1,4,tw1r,tw2r,+tw1i,+tw2i)
+ PARTSTEP5a(2,3,tw2r,tw1r,+tw2i,-tw1i)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ PREP5(i)
+ PARTSTEP5f(1,4,tw1r,tw2r,+tw1i,+tw2i)
+ PARTSTEP5f(2,3,tw2r,tw1r,+tw2i,-tw1i)
+ }
+ }
+ }
+
+#define PREP7(idx) \
+ cmplx t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7; \
+ PMC (t2,t7,CC(idx,1,k),CC(idx,6,k)) \
+ PMC (t3,t6,CC(idx,2,k),CC(idx,5,k)) \
+ PMC (t4,t5,CC(idx,3,k),CC(idx,4,k)) \
+ CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r; \
+ CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i;
+
+#define PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,out1,out2) \
+ { \
+ cmplx ca,cb; \
+ ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r; \
+ ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i; \
+ cb.i=y1*t7.r y2*t6.r y3*t5.r; \
+ cb.r=-(y1*t7.i y2*t6.i y3*t5.i); \
+ PMC(out1,out2,ca,cb) \
+ }
+#define PARTSTEP7a(u1,u2,x1,x2,x3,y1,y2,y3) \
+ PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,CH(0,k,u1),CH(0,k,u2))
+#define PARTSTEP7(u1,u2,x1,x2,x3,y1,y2,y3) \
+ { \
+ cmplx da,db; \
+ PARTSTEP7a0(u1,u2,x1,x2,x3,y1,y2,y3,da,db) \
+ MULPMSIGNC (CH(i,k,u1),WA(u1-1,i),da) \
+ MULPMSIGNC (CH(i,k,u2),WA(u2-1,i),db) \
+ }
+
+NOINLINE static void pass7(size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa, const int sign)
+ {
+ const size_t cdim=7;
+ const double tw1r= 0.623489801858733530525,
+ tw1i= sign * 0.7818314824680298087084,
+ tw2r= -0.222520933956314404289,
+ tw2i= sign * 0.9749279121818236070181,
+ tw3r= -0.9009688679024191262361,
+ tw3i= sign * 0.4338837391175581204758;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ PREP7(0)
+ PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
+ PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
+ PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ PREP7(0)
+ PARTSTEP7a(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
+ PARTSTEP7a(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
+ PARTSTEP7a(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ PREP7(i)
+ PARTSTEP7(1,6,tw1r,tw2r,tw3r,+tw1i,+tw2i,+tw3i)
+ PARTSTEP7(2,5,tw2r,tw3r,tw1r,+tw2i,-tw3i,-tw1i)
+ PARTSTEP7(3,4,tw3r,tw1r,tw2r,+tw3i,-tw1i,+tw2i)
+ }
+ }
+ }
+
+#define PREP11(idx) \
+ cmplx t1 = CC(idx,0,k), t2, t3, t4, t5, t6, t7, t8, t9, t10, t11; \
+ PMC (t2,t11,CC(idx,1,k),CC(idx,10,k)) \
+ PMC (t3,t10,CC(idx,2,k),CC(idx, 9,k)) \
+ PMC (t4,t9 ,CC(idx,3,k),CC(idx, 8,k)) \
+ PMC (t5,t8 ,CC(idx,4,k),CC(idx, 7,k)) \
+ PMC (t6,t7 ,CC(idx,5,k),CC(idx, 6,k)) \
+ CH(idx,k,0).r=t1.r+t2.r+t3.r+t4.r+t5.r+t6.r; \
+ CH(idx,k,0).i=t1.i+t2.i+t3.i+t4.i+t5.i+t6.i;
+
+#define PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,out1,out2) \
+ { \
+ cmplx ca,cb; \
+ ca.r=t1.r+x1*t2.r+x2*t3.r+x3*t4.r+x4*t5.r+x5*t6.r; \
+ ca.i=t1.i+x1*t2.i+x2*t3.i+x3*t4.i+x4*t5.i+x5*t6.i; \
+ cb.i=y1*t11.r y2*t10.r y3*t9.r y4*t8.r y5*t7.r; \
+ cb.r=-(y1*t11.i y2*t10.i y3*t9.i y4*t8.i y5*t7.i ); \
+ PMC(out1,out2,ca,cb) \
+ }
+#define PARTSTEP11a(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
+ PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,CH(0,k,u1),CH(0,k,u2))
+#define PARTSTEP11(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5) \
+ { \
+ cmplx da,db; \
+ PARTSTEP11a0(u1,u2,x1,x2,x3,x4,x5,y1,y2,y3,y4,y5,da,db) \
+ MULPMSIGNC (CH(i,k,u1),WA(u1-1,i),da) \
+ MULPMSIGNC (CH(i,k,u2),WA(u2-1,i),db) \
+ }
+
+NOINLINE static void pass11 (size_t ido, size_t l1, const cmplx * restrict cc,
+ cmplx * restrict ch, const cmplx * restrict wa, const int sign)
+ {
+ const size_t cdim=11;
+ const double tw1r = 0.8412535328311811688618,
+ tw1i = sign * 0.5406408174555975821076,
+ tw2r = 0.4154150130018864255293,
+ tw2i = sign * 0.9096319953545183714117,
+ tw3r = -0.1423148382732851404438,
+ tw3i = sign * 0.9898214418809327323761,
+ tw4r = -0.6548607339452850640569,
+ tw4i = sign * 0.755749574354258283774,
+ tw5r = -0.9594929736144973898904,
+ tw5i = sign * 0.2817325568414296977114;
+
+ if (ido==1)
+ for (size_t k=0; k<l1; ++k)
+ {
+ PREP11(0)
+ PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
+ PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
+ PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
+ PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
+ PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
+ }
+ else
+ for (size_t k=0; k<l1; ++k)
+ {
+ {
+ PREP11(0)
+ PARTSTEP11a(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
+ PARTSTEP11a(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
+ PARTSTEP11a(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
+ PARTSTEP11a(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
+ PARTSTEP11a(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
+ }
+ for (size_t i=1; i<ido; ++i)
+ {
+ PREP11(i)
+ PARTSTEP11(1,10,tw1r,tw2r,tw3r,tw4r,tw5r,+tw1i,+tw2i,+tw3i,+tw4i,+tw5i)
+ PARTSTEP11(2, 9,tw2r,tw4r,tw5r,tw3r,tw1r,+tw2i,+tw4i,-tw5i,-tw3i,-tw1i)
+ PARTSTEP11(3, 8,tw3r,tw5r,tw2r,tw1r,tw4r,+tw3i,-tw5i,-tw2i,+tw1i,+tw4i)
+ PARTSTEP11(4, 7,tw4r,tw3r,tw1r,tw5r,tw2r,+tw4i,-tw3i,+tw1i,+tw5i,-tw2i)
+ PARTSTEP11(5, 6,tw5r,tw1r,tw4r,tw2r,tw3r,+tw5i,-tw1i,+tw4i,-tw2i,+tw3i)
+ }
+ }
+ }
+
+#define CX(a,b,c) cc[(a)+ido*((b)+l1*(c))]
+#define CX2(a,b) cc[(a)+idl1*(b)]
+#define CH2(a,b) ch[(a)+idl1*(b)]
+
+NOINLINE static int passg (size_t ido, size_t ip, size_t l1,
+ cmplx * restrict cc, cmplx * restrict ch, const cmplx * restrict wa,
+ const cmplx * restrict csarr, const int sign)
+ {
+ const size_t cdim=ip;
+ size_t ipph = (ip+1)/2;
+ size_t idl1 = ido*l1;
+
+ cmplx * restrict wal=RALLOC(cmplx,ip);
+ if (!wal) return -1;
+ wal[0]=(cmplx){1.,0.};
+ for (size_t i=1; i<ip; ++i)
+ wal[i]=(cmplx){csarr[i].r,sign*csarr[i].i};
+
+ for (size_t k=0; k<l1; ++k)
+ for (size_t i=0; i<ido; ++i)
+ CH(i,k,0) = CC(i,0,k);
+ for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
+ for (size_t k=0; k<l1; ++k)
+ for (size_t i=0; i<ido; ++i)
+ PMC(CH(i,k,j),CH(i,k,jc),CC(i,j,k),CC(i,jc,k))
+ for (size_t k=0; k<l1; ++k)
+ for (size_t i=0; i<ido; ++i)
+ {
+ cmplx tmp = CH(i,k,0);
+ for (size_t j=1; j<ipph; ++j)
+ ADDC(tmp,tmp,CH(i,k,j))
+ CX(i,k,0) = tmp;
+ }
+ for (size_t l=1, lc=ip-1; l<ipph; ++l, --lc)
+ {
+ // j=0
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ CX2(ik,l).r = CH2(ik,0).r+wal[l].r*CH2(ik,1).r+wal[2*l].r*CH2(ik,2).r;
+ CX2(ik,l).i = CH2(ik,0).i+wal[l].r*CH2(ik,1).i+wal[2*l].r*CH2(ik,2).i;
+ CX2(ik,lc).r=-wal[l].i*CH2(ik,ip-1).i-wal[2*l].i*CH2(ik,ip-2).i;
+ CX2(ik,lc).i=wal[l].i*CH2(ik,ip-1).r+wal[2*l].i*CH2(ik,ip-2).r;
+ }
+
+ size_t iwal=2*l;
+ size_t j=3, jc=ip-3;
+ for (; j<ipph-1; j+=2, jc-=2)
+ {
+ iwal+=l; if (iwal>ip) iwal-=ip;
+ cmplx xwal=wal[iwal];
+ iwal+=l; if (iwal>ip) iwal-=ip;
+ cmplx xwal2=wal[iwal];
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ CX2(ik,l).r += CH2(ik,j).r*xwal.r+CH2(ik,j+1).r*xwal2.r;
+ CX2(ik,l).i += CH2(ik,j).i*xwal.r+CH2(ik,j+1).i*xwal2.r;
+ CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i+CH2(ik,jc-1).i*xwal2.i;
+ CX2(ik,lc).i += CH2(ik,jc).r*xwal.i+CH2(ik,jc-1).r*xwal2.i;
+ }
+ }
+ for (; j<ipph; ++j, --jc)
+ {
+ iwal+=l; if (iwal>ip) iwal-=ip;
+ cmplx xwal=wal[iwal];
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ CX2(ik,l).r += CH2(ik,j).r*xwal.r;
+ CX2(ik,l).i += CH2(ik,j).i*xwal.r;
+ CX2(ik,lc).r -= CH2(ik,jc).i*xwal.i;
+ CX2(ik,lc).i += CH2(ik,jc).r*xwal.i;
+ }
+ }
+ }
+ DEALLOC(wal);
+
+ // shuffling and twiddling
+ if (ido==1)
+ for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc)
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ cmplx t1=CX2(ik,j), t2=CX2(ik,jc);
+ PMC(CX2(ik,j),CX2(ik,jc),t1,t2)
+ }
+ else
+ {
+ for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc)
+ for (size_t k=0; k<l1; ++k)
+ {
+ cmplx t1=CX(0,k,j), t2=CX(0,k,jc);
+ PMC(CX(0,k,j),CX(0,k,jc),t1,t2)
+ for (size_t i=1; i<ido; ++i)
+ {
+ cmplx x1, x2;
+ PMC(x1,x2,CX(i,k,j),CX(i,k,jc))
+ size_t idij=(j-1)*(ido-1)+i-1;
+ MULPMSIGNC (CX(i,k,j),wa[idij],x1)
+ idij=(jc-1)*(ido-1)+i-1;
+ MULPMSIGNC (CX(i,k,jc),wa[idij],x2)
+ }
+ }
+ }
+ return 0;
+ }
+
+#undef CH2
+#undef CX2
+#undef CX
+
+NOINLINE WARN_UNUSED_RESULT static int pass_all(cfftp_plan plan, cmplx c[], double fct,
+ const int sign)
+ {
+ if (plan->length==1) return 0;
+ size_t len=plan->length;
+ size_t l1=1, nf=plan->nfct;
+ cmplx *ch = RALLOC(cmplx, len);
+ if (!ch) return -1;
+ cmplx *p1=c, *p2=ch;
+
+ for(size_t k1=0; k1<nf; k1++)
+ {
+ size_t ip=plan->fct[k1].fct;
+ size_t l2=ip*l1;
+ size_t ido = len/l2;
+ if (ip==4)
+ sign>0 ? pass4b (ido, l1, p1, p2, plan->fct[k1].tw)
+ : pass4f (ido, l1, p1, p2, plan->fct[k1].tw);
+ else if(ip==2)
+ sign>0 ? pass2b (ido, l1, p1, p2, plan->fct[k1].tw)
+ : pass2f (ido, l1, p1, p2, plan->fct[k1].tw);
+ else if(ip==3)
+ sign>0 ? pass3b (ido, l1, p1, p2, plan->fct[k1].tw)
+ : pass3f (ido, l1, p1, p2, plan->fct[k1].tw);
+ else if(ip==5)
+ sign>0 ? pass5b (ido, l1, p1, p2, plan->fct[k1].tw)
+ : pass5f (ido, l1, p1, p2, plan->fct[k1].tw);
+ else if(ip==7) pass7 (ido, l1, p1, p2, plan->fct[k1].tw, sign);
+ else if(ip==11) pass11(ido, l1, p1, p2, plan->fct[k1].tw, sign);
+ else
+ {
+ if (passg(ido, ip, l1, p1, p2, plan->fct[k1].tw, plan->fct[k1].tws, sign))
+ { DEALLOC(ch); return -1; }
+ SWAP(p1,p2,cmplx *);
+ }
+ SWAP(p1,p2,cmplx *);
+ l1=l2;
+ }
+ if (p1!=c)
+ {
+ if (fct!=1.)
+ for (size_t i=0; i<len; ++i)
+ {
+ c[i].r = ch[i].r*fct;
+ c[i].i = ch[i].i*fct;
+ }
+ else
+ memcpy (c,p1,len*sizeof(cmplx));
+ }
+ else
+ if (fct!=1.)
+ for (size_t i=0; i<len; ++i)
+ {
+ c[i].r *= fct;
+ c[i].i *= fct;
+ }
+ DEALLOC(ch);
+ return 0;
+ }
+
+#undef PMSIGNC
+#undef A_EQ_B_MUL_C
+#undef A_EQ_CB_MUL_C
+#undef MULPMSIGNC
+#undef MULPMSIGNCEQ
+
+#undef WA
+#undef CC
+#undef CH
+#undef ROT90
+#undef SCALEC
+#undef ADDC
+#undef PMC
+
+NOINLINE WARN_UNUSED_RESULT
+static int cfftp_forward(cfftp_plan plan, double c[], double fct)
+ { return pass_all(plan,(cmplx *)c, fct, -1); }
+
+NOINLINE WARN_UNUSED_RESULT
+static int cfftp_backward(cfftp_plan plan, double c[], double fct)
+ { return pass_all(plan,(cmplx *)c, fct, 1); }
+
+NOINLINE WARN_UNUSED_RESULT
+static int cfftp_factorize (cfftp_plan plan)
+ {
+ size_t length=plan->length;
+ size_t nfct=0;
+ while ((length%4)==0)
+ { if (nfct>=NFCT) return -1; plan->fct[nfct++].fct=4; length>>=2; }
+ if ((length%2)==0)
+ {
+ length>>=1;
+ // factor 2 should be at the front of the factor list
+ if (nfct>=NFCT) return -1;
+ plan->fct[nfct++].fct=2;
+ SWAP(plan->fct[0].fct, plan->fct[nfct-1].fct,size_t);
+ }
+ size_t maxl=(size_t)(sqrt((double)length))+1;
+ for (size_t divisor=3; (length>1)&&(divisor<maxl); divisor+=2)
+ if ((length%divisor)==0)
+ {
+ while ((length%divisor)==0)
+ {
+ if (nfct>=NFCT) return -1;
+ plan->fct[nfct++].fct=divisor;
+ length/=divisor;
+ }
+ maxl=(size_t)(sqrt((double)length))+1;
+ }
+ if (length>1) plan->fct[nfct++].fct=length;
+ plan->nfct=nfct;
+ return 0;
+ }
+
+NOINLINE static size_t cfftp_twsize (cfftp_plan plan)
+ {
+ size_t twsize=0, l1=1;
+ for (size_t k=0; k<plan->nfct; ++k)
+ {
+ size_t ip=plan->fct[k].fct, ido= plan->length/(l1*ip);
+ twsize+=(ip-1)*(ido-1);
+ if (ip>11)
+ twsize+=ip;
+ l1*=ip;
+ }
+ return twsize;
+ }
+
+NOINLINE WARN_UNUSED_RESULT static int cfftp_comp_twiddle (cfftp_plan plan)
+ {
+ size_t length=plan->length;
+ double *twid = RALLOC(double, 2*length);
+ if (!twid) return -1;
+ sincos_2pibyn(length, twid);
+ size_t l1=1;
+ size_t memofs=0;
+ for (size_t k=0; k<plan->nfct; ++k)
+ {
+ size_t ip=plan->fct[k].fct, ido= length/(l1*ip);
+ plan->fct[k].tw=plan->mem+memofs;
+ memofs+=(ip-1)*(ido-1);
+ for (size_t j=1; j<ip; ++j)
+ for (size_t i=1; i<ido; ++i)
+ {
+ plan->fct[k].tw[(j-1)*(ido-1)+i-1].r = twid[2*j*l1*i];
+ plan->fct[k].tw[(j-1)*(ido-1)+i-1].i = twid[2*j*l1*i+1];
+ }
+ if (ip>11)
+ {
+ plan->fct[k].tws=plan->mem+memofs;
+ memofs+=ip;
+ for (size_t j=0; j<ip; ++j)
+ {
+ plan->fct[k].tws[j].r = twid[2*j*l1*ido];
+ plan->fct[k].tws[j].i = twid[2*j*l1*ido+1];
+ }
+ }
+ l1*=ip;
+ }
+ DEALLOC(twid);
+ return 0;
+ }
+
+static cfftp_plan make_cfftp_plan (size_t length)
+ {
+ if (length==0) return NULL;
+ cfftp_plan plan = RALLOC(cfftp_plan_i,1);
+ if (!plan) return NULL;
+ plan->length=length;
+ plan->nfct=0;
+ for (size_t i=0; i<NFCT; ++i)
+ plan->fct[i]=(cfftp_fctdata){0,0,0};
+ plan->mem=0;
+ if (length==1) return plan;
+ if (cfftp_factorize(plan)!=0) { DEALLOC(plan); return NULL; }
+ size_t tws=cfftp_twsize(plan);
+ plan->mem=RALLOC(cmplx,tws);
+ if (!plan->mem) { DEALLOC(plan); return NULL; }
+ if (cfftp_comp_twiddle(plan)!=0)
+ { DEALLOC(plan->mem); DEALLOC(plan); return NULL; }
+ return plan;
+ }
+
+static void destroy_cfftp_plan (cfftp_plan plan)
+ {
+ DEALLOC(plan->mem);
+ DEALLOC(plan);
+ }
+
+typedef struct rfftp_fctdata
+ {
+ size_t fct;
+ double *tw, *tws;
+ } rfftp_fctdata;
+
+typedef struct rfftp_plan_i
+ {
+ size_t length, nfct;
+ double *mem;
+ rfftp_fctdata fct[NFCT];
+ } rfftp_plan_i;
+typedef struct rfftp_plan_i * rfftp_plan;
+
+#define WA(x,i) wa[(i)+(x)*(ido-1)]
+#define PM(a,b,c,d) { a=c+d; b=c-d; }
+/* (a+ib) = conj(c+id) * (e+if) */
+#define MULPM(a,b,c,d,e,f) { a=c*e+d*f; b=c*f-d*e; }
+
+#define CC(a,b,c) cc[(a)+ido*((b)+l1*(c))]
+#define CH(a,b,c) ch[(a)+ido*((b)+cdim*(c))]
+
+/* Radix-2 pass of the real forward transform (FFTPACK radf2 analogue).
+   cc: input, ch: output (half-complex packing), wa: twiddles for this
+   factor.  ido = remaining block length, l1 = number of blocks. */
+NOINLINE static void radf2 (size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=2;
+
+ for (size_t k=0; k<l1; k++)
+ PM (CH(0,0,k),CH(ido-1,1,k),CC(0,k,0),CC(0,k,1))
+ /* even ido: the extra middle column has no twiddle partner and is
+    handled separately */
+ if ((ido&1)==0)
+ for (size_t k=0; k<l1; k++)
+ {
+ CH( 0,1,k) = -CC(ido-1,k,1);
+ CH(ido-1,0,k) = CC(ido-1,k,0);
+ }
+ if (ido<=2) return;
+ for (size_t k=0; k<l1; k++)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ size_t ic=ido-i;
+ double tr2, ti2;
+ MULPM (tr2,ti2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1))
+ PM (CH(i-1,0,k),CH(ic-1,1,k),CC(i-1,k,0),tr2)
+ PM (CH(i ,0,k),CH(ic ,1,k),ti2,CC(i ,k,0))
+ }
+ }
+
+NOINLINE static void radf3(size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=3;
+ static const double taur=-0.5, taui=0.86602540378443864676;
+
+ for (size_t k=0; k<l1; k++)
+ {
+ double cr2=CC(0,k,1)+CC(0,k,2);
+ CH(0,0,k) = CC(0,k,0)+cr2;
+ CH(0,2,k) = taui*(CC(0,k,2)-CC(0,k,1));
+ CH(ido-1,1,k) = CC(0,k,0)+taur*cr2;
+ }
+ if (ido==1) return;
+ for (size_t k=0; k<l1; k++)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ size_t ic=ido-i;
+ double di2, di3, dr2, dr3;
+ MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1)) // d2=conj(WA0)*CC1
+ MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2)) // d3=conj(WA1)*CC2
+ double cr2=dr2+dr3; // c add
+ double ci2=di2+di3;
+ CH(i-1,0,k) = CC(i-1,k,0)+cr2; // c add
+ CH(i ,0,k) = CC(i ,k,0)+ci2;
+ double tr2 = CC(i-1,k,0)+taur*cr2; // c add
+ double ti2 = CC(i ,k,0)+taur*ci2;
+ double tr3 = taui*(di2-di3); // t3 = taui*i*(d3-d2)?
+ double ti3 = taui*(dr3-dr2);
+ PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr3) // PM(i) = t2+t3
+ PM(CH(i ,2,k),CH(ic ,1,k),ti3,ti2) // PM(ic) = conj(t2-t3)
+ }
+ }
+
+NOINLINE static void radf4(size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=4;
+ static const double hsqt2=0.70710678118654752440;
+
+ for (size_t k=0; k<l1; k++)
+ {
+ double tr1,tr2;
+ PM (tr1,CH(0,2,k),CC(0,k,3),CC(0,k,1))
+ PM (tr2,CH(ido-1,1,k),CC(0,k,0),CC(0,k,2))
+ PM (CH(0,0,k),CH(ido-1,3,k),tr2,tr1)
+ }
+ if ((ido&1)==0)
+ for (size_t k=0; k<l1; k++)
+ {
+ double ti1=-hsqt2*(CC(ido-1,k,1)+CC(ido-1,k,3));
+ double tr1= hsqt2*(CC(ido-1,k,1)-CC(ido-1,k,3));
+ PM (CH(ido-1,0,k),CH(ido-1,2,k),CC(ido-1,k,0),tr1)
+ PM (CH( 0,3,k),CH( 0,1,k),ti1,CC(ido-1,k,2))
+ }
+ if (ido<=2) return;
+ for (size_t k=0; k<l1; k++)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ size_t ic=ido-i;
+ double ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+ MULPM(cr2,ci2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1))
+ MULPM(cr3,ci3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2))
+ MULPM(cr4,ci4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3))
+ PM(tr1,tr4,cr4,cr2)
+ PM(ti1,ti4,ci2,ci4)
+ PM(tr2,tr3,CC(i-1,k,0),cr3)
+ PM(ti2,ti3,CC(i ,k,0),ci3)
+ PM(CH(i-1,0,k),CH(ic-1,3,k),tr2,tr1)
+ PM(CH(i ,0,k),CH(ic ,3,k),ti1,ti2)
+ PM(CH(i-1,2,k),CH(ic-1,1,k),tr3,ti4)
+ PM(CH(i ,2,k),CH(ic ,1,k),tr4,ti3)
+ }
+ }
+
+NOINLINE static void radf5(size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=5;
+ static const double tr11= 0.3090169943749474241, ti11=0.95105651629515357212,
+ tr12=-0.8090169943749474241, ti12=0.58778525229247312917;
+
+ for (size_t k=0; k<l1; k++)
+ {
+ double cr2, cr3, ci4, ci5;
+ PM (cr2,ci5,CC(0,k,4),CC(0,k,1))
+ PM (cr3,ci4,CC(0,k,3),CC(0,k,2))
+ CH(0,0,k)=CC(0,k,0)+cr2+cr3;
+ CH(ido-1,1,k)=CC(0,k,0)+tr11*cr2+tr12*cr3;
+ CH(0,2,k)=ti11*ci5+ti12*ci4;
+ CH(ido-1,3,k)=CC(0,k,0)+tr12*cr2+tr11*cr3;
+ CH(0,4,k)=ti12*ci5-ti11*ci4;
+ }
+ if (ido==1) return;
+ for (size_t k=0; k<l1;++k)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ double ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3,
+ dr4, dr5, cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5;
+ size_t ic=ido-i;
+ MULPM (dr2,di2,WA(0,i-2),WA(0,i-1),CC(i-1,k,1),CC(i,k,1))
+ MULPM (dr3,di3,WA(1,i-2),WA(1,i-1),CC(i-1,k,2),CC(i,k,2))
+ MULPM (dr4,di4,WA(2,i-2),WA(2,i-1),CC(i-1,k,3),CC(i,k,3))
+ MULPM (dr5,di5,WA(3,i-2),WA(3,i-1),CC(i-1,k,4),CC(i,k,4))
+ PM(cr2,ci5,dr5,dr2)
+ PM(ci2,cr5,di2,di5)
+ PM(cr3,ci4,dr4,dr3)
+ PM(ci3,cr4,di3,di4)
+ CH(i-1,0,k)=CC(i-1,k,0)+cr2+cr3;
+ CH(i ,0,k)=CC(i ,k,0)+ci2+ci3;
+ tr2=CC(i-1,k,0)+tr11*cr2+tr12*cr3;
+ ti2=CC(i ,k,0)+tr11*ci2+tr12*ci3;
+ tr3=CC(i-1,k,0)+tr12*cr2+tr11*cr3;
+ ti3=CC(i ,k,0)+tr12*ci2+tr11*ci3;
+ MULPM(tr5,tr4,cr5,cr4,ti11,ti12)
+ MULPM(ti5,ti4,ci5,ci4,ti11,ti12)
+ PM(CH(i-1,2,k),CH(ic-1,1,k),tr2,tr5)
+ PM(CH(i ,2,k),CH(ic ,1,k),ti5,ti2)
+ PM(CH(i-1,4,k),CH(ic-1,3,k),tr3,tr4)
+ PM(CH(i ,4,k),CH(ic ,3,k),ti4,ti3)
+ }
+ }
+
+#undef CC
+#undef CH
+#define C1(a,b,c) cc[(a)+ido*((b)+l1*(c))]
+#define C2(a,b) cc[(a)+idl1*(b)]
+#define CH2(a,b) ch[(a)+idl1*(b)]
+#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))]
+#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))]
+NOINLINE static void radfg(size_t ido, size_t ip, size_t l1,
+ double * restrict cc, double * restrict ch, const double * restrict wa,
+ const double * restrict csarr)
+ {
+ const size_t cdim=ip;
+ size_t ipph=(ip+1)/2;
+ size_t idl1 = ido*l1;
+
+ if (ido>1)
+ {
+ for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 114
+ {
+ size_t is=(j-1)*(ido-1),
+ is2=(jc-1)*(ido-1);
+ for (size_t k=0; k<l1; ++k) // 113
+ {
+ size_t idij=is;
+ size_t idij2=is2;
+ for (size_t i=1; i<=ido-2; i+=2) // 112
+ {
+ double t1=C1(i,k,j ), t2=C1(i+1,k,j ),
+ t3=C1(i,k,jc), t4=C1(i+1,k,jc);
+ double x1=wa[idij]*t1 + wa[idij+1]*t2,
+ x2=wa[idij]*t2 - wa[idij+1]*t1,
+ x3=wa[idij2]*t3 + wa[idij2+1]*t4,
+ x4=wa[idij2]*t4 - wa[idij2+1]*t3;
+ C1(i ,k,j ) = x1+x3;
+ C1(i ,k,jc) = x2-x4;
+ C1(i+1,k,j ) = x2+x4;
+ C1(i+1,k,jc) = x3-x1;
+ idij+=2;
+ idij2+=2;
+ }
+ }
+ }
+ }
+
+ for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 123
+ for (size_t k=0; k<l1; ++k) // 122
+ {
+ double t1=C1(0,k,j), t2=C1(0,k,jc);
+ C1(0,k,j ) = t1+t2;
+ C1(0,k,jc) = t2-t1;
+ }
+
+//everything in C
+//memset(ch,0,ip*l1*ido*sizeof(double));
+
+ for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc) // 127
+ {
+ for (size_t ik=0; ik<idl1; ++ik) // 124
+ {
+ CH2(ik,l ) = C2(ik,0)+csarr[2*l]*C2(ik,1)+csarr[4*l]*C2(ik,2);
+ CH2(ik,lc) = csarr[2*l+1]*C2(ik,ip-1)+csarr[4*l+1]*C2(ik,ip-2);
+ }
+ size_t iang = 2*l;
+ size_t j=3, jc=ip-3;
+ for (; j<ipph-3; j+=4,jc-=4) // 126
+ {
+ iang+=l; if (iang>=ip) iang-=ip;
+ double ar1=csarr[2*iang], ai1=csarr[2*iang+1];
+ iang+=l; if (iang>=ip) iang-=ip;
+ double ar2=csarr[2*iang], ai2=csarr[2*iang+1];
+ iang+=l; if (iang>=ip) iang-=ip;
+ double ar3=csarr[2*iang], ai3=csarr[2*iang+1];
+ iang+=l; if (iang>=ip) iang-=ip;
+ double ar4=csarr[2*iang], ai4=csarr[2*iang+1];
+ for (size_t ik=0; ik<idl1; ++ik) // 125
+ {
+ CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1)
+ +ar3*C2(ik,j +2)+ar4*C2(ik,j +3);
+ CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1)
+ +ai3*C2(ik,jc-2)+ai4*C2(ik,jc-3);
+ }
+ }
+ for (; j<ipph-1; j+=2,jc-=2) // 126
+ {
+ iang+=l; if (iang>=ip) iang-=ip;
+ double ar1=csarr[2*iang], ai1=csarr[2*iang+1];
+ iang+=l; if (iang>=ip) iang-=ip;
+ double ar2=csarr[2*iang], ai2=csarr[2*iang+1];
+ for (size_t ik=0; ik<idl1; ++ik) // 125
+ {
+ CH2(ik,l ) += ar1*C2(ik,j )+ar2*C2(ik,j +1);
+ CH2(ik,lc) += ai1*C2(ik,jc)+ai2*C2(ik,jc-1);
+ }
+ }
+ for (; j<ipph; ++j,--jc) // 126
+ {
+ iang+=l; if (iang>=ip) iang-=ip;
+ double ar=csarr[2*iang], ai=csarr[2*iang+1];
+ for (size_t ik=0; ik<idl1; ++ik) // 125
+ {
+ CH2(ik,l ) += ar*C2(ik,j );
+ CH2(ik,lc) += ai*C2(ik,jc);
+ }
+ }
+ }
+ for (size_t ik=0; ik<idl1; ++ik) // 101
+ CH2(ik,0) = C2(ik,0);
+ for (size_t j=1; j<ipph; ++j) // 129
+ for (size_t ik=0; ik<idl1; ++ik) // 128
+ CH2(ik,0) += C2(ik,j);
+
+// everything in CH at this point!
+//memset(cc,0,ip*l1*ido*sizeof(double));
+
+ for (size_t k=0; k<l1; ++k) // 131
+ for (size_t i=0; i<ido; ++i) // 130
+ CC(i,0,k) = CH(i,k,0);
+
+ for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 137
+ {
+ size_t j2=2*j-1;
+ for (size_t k=0; k<l1; ++k) // 136
+ {
+ CC(ido-1,j2,k) = CH(0,k,j);
+ CC(0,j2+1,k) = CH(0,k,jc);
+ }
+ }
+
+ if (ido==1) return;
+
+ for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 140
+ {
+ size_t j2=2*j-1;
+ for(size_t k=0; k<l1; ++k) // 139
+ for(size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 138
+ {
+ CC(i ,j2+1,k) = CH(i ,k,j )+CH(i ,k,jc);
+ CC(ic ,j2 ,k) = CH(i ,k,j )-CH(i ,k,jc);
+ CC(i+1 ,j2+1,k) = CH(i+1,k,j )+CH(i+1,k,jc);
+ CC(ic+1,j2 ,k) = CH(i+1,k,jc)-CH(i+1,k,j );
+ }
+ }
+ }
+#undef C1
+#undef C2
+#undef CH2
+
+#undef CH
+#undef CC
+#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))]
+#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))]
+
+/* Radix-2 pass of the real backward transform (FFTPACK radb2 analogue).
+   Inverse of radf2: unpacks the half-complex layout back into blocks. */
+NOINLINE static void radb2(size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=2;
+
+ for (size_t k=0; k<l1; k++)
+ PM (CH(0,k,0),CH(0,k,1),CC(0,0,k),CC(ido-1,1,k))
+ /* even ido: reconstruct the unpaired middle column */
+ if ((ido&1)==0)
+ for (size_t k=0; k<l1; k++)
+ {
+ CH(ido-1,k,0) = 2.*CC(ido-1,0,k);
+ CH(ido-1,k,1) =-2.*CC(0 ,1,k);
+ }
+ if (ido<=2) return;
+ for (size_t k=0; k<l1;++k)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ size_t ic=ido-i;
+ double ti2, tr2;
+ PM (CH(i-1,k,0),tr2,CC(i-1,0,k),CC(ic-1,1,k))
+ PM (ti2,CH(i ,k,0),CC(i ,0,k),CC(ic ,1,k))
+ MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ti2,tr2)
+ }
+ }
+
+NOINLINE static void radb3(size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=3;
+ static const double taur=-0.5, taui=0.86602540378443864676;
+
+ for (size_t k=0; k<l1; k++)
+ {
+ double tr2=2.*CC(ido-1,1,k);
+ double cr2=CC(0,0,k)+taur*tr2;
+ CH(0,k,0)=CC(0,0,k)+tr2;
+ double ci3=2.*taui*CC(0,2,k);
+ PM (CH(0,k,2),CH(0,k,1),cr2,ci3);
+ }
+ if (ido==1) return;
+ for (size_t k=0; k<l1; k++)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ size_t ic=ido-i;
+ double tr2=CC(i-1,2,k)+CC(ic-1,1,k); // t2=CC(I) + conj(CC(ic))
+ double ti2=CC(i ,2,k)-CC(ic ,1,k);
+ double cr2=CC(i-1,0,k)+taur*tr2; // c2=CC +taur*t2
+ double ci2=CC(i ,0,k)+taur*ti2;
+ CH(i-1,k,0)=CC(i-1,0,k)+tr2; // CH=CC+t2
+ CH(i ,k,0)=CC(i ,0,k)+ti2;
+ double cr3=taui*(CC(i-1,2,k)-CC(ic-1,1,k));// c3=taui*(CC(i)-conj(CC(ic)))
+ double ci3=taui*(CC(i ,2,k)+CC(ic ,1,k));
+ double di2, di3, dr2, dr3;
+ PM(dr3,dr2,cr2,ci3) // d2= (cr2-ci3, ci2+cr3) = c2+i*c3
+ PM(di2,di3,ci2,cr3) // d3= (cr2+ci3, ci2-cr3) = c2-i*c3
+ MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2) // ch = WA*d2
+ MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3)
+ }
+ }
+
+NOINLINE static void radb4(size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=4;
+ static const double sqrt2=1.41421356237309504880;
+
+ for (size_t k=0; k<l1; k++)
+ {
+ double tr1, tr2;
+ PM (tr2,tr1,CC(0,0,k),CC(ido-1,3,k))
+ double tr3=2.*CC(ido-1,1,k);
+ double tr4=2.*CC(0,2,k);
+ PM (CH(0,k,0),CH(0,k,2),tr2,tr3)
+ PM (CH(0,k,3),CH(0,k,1),tr1,tr4)
+ }
+ if ((ido&1)==0)
+ for (size_t k=0; k<l1; k++)
+ {
+ double tr1,tr2,ti1,ti2;
+ PM (ti1,ti2,CC(0 ,3,k),CC(0 ,1,k))
+ PM (tr2,tr1,CC(ido-1,0,k),CC(ido-1,2,k))
+ CH(ido-1,k,0)=tr2+tr2;
+ CH(ido-1,k,1)=sqrt2*(tr1-ti1);
+ CH(ido-1,k,2)=ti2+ti2;
+ CH(ido-1,k,3)=-sqrt2*(tr1+ti1);
+ }
+ if (ido<=2) return;
+ for (size_t k=0; k<l1;++k)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ double ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
+ size_t ic=ido-i;
+ PM (tr2,tr1,CC(i-1,0,k),CC(ic-1,3,k))
+ PM (ti1,ti2,CC(i ,0,k),CC(ic ,3,k))
+ PM (tr4,ti3,CC(i ,2,k),CC(ic ,1,k))
+ PM (tr3,ti4,CC(i-1,2,k),CC(ic-1,1,k))
+ PM (CH(i-1,k,0),cr3,tr2,tr3)
+ PM (CH(i ,k,0),ci3,ti2,ti3)
+ PM (cr4,cr2,tr1,tr4)
+ PM (ci2,ci4,ti1,ti4)
+ MULPM (CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),ci2,cr2)
+ MULPM (CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),ci3,cr3)
+ MULPM (CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),ci4,cr4)
+ }
+ }
+
+NOINLINE static void radb5(size_t ido, size_t l1, const double * restrict cc,
+ double * restrict ch, const double * restrict wa)
+ {
+ const size_t cdim=5;
+ static const double tr11= 0.3090169943749474241, ti11=0.95105651629515357212,
+ tr12=-0.8090169943749474241, ti12=0.58778525229247312917;
+
+ for (size_t k=0; k<l1; k++)
+ {
+ double ti5=CC(0,2,k)+CC(0,2,k);
+ double ti4=CC(0,4,k)+CC(0,4,k);
+ double tr2=CC(ido-1,1,k)+CC(ido-1,1,k);
+ double tr3=CC(ido-1,3,k)+CC(ido-1,3,k);
+ CH(0,k,0)=CC(0,0,k)+tr2+tr3;
+ double cr2=CC(0,0,k)+tr11*tr2+tr12*tr3;
+ double cr3=CC(0,0,k)+tr12*tr2+tr11*tr3;
+ double ci4, ci5;
+ MULPM(ci5,ci4,ti5,ti4,ti11,ti12)
+ PM(CH(0,k,4),CH(0,k,1),cr2,ci5)
+ PM(CH(0,k,3),CH(0,k,2),cr3,ci4)
+ }
+ if (ido==1) return;
+ for (size_t k=0; k<l1;++k)
+ for (size_t i=2; i<ido; i+=2)
+ {
+ size_t ic=ido-i;
+ double tr2, tr3, tr4, tr5, ti2, ti3, ti4, ti5;
+ PM(tr2,tr5,CC(i-1,2,k),CC(ic-1,1,k))
+ PM(ti5,ti2,CC(i ,2,k),CC(ic ,1,k))
+ PM(tr3,tr4,CC(i-1,4,k),CC(ic-1,3,k))
+ PM(ti4,ti3,CC(i ,4,k),CC(ic ,3,k))
+ CH(i-1,k,0)=CC(i-1,0,k)+tr2+tr3;
+ CH(i ,k,0)=CC(i ,0,k)+ti2+ti3;
+ double cr2=CC(i-1,0,k)+tr11*tr2+tr12*tr3;
+ double ci2=CC(i ,0,k)+tr11*ti2+tr12*ti3;
+ double cr3=CC(i-1,0,k)+tr12*tr2+tr11*tr3;
+ double ci3=CC(i ,0,k)+tr12*ti2+tr11*ti3;
+ double ci4, ci5, cr5, cr4;
+ MULPM(cr5,cr4,tr5,tr4,ti11,ti12)
+ MULPM(ci5,ci4,ti5,ti4,ti11,ti12)
+ double dr2, dr3, dr4, dr5, di2, di3, di4, di5;
+ PM(dr4,dr3,cr3,ci4)
+ PM(di3,di4,ci3,cr4)
+ PM(dr5,dr2,cr2,ci5)
+ PM(di2,di5,ci2,cr5)
+ MULPM(CH(i,k,1),CH(i-1,k,1),WA(0,i-2),WA(0,i-1),di2,dr2)
+ MULPM(CH(i,k,2),CH(i-1,k,2),WA(1,i-2),WA(1,i-1),di3,dr3)
+ MULPM(CH(i,k,3),CH(i-1,k,3),WA(2,i-2),WA(2,i-1),di4,dr4)
+ MULPM(CH(i,k,4),CH(i-1,k,4),WA(3,i-2),WA(3,i-1),di5,dr5)
+ }
+ }
+
+#undef CC
+#undef CH
+#define CC(a,b,c) cc[(a)+ido*((b)+cdim*(c))]
+#define CH(a,b,c) ch[(a)+ido*((b)+l1*(c))]
+#define C1(a,b,c) cc[(a)+ido*((b)+l1*(c))]
+#define C2(a,b) cc[(a)+idl1*(b)]
+#define CH2(a,b) ch[(a)+idl1*(b)]
+
+NOINLINE static void radbg(size_t ido, size_t ip, size_t l1,
+ double * restrict cc, double * restrict ch, const double * restrict wa,
+ const double * restrict csarr)
+ {
+ const size_t cdim=ip;
+ size_t ipph=(ip+1)/ 2;
+ size_t idl1 = ido*l1;
+
+ for (size_t k=0; k<l1; ++k) // 102
+ for (size_t i=0; i<ido; ++i) // 101
+ CH(i,k,0) = CC(i,0,k);
+ for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 108
+ {
+ size_t j2=2*j-1;
+ for (size_t k=0; k<l1; ++k)
+ {
+ CH(0,k,j ) = 2*CC(ido-1,j2,k);
+ CH(0,k,jc) = 2*CC(0,j2+1,k);
+ }
+ }
+
+ if (ido!=1)
+ {
+ for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 111
+ {
+ size_t j2=2*j-1;
+ for (size_t k=0; k<l1; ++k)
+ for (size_t i=1, ic=ido-i-2; i<=ido-2; i+=2, ic-=2) // 109
+ {
+ CH(i ,k,j ) = CC(i ,j2+1,k)+CC(ic ,j2,k);
+ CH(i ,k,jc) = CC(i ,j2+1,k)-CC(ic ,j2,k);
+ CH(i+1,k,j ) = CC(i+1,j2+1,k)-CC(ic+1,j2,k);
+ CH(i+1,k,jc) = CC(i+1,j2+1,k)+CC(ic+1,j2,k);
+ }
+ }
+ }
+ for (size_t l=1,lc=ip-1; l<ipph; ++l,--lc)
+ {
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ C2(ik,l ) = CH2(ik,0)+csarr[2*l]*CH2(ik,1)+csarr[4*l]*CH2(ik,2);
+ C2(ik,lc) = csarr[2*l+1]*CH2(ik,ip-1)+csarr[4*l+1]*CH2(ik,ip-2);
+ }
+ size_t iang=2*l;
+ size_t j=3,jc=ip-3;
+ for(; j<ipph-3; j+=4,jc-=4)
+ {
+ iang+=l; if(iang>ip) iang-=ip;
+ double ar1=csarr[2*iang], ai1=csarr[2*iang+1];
+ iang+=l; if(iang>ip) iang-=ip;
+ double ar2=csarr[2*iang], ai2=csarr[2*iang+1];
+ iang+=l; if(iang>ip) iang-=ip;
+ double ar3=csarr[2*iang], ai3=csarr[2*iang+1];
+ iang+=l; if(iang>ip) iang-=ip;
+ double ar4=csarr[2*iang], ai4=csarr[2*iang+1];
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1)
+ +ar3*CH2(ik,j +2)+ar4*CH2(ik,j +3);
+ C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1)
+ +ai3*CH2(ik,jc-2)+ai4*CH2(ik,jc-3);
+ }
+ }
+ for(; j<ipph-1; j+=2,jc-=2)
+ {
+ iang+=l; if(iang>ip) iang-=ip;
+ double ar1=csarr[2*iang], ai1=csarr[2*iang+1];
+ iang+=l; if(iang>ip) iang-=ip;
+ double ar2=csarr[2*iang], ai2=csarr[2*iang+1];
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ C2(ik,l ) += ar1*CH2(ik,j )+ar2*CH2(ik,j +1);
+ C2(ik,lc) += ai1*CH2(ik,jc)+ai2*CH2(ik,jc-1);
+ }
+ }
+ for(; j<ipph; ++j,--jc)
+ {
+ iang+=l; if(iang>ip) iang-=ip;
+ double war=csarr[2*iang], wai=csarr[2*iang+1];
+ for (size_t ik=0; ik<idl1; ++ik)
+ {
+ C2(ik,l ) += war*CH2(ik,j );
+ C2(ik,lc) += wai*CH2(ik,jc);
+ }
+ }
+ }
+ for (size_t j=1; j<ipph; ++j)
+ for (size_t ik=0; ik<idl1; ++ik)
+ CH2(ik,0) += CH2(ik,j);
+ for (size_t j=1, jc=ip-1; j<ipph; ++j,--jc) // 124
+ for (size_t k=0; k<l1; ++k)
+ {
+ CH(0,k,j ) = C1(0,k,j)-C1(0,k,jc);
+ CH(0,k,jc) = C1(0,k,j)+C1(0,k,jc);
+ }
+
+ if (ido==1) return;
+
+ for (size_t j=1, jc=ip-1; j<ipph; ++j, --jc) // 127
+ for (size_t k=0; k<l1; ++k)
+ for (size_t i=1; i<=ido-2; i+=2)
+ {
+ CH(i ,k,j ) = C1(i ,k,j)-C1(i+1,k,jc);
+ CH(i ,k,jc) = C1(i ,k,j)+C1(i+1,k,jc);
+ CH(i+1,k,j ) = C1(i+1,k,j)+C1(i ,k,jc);
+ CH(i+1,k,jc) = C1(i+1,k,j)-C1(i ,k,jc);
+ }
+
+// All in CH
+
+ for (size_t j=1; j<ip; ++j)
+ {
+ size_t is = (j-1)*(ido-1);
+ for (size_t k=0; k<l1; ++k)
+ {
+ size_t idij = is;
+ for (size_t i=1; i<=ido-2; i+=2)
+ {
+ double t1=CH(i,k,j), t2=CH(i+1,k,j);
+ CH(i ,k,j) = wa[idij]*t1-wa[idij+1]*t2;
+ CH(i+1,k,j) = wa[idij]*t2+wa[idij+1]*t1;
+ idij+=2;
+ }
+ }
+ }
+ }
+#undef C1
+#undef C2
+#undef CH2
+
+#undef CC
+#undef CH
+#undef PM
+#undef MULPM
+#undef WA
+
+/* Store fct*p1 into c (n doubles).  Aliasing (p1==c) scales in place;
+   fct==1 degenerates to a plain copy (or a no-op when aliased). */
+static void copy_and_norm(double *c, double *p1, size_t n, double fct)
+ {
+ if (p1==c)
+ {
+ if (fct==1.) return;            /* already in place, nothing to scale */
+ for (size_t idx=0; idx<n; ++idx)
+ c[idx] *= fct;
+ return;
+ }
+ if (fct==1.)
+ {
+ memcpy (c,p1,n*sizeof(double)); /* unscaled: bulk copy is enough */
+ return;
+ }
+ for (size_t idx=0; idx<n; ++idx)
+ c[idx] = fct*p1[idx];
+ }
+
+/* Real forward transform: run the radix passes from the largest stride
+   down, ping-ponging between c and a scratch buffer, then normalise the
+   result back into c.  Returns 0, or -1 if scratch allocation fails. */
+WARN_UNUSED_RESULT
+static int rfftp_forward(rfftp_plan plan, double c[], double fct)
+ {
+ if (plan->length==1) return 0;
+ size_t n=plan->length;
+ size_t l1=n, nf=plan->nfct;
+ double *ch = RALLOC(double, n);
+ if (!ch) return -1;
+ double *p1=c, *p2=ch;
+
+ /* factors are consumed in reverse order for the forward transform */
+ for(size_t k1=0; k1<nf;++k1)
+ {
+ size_t k=nf-k1-1;
+ size_t ip=plan->fct[k].fct;
+ size_t ido=n / l1;
+ l1 /= ip;
+ if(ip==4)
+ radf4(ido, l1, p1, p2, plan->fct[k].tw);
+ else if(ip==2)
+ radf2(ido, l1, p1, p2, plan->fct[k].tw);
+ else if(ip==3)
+ radf3(ido, l1, p1, p2, plan->fct[k].tw);
+ else if(ip==5)
+ radf5(ido, l1, p1, p2, plan->fct[k].tw);
+ else
+ {
+ radfg(ido, ip, l1, p1, p2, plan->fct[k].tw, plan->fct[k].tws);
+ /* radfg leaves its result in the input buffer; this extra swap
+    cancels the unconditional swap below */
+ SWAP (p1,p2,double *);
+ }
+ SWAP (p1,p2,double *);
+ }
+ /* p1 holds the final pass's output; scale it into the caller's array */
+ copy_and_norm(c,p1,n,fct);
+ DEALLOC(ch);
+ return 0;
+ }
+
+/* Real backward (inverse) transform: radix passes in forward factor
+   order, ping-ponging buffers, then normalise into c.  Returns 0, or
+   -1 if scratch allocation fails. */
+WARN_UNUSED_RESULT
+static int rfftp_backward(rfftp_plan plan, double c[], double fct)
+ {
+ if (plan->length==1) return 0;
+ size_t n=plan->length;
+ size_t l1=1, nf=plan->nfct;
+ double *ch = RALLOC(double, n);
+ if (!ch) return -1;
+ double *p1=c, *p2=ch;
+
+ for(size_t k=0; k<nf; k++)
+ {
+ size_t ip = plan->fct[k].fct,
+ ido= n/(ip*l1);
+ if(ip==4)
+ radb4(ido, l1, p1, p2, plan->fct[k].tw);
+ else if(ip==2)
+ radb2(ido, l1, p1, p2, plan->fct[k].tw);
+ else if(ip==3)
+ radb3(ido, l1, p1, p2, plan->fct[k].tw);
+ else if(ip==5)
+ radb5(ido, l1, p1, p2, plan->fct[k].tw);
+ else
+ radbg(ido, ip, l1, p1, p2, plan->fct[k].tw, plan->fct[k].tws);
+ SWAP (p1,p2,double *);
+ l1*=ip;
+ }
+ copy_and_norm(c,p1,n,fct);
+ DEALLOC(ch);
+ return 0;
+ }
+
+/* Factorize plan->length into the plan's fct[] array: factors of 4
+   first, then a single 2 moved to the front, then odd primes by trial
+   division.  Returns 0 on success, -1 if more than NFCT factors are
+   needed. */
+WARN_UNUSED_RESULT
+static int rfftp_factorize (rfftp_plan plan)
+ {
+ size_t length=plan->length;
+ size_t nfct=0;
+ while ((length%4)==0)
+ { if (nfct>=NFCT) return -1; plan->fct[nfct++].fct=4; length>>=2; }
+ if ((length%2)==0)
+ {
+ length>>=1;
+ // factor 2 should be at the front of the factor list
+ if (nfct>=NFCT) return -1;
+ plan->fct[nfct++].fct=2;
+ SWAP(plan->fct[0].fct, plan->fct[nfct-1].fct,size_t);
+ }
+ size_t maxl=(size_t)(sqrt((double)length))+1;
+ for (size_t divisor=3; (length>1)&&(divisor<maxl); divisor+=2)
+ if ((length%divisor)==0)
+ {
+ while ((length%divisor)==0)
+ {
+ if (nfct>=NFCT) return -1;
+ plan->fct[nfct++].fct=divisor;
+ length/=divisor;
+ }
+ maxl=(size_t)(sqrt((double)length))+1;
+ }
+ if (length>1)
+ {
+ /* remaining value is prime; bounds-check the insertion like every
+    other one above (the original wrote unconditionally and could
+    overflow fct[] when nfct==NFCT) */
+ if (nfct>=NFCT) return -1;
+ plan->fct[nfct++].fct=length;
+ }
+ plan->nfct=nfct;
+ return 0;
+ }
+
+/* Number of doubles of twiddle storage the plan's factorisation needs:
+   (ip-1)*(ido-1) per factor, plus 2*ip extra for generic factors >5. */
+static size_t rfftp_twsize(rfftp_plan plan)
+ {
+ size_t twsize=0, l1=1;
+ for (size_t k=0; k<plan->nfct; ++k)
+ {
+ size_t ip=plan->fct[k].fct, ido= plan->length/(l1*ip);
+ twsize+=(ip-1)*(ido-1);
+ if (ip>5) twsize+=2*ip;
+ l1*=ip;
+ }
+ /* the original had an unreachable `return 0;` here — removed */
+ return twsize;
+ }
+
+/* Fill the twiddle tables of a real plan.  plan->mem must already hold
+   rfftp_twsize(plan) doubles; tw/tws pointers are carved out of it via
+   `ptr` in factor order.  Returns 0, or -1 on scratch allocation
+   failure. */
+WARN_UNUSED_RESULT NOINLINE static int rfftp_comp_twiddle (rfftp_plan plan)
+ {
+ size_t length=plan->length;
+ double *twid = RALLOC(double, 2*length);
+ if (!twid) return -1;
+ sincos_2pibyn_half(length, twid);
+ size_t l1=1;
+ double *ptr=plan->mem;
+ for (size_t k=0; k<plan->nfct; ++k)
+ {
+ size_t ip=plan->fct[k].fct, ido=length/(l1*ip);
+ if (k<plan->nfct-1) // last factor doesn't need twiddles
+ {
+ plan->fct[k].tw=ptr; ptr+=(ip-1)*(ido-1);
+ for (size_t j=1; j<ip; ++j)
+ for (size_t i=1; i<=(ido-1)/2; ++i)
+ {
+ plan->fct[k].tw[(j-1)*(ido-1)+2*i-2] = twid[2*j*l1*i];
+ plan->fct[k].tw[(j-1)*(ido-1)+2*i-1] = twid[2*j*l1*i+1];
+ }
+ }
+ if (ip>5) // special factors required by *g functions
+ {
+ /* tws holds one full period of ip roots of unity, stored as
+    (cos,sin) pairs with Hermitian symmetry filled explicitly */
+ plan->fct[k].tws=ptr; ptr+=2*ip;
+ plan->fct[k].tws[0] = 1.;
+ plan->fct[k].tws[1] = 0.;
+ for (size_t i=1; i<=(ip>>1); ++i)
+ {
+ plan->fct[k].tws[2*i ] = twid[2*i*(length/ip)];
+ plan->fct[k].tws[2*i+1] = twid[2*i*(length/ip)+1];
+ plan->fct[k].tws[2*(ip-i) ] = twid[2*i*(length/ip)];
+ plan->fct[k].tws[2*(ip-i)+1] = -twid[2*i*(length/ip)+1];
+ }
+ }
+ l1*=ip;
+ }
+ DEALLOC(twid);
+ return 0;
+ }
+
+/* Allocate and fully initialise a direct (pocketfft) real plan for
+   `length` points.  Returns NULL on length==0 or allocation /
+   factorisation failure.  Release with destroy_rfftp_plan(). */
+NOINLINE static rfftp_plan make_rfftp_plan (size_t length)
+ {
+ if (length==0) return NULL;
+ rfftp_plan plan = RALLOC(rfftp_plan_i,1);
+ if (!plan) return NULL;
+ plan->length=length;
+ plan->nfct=0;
+ plan->mem=NULL;
+ /* zero every factor slot so later cleanup is safe on partial init */
+ for (size_t i=0; i<NFCT; ++i)
+ plan->fct[i]=(rfftp_fctdata){0,0,0};
+ if (length==1) return plan; /* length-1 transform is the identity */
+ if (rfftp_factorize(plan)!=0) { DEALLOC(plan); return NULL; }
+ size_t tws=rfftp_twsize(plan);
+ plan->mem=RALLOC(double,tws);
+ if (!plan->mem) { DEALLOC(plan); return NULL; }
+ if (rfftp_comp_twiddle(plan)!=0)
+ { DEALLOC(plan->mem); DEALLOC(plan); return NULL; }
+ return plan;
+ }
+
+/* Free a real plan and its twiddle storage.  plan must be non-NULL. */
+NOINLINE static void destroy_rfftp_plan (rfftp_plan plan)
+ {
+ DEALLOC(plan->mem);
+ DEALLOC(plan);
+ }
+
+typedef struct fftblue_plan_i
+ {
+ size_t n, n2;
+ cfftp_plan plan;
+ double *mem;
+ double *bk, *bkf;
+ } fftblue_plan_i;
+typedef struct fftblue_plan_i * fftblue_plan;
+
+/* Build a Bluestein (chirp-z) plan for an arbitrary length: the
+   transform is re-expressed as a convolution of length n2, where n2 is
+   a cheap composite size >= 2n-1.  bk holds the chirp sequence, bkf its
+   forward FFT (pre-normalised by 1/n2).  Returns NULL on failure. */
+NOINLINE static fftblue_plan make_fftblue_plan (size_t length)
+ {
+ fftblue_plan plan = RALLOC(fftblue_plan_i,1);
+ if (!plan) return NULL;
+ plan->n = length;
+ plan->n2 = good_size(plan->n*2-1);
+ plan->mem = RALLOC(double, 2*plan->n+2*plan->n2);
+ if (!plan->mem) { DEALLOC(plan); return NULL; }
+ plan->bk = plan->mem;
+ plan->bkf = plan->bk+2*plan->n;
+
+/* initialize b_k */
+ double *tmp = RALLOC(double,4*plan->n);
+ if (!tmp) { DEALLOC(plan->mem); DEALLOC(plan); return NULL; }
+ sincos_2pibyn(2*plan->n,tmp);
+ plan->bk[0] = 1;
+ plan->bk[1] = 0;
+
+ /* coeff = m*m mod 2n, built incrementally via m^2 = (m-1)^2 + (2m-1) */
+ size_t coeff=0;
+ for (size_t m=1; m<plan->n; ++m)
+ {
+ coeff+=2*m-1;
+ if (coeff>=2*plan->n) coeff-=2*plan->n;
+ plan->bk[2*m ] = tmp[2*coeff ];
+ plan->bk[2*m+1] = tmp[2*coeff+1];
+ }
+
+ /* initialize the zero-padded, Fourier transformed b_k. Add normalisation. */
+ double xn2 = 1./plan->n2;
+ plan->bkf[0] = plan->bk[0]*xn2;
+ plan->bkf[1] = plan->bk[1]*xn2;
+ /* mirror the chirp into the tail of the padded buffer (circular symmetry) */
+ for (size_t m=2; m<2*plan->n; m+=2)
+ {
+ plan->bkf[m] = plan->bkf[2*plan->n2-m] = plan->bk[m] *xn2;
+ plan->bkf[m+1] = plan->bkf[2*plan->n2-m+1] = plan->bk[m+1] *xn2;
+ }
+ for (size_t m=2*plan->n;m<=(2*plan->n2-2*plan->n+1);++m)
+ plan->bkf[m]=0.;
+ plan->plan=make_cfftp_plan(plan->n2);
+ if (!plan->plan)
+ { DEALLOC(tmp); DEALLOC(plan->mem); DEALLOC(plan); return NULL; }
+ if (cfftp_forward(plan->plan,plan->bkf,1.)!=0)
+ { DEALLOC(tmp); DEALLOC(plan->mem); DEALLOC(plan); return NULL; }
+ DEALLOC(tmp);
+
+ return plan;
+ }
+
+/* Free a Bluestein plan: chirp storage, the embedded complex plan, and
+   the plan struct itself.  plan must be non-NULL. */
+NOINLINE static void destroy_fftblue_plan (fftblue_plan plan)
+ {
+ DEALLOC(plan->mem);
+ destroy_cfftp_plan(plan->plan);
+ DEALLOC(plan);
+ }
+
+/* Length-n Bluestein transform of the interleaved complex data in c.
+   isign selects the phase convention (>0 backward, otherwise forward).
+   Implemented as: modulate by the chirp, length-n2 FFT, pointwise
+   multiply with the precomputed FFT of the chirp, inverse FFT,
+   demodulate.  Allocates 2*n2 doubles of scratch per call.
+   Returns 0 on success, -1 on allocation/sub-transform failure. */
+NOINLINE WARN_UNUSED_RESULT
+static int fftblue_fft(fftblue_plan plan, double c[], int isign, double fct)
+ {
+ size_t n=plan->n;
+ size_t n2=plan->n2;
+ double *bk = plan->bk;
+ double *bkf = plan->bkf;
+ double *akf = RALLOC(double, 2*n2);
+ if (!akf) return -1;
+
+/* initialize a_k and FFT it */
+ if (isign>0)
+ for (size_t m=0; m<2*n; m+=2)
+ {
+ akf[m] = c[m]*bk[m] - c[m+1]*bk[m+1];
+ akf[m+1] = c[m]*bk[m+1] + c[m+1]*bk[m];
+ }
+ else
+ for (size_t m=0; m<2*n; m+=2)
+ {
+ akf[m] = c[m]*bk[m] + c[m+1]*bk[m+1];
+ akf[m+1] =-c[m]*bk[m+1] + c[m+1]*bk[m];
+ }
+ /* zero-pad up to the convolution length */
+ for (size_t m=2*n; m<2*n2; ++m)
+ akf[m]=0;
+
+ if (cfftp_forward (plan->plan,akf,fct)!=0)
+ { DEALLOC(akf); return -1; }
+
+/* do the convolution */
+ if (isign>0)
+ for (size_t m=0; m<2*n2; m+=2)
+ {
+ double im = -akf[m]*bkf[m+1] + akf[m+1]*bkf[m];
+ akf[m ] = akf[m]*bkf[m] + akf[m+1]*bkf[m+1];
+ akf[m+1] = im;
+ }
+ else
+ for (size_t m=0; m<2*n2; m+=2)
+ {
+ double im = akf[m]*bkf[m+1] + akf[m+1]*bkf[m];
+ akf[m ] = akf[m]*bkf[m] - akf[m+1]*bkf[m+1];
+ akf[m+1] = im;
+ }
+
+/* inverse FFT */
+ if (cfftp_backward (plan->plan,akf,1.)!=0)
+ { DEALLOC(akf); return -1; }
+
+/* multiply by b_k */
+ if (isign>0)
+ for (size_t m=0; m<2*n; m+=2)
+ {
+ c[m] = bk[m] *akf[m] - bk[m+1]*akf[m+1];
+ c[m+1] = bk[m+1]*akf[m] + bk[m] *akf[m+1];
+ }
+ else
+ for (size_t m=0; m<2*n; m+=2)
+ {
+ c[m] = bk[m] *akf[m] + bk[m+1]*akf[m+1];
+ c[m+1] =-bk[m+1]*akf[m] + bk[m] *akf[m+1];
+ }
+ DEALLOC(akf);
+ return 0;
+ }
+
+/* Complex Bluestein transforms: isign +1 = backward, -1 = forward. */
+WARN_UNUSED_RESULT
+static int cfftblue_backward(fftblue_plan plan, double c[], double fct)
+ { return fftblue_fft(plan,c,1,fct); }
+
+WARN_UNUSED_RESULT
+static int cfftblue_forward(fftblue_plan plan, double c[], double fct)
+ { return fftblue_fft(plan,c,-1,fct); }
+
+/* Real inverse transform via Bluestein: expand the half-complex input
+   in c into a full Hermitian-symmetric complex spectrum, run a complex
+   backward transform, and keep the (real) even-index outputs. */
+WARN_UNUSED_RESULT
+static int rfftblue_backward(fftblue_plan plan, double c[], double fct)
+ {
+ size_t n=plan->n;
+ double *tmp = RALLOC(double,2*n);
+ if (!tmp) return -1;
+ tmp[0]=c[0];
+ tmp[1]=0.;
+ memcpy (tmp+2,c+1, (n-1)*sizeof(double));
+ if ((n&1)==0) tmp[n+1]=0.; /* even n: Nyquist bin has zero imag part */
+ /* mirror the upper half as complex conjugates */
+ for (size_t m=2; m<n; m+=2)
+ {
+ tmp[2*n-m]=tmp[m];
+ tmp[2*n-m+1]=-tmp[m+1];
+ }
+ if (fftblue_fft(plan,tmp,1,fct)!=0)
+ { DEALLOC(tmp); return -1; }
+ for (size_t m=0; m<n; ++m)
+ c[m] = tmp[2*m];
+ DEALLOC(tmp);
+ return 0;
+ }
+
+/* Real forward transform via Bluestein: promote the real input to
+   complex (zero imaginary parts), run a complex forward transform, and
+   pack the non-redundant half back into c in half-complex order. */
+WARN_UNUSED_RESULT
+static int rfftblue_forward(fftblue_plan plan, double c[], double fct)
+ {
+ size_t n=plan->n;
+ double *tmp = RALLOC(double,2*n);
+ if (!tmp) return -1;
+ for (size_t m=0; m<n; ++m)
+ {
+ tmp[2*m] = c[m];
+ tmp[2*m+1] = 0.;
+ }
+ if (fftblue_fft(plan,tmp,-1,fct)!=0)
+ { DEALLOC(tmp); return -1; }
+ c[0] = tmp[0]; /* DC term is real; drop its zero imaginary part */
+ memcpy (c+1, tmp+2, (n-1)*sizeof(double));
+ DEALLOC(tmp);
+ return 0;
+ }
+
+typedef struct cfft_plan_i
+ {
+ cfftp_plan packplan;
+ fftblue_plan blueplan;
+ } cfft_plan_i;
+
+/* Top-level complex plan: choose between direct pocketfft (smooth or
+   small lengths) and Bluestein (lengths with a large prime factor),
+   based on a cost heuristic.  Exactly one of packplan/blueplan is set.
+   Returns NULL on length==0 or allocation failure. */
+static cfft_plan make_cfft_plan (size_t length)
+ {
+ if (length==0) return NULL;
+ cfft_plan plan = RALLOC(cfft_plan_i,1);
+ if (!plan) return NULL;
+ plan->blueplan=0;
+ plan->packplan=0;
+ /* small or sufficiently smooth lengths always go the direct route */
+ if ((length<50) || (largest_prime_factor(length)<=sqrt(length)))
+ {
+ plan->packplan=make_cfftp_plan(length);
+ if (!plan->packplan) { DEALLOC(plan); return NULL; }
+ return plan;
+ }
+ double comp1 = cost_guess(length);
+ double comp2 = 2*cost_guess(good_size(2*length-1));
+ comp2*=1.5; /* fudge factor that appears to give good overall performance */
+ if (comp2<comp1) // use Bluestein
+ {
+ plan->blueplan=make_fftblue_plan(length);
+ if (!plan->blueplan) { DEALLOC(plan); return NULL; }
+ }
+ else
+ {
+ plan->packplan=make_cfftp_plan(length);
+ if (!plan->packplan) { DEALLOC(plan); return NULL; }
+ }
+ return plan;
+ }
+
+/* Free a top-level complex plan and whichever sub-plan it owns. */
+static void destroy_cfft_plan (cfft_plan plan)
+ {
+ if (plan->blueplan)
+ destroy_fftblue_plan(plan->blueplan);
+ if (plan->packplan)
+ destroy_cfftp_plan(plan->packplan);
+ DEALLOC(plan);
+ }
+
+/* Dispatch a complex backward transform to whichever sub-plan exists.
+   (else-form matches rfft_backward for consistency.) */
+WARN_UNUSED_RESULT static int cfft_backward(cfft_plan plan, double c[], double fct)
+ {
+ if (plan->packplan)
+ return cfftp_backward(plan->packplan,c,fct);
+ else // if (plan->blueplan)
+ return cfftblue_backward(plan->blueplan,c,fct);
+ }
+
+/* Dispatch a complex forward transform to whichever sub-plan exists.
+   (else-form matches rfft_forward for consistency.) */
+WARN_UNUSED_RESULT static int cfft_forward(cfft_plan plan, double c[], double fct)
+ {
+ if (plan->packplan)
+ return cfftp_forward(plan->packplan,c,fct);
+ else // if (plan->blueplan)
+ return cfftblue_forward(plan->blueplan,c,fct);
+ }
+
+typedef struct rfft_plan_i
+ {
+ rfftp_plan packplan;
+ fftblue_plan blueplan;
+ } rfft_plan_i;
+
+/* Top-level real plan: same structure as make_cfft_plan, but the direct
+   cost is halved (real transform does roughly half the work of the
+   complex one).  Exactly one of packplan/blueplan is set. */
+static rfft_plan make_rfft_plan (size_t length)
+ {
+ if (length==0) return NULL;
+ rfft_plan plan = RALLOC(rfft_plan_i,1);
+ if (!plan) return NULL;
+ plan->blueplan=0;
+ plan->packplan=0;
+ if ((length<50) || (largest_prime_factor(length)<=sqrt(length)))
+ {
+ plan->packplan=make_rfftp_plan(length);
+ if (!plan->packplan) { DEALLOC(plan); return NULL; }
+ return plan;
+ }
+ double comp1 = 0.5*cost_guess(length);
+ double comp2 = 2*cost_guess(good_size(2*length-1));
+ comp2*=1.5; /* fudge factor that appears to give good overall performance */
+ if (comp2<comp1) // use Bluestein
+ {
+ plan->blueplan=make_fftblue_plan(length);
+ if (!plan->blueplan) { DEALLOC(plan); return NULL; }
+ }
+ else
+ {
+ plan->packplan=make_rfftp_plan(length);
+ if (!plan->packplan) { DEALLOC(plan); return NULL; }
+ }
+ return plan;
+ }
+
+/* Free a top-level real plan and whichever sub-plan it owns. */
+static void destroy_rfft_plan (rfft_plan plan)
+ {
+ if (plan->blueplan)
+ destroy_fftblue_plan(plan->blueplan);
+ if (plan->packplan)
+ destroy_rfftp_plan(plan->packplan);
+ DEALLOC(plan);
+ }
+
+/* Dispatch real backward/forward transforms to whichever sub-plan the
+   top-level plan owns. */
+WARN_UNUSED_RESULT static int rfft_backward(rfft_plan plan, double c[], double fct)
+ {
+ if (plan->packplan)
+ return rfftp_backward(plan->packplan,c,fct);
+ else // if (plan->blueplan)
+ return rfftblue_backward(plan->blueplan,c,fct);
+ }
+
+WARN_UNUSED_RESULT static int rfft_forward(rfft_plan plan, double c[], double fct)
+ {
+ if (plan->packplan)
+ return rfftp_forward(plan->packplan,c,fct);
+ else // if (plan->blueplan)
+ return rfftblue_forward(plan->blueplan,c,fct);
+ }
+
+#define NPY_NO_DEPRECATED_API NPY_API_VERSION
+
+#include "Python.h"
+#include "numpy/arrayobject.h"
+
+/* Run a 1-D complex transform over the last axis of `a1` for every
+   leading-axes row, in place on a forced complex-double copy.  Returns
+   the new array, or NULL / MemoryError on failure.  The GIL is released
+   around the numeric work. */
+static PyObject *
+execute_complex(PyObject *a1, int is_forward, double fct)
+{
+ PyArrayObject *data = (PyArrayObject *)PyArray_FromAny(a1,
+ PyArray_DescrFromType(NPY_CDOUBLE), 1, 0,
+ NPY_ARRAY_ENSURECOPY | NPY_ARRAY_DEFAULT |
+ NPY_ARRAY_ENSUREARRAY | NPY_ARRAY_FORCECAST,
+ NULL);
+ if (!data) return NULL;
+
+ int npts = PyArray_DIM(data, PyArray_NDIM(data) - 1);
+ cfft_plan plan=NULL;
+
+ int nrepeats = PyArray_SIZE(data)/npts;
+ double *dptr = (double *)PyArray_DATA(data);
+ int fail=0;
+ Py_BEGIN_ALLOW_THREADS;
+ NPY_SIGINT_ON;
+ plan = make_cfft_plan(npts);
+ if (!plan) fail=1;
+ if (!fail)
+ for (int i = 0; i < nrepeats; i++) {
+ int res = is_forward ?
+ cfft_forward(plan, dptr, fct) : cfft_backward(plan, dptr, fct);
+ if (res!=0) { fail=1; break; }
+ dptr += npts*2; /* advance one complex row (2 doubles per element) */
+ }
+ if (plan) destroy_cfft_plan(plan);
+ NPY_SIGINT_OFF;
+ Py_END_ALLOW_THREADS;
+ if (fail) {
+ /* NOTE(review): every failure in the C core is an allocation
+    failure, so MemoryError is the appropriate (only) error here */
+ Py_XDECREF(data);
+ return PyErr_NoMemory();
+ }
+ return (PyObject *)data;
+}
+
+static PyObject *
+execute_real_forward(PyObject *a1, double fct)
+{
+ rfft_plan plan=NULL;
+ int fail = 0;
+ PyArrayObject *data = (PyArrayObject *)PyArray_FromAny(a1,
+ PyArray_DescrFromType(NPY_DOUBLE), 1, 0,
+ NPY_ARRAY_DEFAULT | NPY_ARRAY_ENSUREARRAY | NPY_ARRAY_FORCECAST,
+ NULL);
+ if (!data) return NULL;
+
+ int ndim = PyArray_NDIM(data);
+ const npy_intp *odim = PyArray_DIMS(data);
+ int npts = odim[ndim - 1];
+ npy_intp *tdim=(npy_intp *)malloc(ndim*sizeof(npy_intp));
+ if (!tdim)
+ { Py_XDECREF(data); return NULL; }
+ for (int d=0; d<ndim-1; ++d)
+ tdim[d] = odim[d];
+ tdim[ndim-1] = npts/2 + 1;
+ PyArrayObject *ret = (PyArrayObject *)PyArray_Empty(ndim,
+ tdim, PyArray_DescrFromType(NPY_CDOUBLE), 0);
+ free(tdim);
+ if (!ret) fail=1;
+ if (!fail) {
+ int rstep = PyArray_DIM(ret, PyArray_NDIM(ret) - 1)*2;
+
+ int nrepeats = PyArray_SIZE(data)/npts;
+ double *rptr = (double *)PyArray_DATA(ret),
+ *dptr = (double *)PyArray_DATA(data);
+
+ Py_BEGIN_ALLOW_THREADS;
+ NPY_SIGINT_ON;
+ plan = make_rfft_plan(npts);
+ if (!plan) fail=1;
+ if (!fail)
+ for (int i = 0; i < nrepeats; i++) {
+ rptr[rstep-1] = 0.0;
+ memcpy((char *)(rptr+1), dptr, npts*sizeof(double));
+ if (rfft_forward(plan, rptr+1, fct)!=0) {fail=1; break;}
+ rptr[0] = rptr[1];
+ rptr[1] = 0.0;
+ rptr += rstep;
+ dptr += npts;
+ }
+ if (plan) destroy_rfft_plan(plan);
+ NPY_SIGINT_OFF;
+ Py_END_ALLOW_THREADS;
+ }
+ if (fail) {
+ Py_XDECREF(data);
+ Py_XDECREF(ret);
+ return PyErr_NoMemory();
+ }
+ Py_DECREF(data);
+ return (PyObject *)ret;
+}
+static PyObject *
+execute_real_backward(PyObject *a1, double fct)
+{
+ rfft_plan plan=NULL;
+ PyArrayObject *data = (PyArrayObject *)PyArray_FromAny(a1,
+ PyArray_DescrFromType(NPY_CDOUBLE), 1, 0,
+ NPY_ARRAY_DEFAULT | NPY_ARRAY_ENSUREARRAY | NPY_ARRAY_FORCECAST,
+ NULL);
+ if (!data) return NULL;
+ int npts = PyArray_DIM(data, PyArray_NDIM(data) - 1);
+ PyArrayObject *ret = (PyArrayObject *)PyArray_Empty(PyArray_NDIM(data),
+ PyArray_DIMS(data), PyArray_DescrFromType(NPY_DOUBLE), 0);
+ int fail = 0;
+ if (!ret) fail=1;
+ if (!fail) {
+ int nrepeats = PyArray_SIZE(ret)/npts;
+ double *rptr = (double *)PyArray_DATA(ret),
+ *dptr = (double *)PyArray_DATA(data);
+
+ Py_BEGIN_ALLOW_THREADS;
+ NPY_SIGINT_ON;
+ plan = make_rfft_plan(npts);
+ if (!plan) fail=1;
+ if (!fail) {
+ for (int i = 0; i < nrepeats; i++) {
+ memcpy((char *)(rptr + 1), (dptr + 2), (npts - 1)*sizeof(double));
+ rptr[0] = dptr[0];
+ if (rfft_backward(plan, rptr, fct)!=0) {fail=1; break;}
+ rptr += npts;
+ dptr += npts*2;
+ }
+ }
+ if (plan) destroy_rfft_plan(plan);
+ NPY_SIGINT_OFF;
+ Py_END_ALLOW_THREADS;
+ }
+ if (fail) {
+ Py_XDECREF(data);
+ Py_XDECREF(ret);
+ return PyErr_NoMemory();
+ }
+ Py_DECREF(data);
+ return (PyObject *)ret;
+}
+
+/* Dispatch a real transform: forward (r2c) or backward (c2r). */
+static PyObject *
+execute_real(PyObject *a1, int is_forward, double fct)
+{
+ return is_forward ? execute_real_forward(a1, fct)
+ : execute_real_backward(a1, fct);
+}
+
+static const char execute__doc__[] = "";
+
+/* Module entry point: execute(a, is_real, is_forward, fct) — selects
+   real vs. complex and forward vs. backward, scaling the result by
+   1/fct's caller-chosen normalisation factor fct. */
+static PyObject *
+execute(PyObject *NPY_UNUSED(self), PyObject *args)
+{
+ PyObject *a1;
+ int is_real, is_forward;
+ double fct;
+
+ if(!PyArg_ParseTuple(args, "Oiid:execute", &a1, &is_real, &is_forward, &fct)) {
+ return NULL;
+ }
+
+ return is_real ? execute_real(a1, is_forward, fct)
+ : execute_complex(a1, is_forward, fct);
+}
+
+/* List of methods defined in the module */
+
+static struct PyMethodDef methods[] = {
+ {"execute", execute, 1, execute__doc__},
+ {NULL, NULL, 0, NULL} /* sentinel */
+};
+
+#if PY_MAJOR_VERSION >= 3
+static struct PyModuleDef moduledef = {
+ PyModuleDef_HEAD_INIT,
+ "_pocketfft_internal",
+ NULL,
+ -1,
+ methods,
+ NULL,
+ NULL,
+ NULL,
+ NULL
+};
+#endif
+
+/* Initialization function for the module */
+#if PY_MAJOR_VERSION >= 3
+#define RETVAL(x) x
+PyMODINIT_FUNC PyInit__pocketfft_internal(void)
+#else
+#define RETVAL(x)
+PyMODINIT_FUNC
+init_pocketfft_internal(void)
+#endif
+{
+ PyObject *m;
+#if PY_MAJOR_VERSION >= 3
+ m = PyModule_Create(&moduledef);
+#else
+ static const char module_documentation[] = "";
+
+ m = Py_InitModule4("_pocketfft_internal", methods,
+ module_documentation,
+ (PyObject*)NULL,PYTHON_API_VERSION);
+#endif
+ if (m == NULL) {
+ return RETVAL(NULL);
+ }
+
+ /* Import the array object */
+ import_array();
+
+ /* XXXX Add constants here */
+
+ return RETVAL(m);
+}
diff --git a/numpy/fft/fftpack.py b/numpy/fft/_pocketfft.py
index e0e96cc79..50720cda4 100644
--- a/numpy/fft/fftpack.py
+++ b/numpy/fft/_pocketfft.py
@@ -26,30 +26,30 @@ n = n-dimensional transform
(Note: 2D routines are just nD routines with different default
behavior.)
-The underlying code for these functions is an f2c-translated and modified
-version of the FFTPACK routines.
-
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fft', 'ifft', 'rfft', 'irfft', 'hfft', 'ihfft', 'rfftn',
'irfftn', 'rfft2', 'irfft2', 'fft2', 'ifft2', 'fftn', 'ifftn']
-from numpy.core import (array, asarray, zeros, swapaxes, shape, conjugate,
- take, sqrt)
+import functools
+
+from numpy.core import asarray, zeros, swapaxes, conjugate, take, sqrt
+from . import _pocketfft_internal as pfi
from numpy.core.multiarray import normalize_axis_index
-from . import fftpack_lite as fftpack
-from .helper import _FFTCache
+from numpy.core import overrides
-_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
-_real_fft_cache = _FFTCache(max_size_in_mb=100, max_item_count=32)
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.fft')
-def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
- work_function=fftpack.cfftf, fft_cache=_fft_cache):
- a = asarray(a)
- axis = normalize_axis_index(axis, a.ndim)
+# `inv_norm` is a float by which the result of the transform needs to be
+# divided. This replaces the original, more intuitive 'fct` parameter to avoid
+# divisions by zero (or alternatively additional checks) in the case of
+# zero-length axes during its computation.
+def _raw_fft(a, n, axis, is_real, is_forward, inv_norm):
+ axis = normalize_axis_index(axis, a.ndim)
if n is None:
n = a.shape[axis]
@@ -57,14 +57,7 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
raise ValueError("Invalid number of FFT data points (%d) specified."
% n)
- # We have to ensure that only a single thread can access a wsave array
- # at any given time. Thus we remove it from the cache and insert it
- # again after it has been used. Multiple threads might create multiple
- # copies of the wsave array. This is intentional and a limitation of
- # the current C code.
- wsave = fft_cache.pop_twiddle_factors(n)
- if wsave is None:
- wsave = init_function(n)
+ fct = 1/inv_norm
if a.shape[axis] != n:
s = list(a.shape)
@@ -80,27 +73,29 @@ def _raw_fft(a, n=None, axis=-1, init_function=fftpack.cffti,
z[tuple(index)] = a
a = z
- if axis != a.ndim - 1:
+ if axis == a.ndim-1:
+ r = pfi.execute(a, is_real, is_forward, fct)
+ else:
a = swapaxes(a, axis, -1)
- r = work_function(a, wsave)
- if axis != a.ndim - 1:
+ r = pfi.execute(a, is_real, is_forward, fct)
r = swapaxes(r, axis, -1)
-
- # As soon as we put wsave back into the cache, another thread could pick it
- # up and start using it, so we must not do this until after we're
- # completely done using it ourselves.
- fft_cache.put_twiddle_factors(n, wsave)
-
return r
def _unitary(norm):
- if norm not in (None, "ortho"):
- raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
- % norm)
- return norm is not None
+ if norm is None:
+ return False
+ if norm=="ortho":
+ return True
+ raise ValueError("Invalid norm value %s, should be None or \"ortho\"."
+ % norm)
+def _fft_dispatcher(a, n=None, axis=None, norm=None):
+ return (a,)
+
+
+@array_function_dispatch(_fft_dispatcher)
def fft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform.
@@ -165,14 +160,10 @@ def fft(a, n=None, axis=-1, norm=None):
Examples
--------
>>> np.fft.fft(np.exp(2j * np.pi * np.arange(8) / 8))
- array([ -3.44505240e-16 +1.14383329e-17j,
- 8.00000000e+00 -5.71092652e-15j,
- 2.33482938e-16 +1.22460635e-16j,
- 1.64863782e-15 +1.77635684e-15j,
- 9.95839695e-17 +2.33482938e-16j,
- 0.00000000e+00 +1.66837030e-15j,
- 1.14383329e-17 +1.22460635e-16j,
- -1.64863782e-15 +1.77635684e-15j])
+ array([-2.33486982e-16+1.14423775e-17j, 8.00000000e+00-1.25557246e-15j,
+ 2.33486982e-16+2.33486982e-16j, 0.00000000e+00+1.22464680e-16j,
+ -1.14423775e-17+2.33486982e-16j, 0.00000000e+00+5.20784380e-16j,
+ 1.14423775e-17+1.14423775e-17j, 0.00000000e+00+1.22464680e-16j])
In this example, real input has an FFT which is Hermitian, i.e., symmetric
in the real part and anti-symmetric in the imaginary part, as described in
@@ -188,15 +179,17 @@ def fft(a, n=None, axis=-1, norm=None):
"""
- a = asarray(a).astype(complex, copy=False)
+ a = asarray(a)
if n is None:
n = a.shape[axis]
- output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftf, _fft_cache)
- if _unitary(norm):
- output *= 1 / sqrt(n)
+ inv_norm = 1
+ if norm is not None and _unitary(norm):
+ inv_norm = sqrt(n)
+ output = _raw_fft(a, n, axis, False, True, inv_norm)
return output
+@array_function_dispatch(_fft_dispatcher)
def ifft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional inverse discrete Fourier Transform.
@@ -265,7 +258,7 @@ def ifft(a, n=None, axis=-1, norm=None):
Examples
--------
>>> np.fft.ifft([0, 4, 0, 0])
- array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j])
+ array([ 1.+0.j, 0.+1.j, -1.+0.j, 0.-1.j]) # may vary
Create and plot a band-limited signal with random phases:
@@ -275,21 +268,25 @@ def ifft(a, n=None, axis=-1, norm=None):
>>> n[40:60] = np.exp(1j*np.random.uniform(0, 2*np.pi, (20,)))
>>> s = np.fft.ifft(n)
>>> plt.plot(t, s.real, 'b-', t, s.imag, 'r--')
- ...
+ [<matplotlib.lines.Line2D object at ...>, <matplotlib.lines.Line2D object at ...>]
>>> plt.legend(('real', 'imaginary'))
- ...
+ <matplotlib.legend.Legend object at ...>
>>> plt.show()
"""
- # The copy may be required for multithreading.
- a = array(a, copy=True, dtype=complex)
+ a = asarray(a)
if n is None:
n = a.shape[axis]
- unitary = _unitary(norm)
- output = _raw_fft(a, n, axis, fftpack.cffti, fftpack.cfftb, _fft_cache)
- return output * (1 / (sqrt(n) if unitary else n))
+ if norm is not None and _unitary(norm):
+ inv_norm = sqrt(max(n, 1))
+ else:
+ inv_norm = n
+ output = _raw_fft(a, n, axis, False, False, inv_norm)
+ return output
+
+@array_function_dispatch(_fft_dispatcher)
def rfft(a, n=None, axis=-1, norm=None):
"""
Compute the one-dimensional discrete Fourier Transform for real input.
@@ -359,26 +356,26 @@ def rfft(a, n=None, axis=-1, norm=None):
Examples
--------
>>> np.fft.fft([0, 1, 0, 0])
- array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j])
+ array([ 1.+0.j, 0.-1.j, -1.+0.j, 0.+1.j]) # may vary
>>> np.fft.rfft([0, 1, 0, 0])
- array([ 1.+0.j, 0.-1.j, -1.+0.j])
+ array([ 1.+0.j, 0.-1.j, -1.+0.j]) # may vary
Notice how the final element of the `fft` output is the complex conjugate
of the second element, for real input. For `rfft`, this symmetry is
exploited to compute only the non-negative frequency terms.
"""
- # The copy may be required for multithreading.
- a = array(a, copy=True, dtype=float)
- output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftf,
- _real_fft_cache)
- if _unitary(norm):
+ a = asarray(a)
+ inv_norm = 1
+ if norm is not None and _unitary(norm):
if n is None:
n = a.shape[axis]
- output *= 1 / sqrt(n)
+ inv_norm = sqrt(n)
+ output = _raw_fft(a, n, axis, True, True, inv_norm)
return output
+@array_function_dispatch(_fft_dispatcher)
def irfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse of the n-point DFT for real input.
@@ -402,8 +399,9 @@ def irfft(a, n=None, axis=-1, norm=None):
Length of the transformed axis of the output.
For `n` output points, ``n//2+1`` input points are necessary. If the
input is longer than this, it is cropped. If it is shorter than this,
- it is padded with zeros. If `n` is not given, it is determined from
- the length of the input along the axis specified by `axis`.
+ it is padded with zeros. If `n` is not given, it is taken to be
+ ``2*(m-1)`` where ``m`` is the length of the input along the axis
+ specified by `axis`.
axis : int, optional
Axis over which to compute the inverse FFT. If not given, the last
axis is used.
@@ -446,12 +444,20 @@ def irfft(a, n=None, axis=-1, norm=None):
thus resample a series to `m` points via Fourier interpolation by:
``a_resamp = irfft(rfft(a), m)``.
+ The correct interpretation of the hermitian input depends on the length of
+ the original data, as given by `n`. This is because each input shape could
+ correspond to either an odd or even length signal. By default, `irfft`
+ assumes an even output length which puts the last entry at the Nyquist
+ frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
+ the value is thus treated as purely real. To avoid losing information, the
+ correct length of the real input **must** be given.
+
Examples
--------
>>> np.fft.ifft([1, -1j, -1, 1j])
- array([ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j])
+ array([0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]) # may vary
>>> np.fft.irfft([1, -1j, -1])
- array([ 0., 1., 0., 0.])
+ array([0., 1., 0., 0.])
Notice how the last term in the input to the ordinary `ifft` is the
complex conjugate of the second term, and the output has zero imaginary
@@ -459,16 +465,17 @@ def irfft(a, n=None, axis=-1, norm=None):
specified, and the output array is purely real.
"""
- # The copy may be required for multithreading.
- a = array(a, copy=True, dtype=complex)
+ a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
- unitary = _unitary(norm)
- output = _raw_fft(a, n, axis, fftpack.rffti, fftpack.rfftb,
- _real_fft_cache)
- return output * (1 / (sqrt(n) if unitary else n))
+ inv_norm = n
+ if norm is not None and _unitary(norm):
+ inv_norm = sqrt(n)
+ output = _raw_fft(a, n, axis, True, False, inv_norm)
+ return output
+@array_function_dispatch(_fft_dispatcher)
def hfft(a, n=None, axis=-1, norm=None):
"""
Compute the FFT of a signal that has Hermitian symmetry, i.e., a real
@@ -482,8 +489,9 @@ def hfft(a, n=None, axis=-1, norm=None):
Length of the transformed axis of the output. For `n` output
points, ``n//2 + 1`` input points are necessary. If the input is
longer than this, it is cropped. If it is shorter than this, it is
- padded with zeros. If `n` is not given, it is determined from the
- length of the input along the axis specified by `axis`.
+ padded with zeros. If `n` is not given, it is taken to be ``2*(m-1)``
+ where ``m`` is the length of the input along the axis specified by
+ `axis`.
axis : int, optional
Axis over which to compute the FFT. If not given, the last
axis is used.
@@ -522,20 +530,28 @@ def hfft(a, n=None, axis=-1, norm=None):
* even: ``ihfft(hfft(a, 2*len(a) - 2) == a``, within roundoff error,
* odd: ``ihfft(hfft(a, 2*len(a) - 1) == a``, within roundoff error.
+ The correct interpretation of the hermitian input depends on the length of
+ the original data, as given by `n`. This is because each input shape could
+ correspond to either an odd or even length signal. By default, `hfft`
+ assumes an even output length which puts the last entry at the Nyquist
+ frequency; aliasing with its symmetric counterpart. By Hermitian symmetry,
+ the value is thus treated as purely real. To avoid losing information, the
+ shape of the full signal **must** be given.
+
Examples
--------
>>> signal = np.array([1, 2, 3, 4, 3, 2])
>>> np.fft.fft(signal)
- array([ 15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j])
+ array([15.+0.j, -4.+0.j, 0.+0.j, -1.-0.j, 0.+0.j, -4.+0.j]) # may vary
>>> np.fft.hfft(signal[:4]) # Input first half of signal
- array([ 15., -4., 0., -1., 0., -4.])
+ array([15., -4., 0., -1., 0., -4.])
>>> np.fft.hfft(signal, 6) # Input entire signal and truncate
- array([ 15., -4., 0., -1., 0., -4.])
+ array([15., -4., 0., -1., 0., -4.])
>>> signal = np.array([[1, 1.j], [-1.j, 2]])
>>> np.conj(signal.T) - signal # check Hermitian symmetry
- array([[ 0.-0.j, 0.+0.j],
+ array([[ 0.-0.j, -0.+0.j], # may vary
[ 0.+0.j, 0.-0.j]])
>>> freq_spectrum = np.fft.hfft(signal)
>>> freq_spectrum
@@ -543,14 +559,14 @@ def hfft(a, n=None, axis=-1, norm=None):
[ 2., -2.]])
"""
- # The copy may be required for multithreading.
- a = array(a, copy=True, dtype=complex)
+ a = asarray(a)
if n is None:
n = (a.shape[axis] - 1) * 2
unitary = _unitary(norm)
return irfft(conjugate(a), n, axis) * (sqrt(n) if unitary else n)
+@array_function_dispatch(_fft_dispatcher)
def ihfft(a, n=None, axis=-1, norm=None):
"""
Compute the inverse FFT of a signal that has Hermitian symmetry.
@@ -598,13 +614,12 @@ def ihfft(a, n=None, axis=-1, norm=None):
--------
>>> spectrum = np.array([ 15, -4, 0, -1, 0, -4])
>>> np.fft.ifft(spectrum)
- array([ 1.+0.j, 2.-0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.-0.j])
+ array([1.+0.j, 2.+0.j, 3.+0.j, 4.+0.j, 3.+0.j, 2.+0.j]) # may vary
>>> np.fft.ihfft(spectrum)
- array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j])
+ array([ 1.-0.j, 2.-0.j, 3.-0.j, 4.-0.j]) # may vary
"""
- # The copy may be required for multithreading.
- a = array(a, copy=True, dtype=float)
+ a = asarray(a)
if n is None:
n = a.shape[axis]
unitary = _unitary(norm)
@@ -641,6 +656,11 @@ def _raw_fftnd(a, s=None, axes=None, function=fft, norm=None):
return a
+def _fftn_dispatcher(a, s=None, axes=None, norm=None):
+ return (a,)
+
+
+@array_function_dispatch(_fftn_dispatcher)
def fftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform.
@@ -709,17 +729,17 @@ def fftn(a, s=None, axes=None, norm=None):
--------
>>> a = np.mgrid[:3, :3, :3][0]
>>> np.fft.fftn(a, axes=(1, 2))
- array([[[ 0.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j]],
- [[ 9.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j]],
- [[ 18.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j]]])
+ array([[[ 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[ 9.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]],
+ [[18.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j],
+ [ 0.+0.j, 0.+0.j, 0.+0.j]]])
>>> np.fft.fftn(a, (2, 2), axes=(0, 1))
- array([[[ 2.+0.j, 2.+0.j, 2.+0.j],
+ array([[[ 2.+0.j, 2.+0.j, 2.+0.j], # may vary
[ 0.+0.j, 0.+0.j, 0.+0.j]],
[[-2.+0.j, -2.+0.j, -2.+0.j],
[ 0.+0.j, 0.+0.j, 0.+0.j]]])
@@ -738,6 +758,7 @@ def fftn(a, s=None, axes=None, norm=None):
return _raw_fftnd(a, s, axes, fft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def ifftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional inverse discrete Fourier Transform.
@@ -814,10 +835,10 @@ def ifftn(a, s=None, axes=None, norm=None):
--------
>>> a = np.eye(4)
>>> np.fft.ifftn(np.fft.fftn(a, axes=(0,)), axes=(1,))
- array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
+ array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j]])
Create and plot an image with band-limited frequency content:
@@ -835,6 +856,7 @@ def ifftn(a, s=None, axes=None, norm=None):
return _raw_fftnd(a, s, axes, ifft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def fft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional discrete Fourier Transform
@@ -909,22 +931,23 @@ def fft2(a, s=None, axes=(-2, -1), norm=None):
--------
>>> a = np.mgrid[:5, :5][0]
>>> np.fft.fft2(a)
- array([[ 50.0 +0.j , 0.0 +0.j , 0.0 +0.j ,
- 0.0 +0.j , 0.0 +0.j ],
- [-12.5+17.20477401j, 0.0 +0.j , 0.0 +0.j ,
- 0.0 +0.j , 0.0 +0.j ],
- [-12.5 +4.0614962j , 0.0 +0.j , 0.0 +0.j ,
- 0.0 +0.j , 0.0 +0.j ],
- [-12.5 -4.0614962j , 0.0 +0.j , 0.0 +0.j ,
- 0.0 +0.j , 0.0 +0.j ],
- [-12.5-17.20477401j, 0.0 +0.j , 0.0 +0.j ,
- 0.0 +0.j , 0.0 +0.j ]])
+ array([[ 50. +0.j , 0. +0.j , 0. +0.j , # may vary
+ 0. +0.j , 0. +0.j ],
+ [-12.5+17.20477401j, 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5 +4.0614962j , 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5 -4.0614962j , 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ],
+ [-12.5-17.20477401j, 0. +0.j , 0. +0.j ,
+ 0. +0.j , 0. +0.j ]])
"""
return _raw_fftnd(a, s, axes, fft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def ifft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse discrete Fourier Transform.
@@ -1002,16 +1025,17 @@ def ifft2(a, s=None, axes=(-2, -1), norm=None):
--------
>>> a = 4 * np.eye(4)
>>> np.fft.ifft2(a)
- array([[ 1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
- [ 0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
- [ 0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
+ array([[1.+0.j, 0.+0.j, 0.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 0.+0.j, 0.+0.j, 1.+0.j],
+ [0.+0.j, 0.+0.j, 1.+0.j, 0.+0.j],
+ [0.+0.j, 1.+0.j, 0.+0.j, 0.+0.j]])
"""
return _raw_fftnd(a, s, axes, ifft, norm)
+@array_function_dispatch(_fftn_dispatcher)
def rfftn(a, s=None, axes=None, norm=None):
"""
Compute the N-dimensional discrete Fourier Transform for real input.
@@ -1083,20 +1107,19 @@ def rfftn(a, s=None, axes=None, norm=None):
--------
>>> a = np.ones((2, 2, 2))
>>> np.fft.rfftn(a)
- array([[[ 8.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j]],
- [[ 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j]]])
+ array([[[8.+0.j, 0.+0.j], # may vary
+ [0.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
>>> np.fft.rfftn(a, axes=(2, 0))
- array([[[ 4.+0.j, 0.+0.j],
- [ 4.+0.j, 0.+0.j]],
- [[ 0.+0.j, 0.+0.j],
- [ 0.+0.j, 0.+0.j]]])
+ array([[[4.+0.j, 0.+0.j], # may vary
+ [4.+0.j, 0.+0.j]],
+ [[0.+0.j, 0.+0.j],
+ [0.+0.j, 0.+0.j]]])
"""
- # The copy may be required for multithreading.
- a = array(a, copy=True, dtype=float)
+ a = asarray(a)
s, axes = _cook_nd_args(a, s, axes)
a = rfft(a, s[-1], axes[-1], norm)
for ii in range(len(axes)-1):
@@ -1104,6 +1127,7 @@ def rfftn(a, s=None, axes=None, norm=None):
return a
+@array_function_dispatch(_fftn_dispatcher)
def rfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional FFT of a real array.
@@ -1141,6 +1165,7 @@ def rfft2(a, s=None, axes=(-2, -1), norm=None):
return rfftn(a, s, axes, norm)
+@array_function_dispatch(_fftn_dispatcher)
def irfftn(a, s=None, axes=None, norm=None):
"""
Compute the inverse of the N-dimensional FFT of real input.
@@ -1167,8 +1192,9 @@ def irfftn(a, s=None, axes=None, norm=None):
where ``s[-1]//2+1`` points of the input are used.
Along any axis, if the shape indicated by `s` is smaller than that of
the input, the input is cropped. If it is larger, the input is padded
- with zeros. If `s` is not given, the shape of the input along the
- axes specified by `axes` is used.
+ with zeros. If `s` is not given, the shape of the input along the axes
+ specified by axes is used. Except for the last axis which is taken to be
+ ``2*(m-1)`` where ``m`` is the length of the input along that axis.
axes : sequence of ints, optional
Axes over which to compute the inverse FFT. If not given, the last
`len(s)` axes are used, or all axes if `s` is also not specified.
@@ -1213,21 +1239,29 @@ def irfftn(a, s=None, axes=None, norm=None):
See `rfft` for definitions and conventions used for real input.
+ The correct interpretation of the hermitian input depends on the shape of
+ the original data, as given by `s`. This is because each input shape could
+ correspond to either an odd or even length signal. By default, `irfftn`
+ assumes an even output length which puts the last entry at the Nyquist
+ frequency; aliasing with its symmetric counterpart. When performing the
+ final complex to real transform, the last value is thus treated as purely
+ real. To avoid losing information, the correct shape of the real input
+ **must** be given.
+
Examples
--------
>>> a = np.zeros((3, 2, 2))
>>> a[0, 0, 0] = 3 * 2 * 2
>>> np.fft.irfftn(a)
- array([[[ 1., 1.],
- [ 1., 1.]],
- [[ 1., 1.],
- [ 1., 1.]],
- [[ 1., 1.],
- [ 1., 1.]]])
+ array([[[1., 1.],
+ [1., 1.]],
+ [[1., 1.],
+ [1., 1.]],
+ [[1., 1.],
+ [1., 1.]]])
"""
- # The copy may be required for multithreading.
- a = array(a, copy=True, dtype=complex)
+ a = asarray(a)
s, axes = _cook_nd_args(a, s, axes, invreal=1)
for ii in range(len(axes)-1):
a = ifft(a, s[ii], axes[ii], norm)
@@ -1235,6 +1269,7 @@ def irfftn(a, s=None, axes=None, norm=None):
return a
+@array_function_dispatch(_fftn_dispatcher)
def irfft2(a, s=None, axes=(-2, -1), norm=None):
"""
Compute the 2-dimensional inverse FFT of a real array.
@@ -1244,7 +1279,7 @@ def irfft2(a, s=None, axes=(-2, -1), norm=None):
a : array_like
The input array
s : sequence of ints, optional
- Shape of the inverse FFT.
+ Shape of the real output to the inverse FFT.
axes : sequence of ints, optional
The axes over which to compute the inverse fft.
Default is the last two axes.
diff --git a/numpy/fft/fftpack.c b/numpy/fft/fftpack.c
deleted file mode 100644
index 07fa2bf4c..000000000
--- a/numpy/fft/fftpack.c
+++ /dev/null
@@ -1,1536 +0,0 @@
-/*
- * fftpack.c : A set of FFT routines in C.
- * Algorithmically based on Fortran-77 FFTPACK by Paul N. Swarztrauber (Version 4, 1985).
-*/
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include <Python.h>
-#include <math.h>
-#include <stdio.h>
-#include <numpy/ndarraytypes.h>
-
-#define DOUBLE
-#ifdef DOUBLE
-#define Treal double
-#else
-#define Treal float
-#endif
-
-#define ref(u,a) u[a]
-
-/* Macros for accurate calculation of the twiddle factors. */
-#define TWOPI 6.283185307179586476925286766559005768391
-#define cos2pi(m, n) cos((TWOPI * (m)) / (n))
-#define sin2pi(m, n) sin((TWOPI * (m)) / (n))
-
-#define MAXFAC 13 /* maximum number of factors in factorization of n */
-#define NSPECIAL 4 /* number of factors for which we have special-case routines */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-static void sincos2pi(int m, int n, Treal* si, Treal* co)
-/* Calculates sin(2pi * m/n) and cos(2pi * m/n). It is more accurate
- * than the naive calculation as the fraction m/n is reduced to [0, 1/8) first.
- * Due to the symmetry of sin(x) and cos(x) the values for all x can be
- * determined from the function values of the reduced argument in the first
- * octant.
- */
- {
- int n8, m8, octant;
- n8 = 8 * n;
- m8 = (8 * m) % n8;
- octant = m8 / n;
- m8 = m8 % n;
- switch(octant) {
- case 0:
- *co = cos2pi(m8, n8);
- *si = sin2pi(m8, n8);
- break;
- case 1:
- *co = sin2pi(n-m8, n8);
- *si = cos2pi(n-m8, n8);
- break;
- case 2:
- *co = -sin2pi(m8, n8);
- *si = cos2pi(m8, n8);
- break;
- case 3:
- *co = -cos2pi(n-m8, n8);
- *si = sin2pi(n-m8, n8);
- break;
- case 4:
- *co = -cos2pi(m8, n8);
- *si = -sin2pi(m8, n8);
- break;
- case 5:
- *co = -sin2pi(n-m8, n8);
- *si = -cos2pi(n-m8, n8);
- break;
- case 6:
- *co = sin2pi(m8, n8);
- *si = -cos2pi(m8, n8);
- break;
- case 7:
- *co = cos2pi(n-m8, n8);
- *si = -sin2pi(n-m8, n8);
- break;
- }
- }
-
-/* ----------------------------------------------------------------------
- passf2, passf3, passf4, passf5, passf. Complex FFT passes fwd and bwd.
------------------------------------------------------------------------ */
-
-static void passf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[], int isign)
- /* isign==+1 for backward transform */
- {
- int i, k, ah, ac;
- Treal ti2, tr2;
- if (ido <= 2) {
- for (k=0; k<l1; k++) {
- ah = k*ido;
- ac = 2*k*ido;
- ch[ah] = ref(cc,ac) + ref(cc,ac + ido);
- ch[ah + ido*l1] = ref(cc,ac) - ref(cc,ac + ido);
- ch[ah+1] = ref(cc,ac+1) + ref(cc,ac + ido + 1);
- ch[ah + ido*l1 + 1] = ref(cc,ac+1) - ref(cc,ac + ido + 1);
- }
- } else {
- for (k=0; k<l1; k++) {
- for (i=0; i<ido-1; i+=2) {
- ah = i + k*ido;
- ac = i + 2*k*ido;
- ch[ah] = ref(cc,ac) + ref(cc,ac + ido);
- tr2 = ref(cc,ac) - ref(cc,ac + ido);
- ch[ah+1] = ref(cc,ac+1) + ref(cc,ac + 1 + ido);
- ti2 = ref(cc,ac+1) - ref(cc,ac + 1 + ido);
- ch[ah+l1*ido+1] = wa1[i]*ti2 + isign*wa1[i+1]*tr2;
- ch[ah+l1*ido] = wa1[i]*tr2 - isign*wa1[i+1]*ti2;
- }
- }
- }
- } /* passf2 */
-
-
-static void passf3(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[], int isign)
- /* isign==+1 for backward transform */
- {
- static const Treal taur = -0.5;
- static const Treal taui = 0.86602540378443864676;
- int i, k, ac, ah;
- Treal ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
- if (ido == 2) {
- for (k=1; k<=l1; k++) {
- ac = (3*k - 2)*ido;
- tr2 = ref(cc,ac) + ref(cc,ac + ido);
- cr2 = ref(cc,ac - ido) + taur*tr2;
- ah = (k - 1)*ido;
- ch[ah] = ref(cc,ac - ido) + tr2;
-
- ti2 = ref(cc,ac + 1) + ref(cc,ac + ido + 1);
- ci2 = ref(cc,ac - ido + 1) + taur*ti2;
- ch[ah + 1] = ref(cc,ac - ido + 1) + ti2;
-
- cr3 = isign*taui*(ref(cc,ac) - ref(cc,ac + ido));
- ci3 = isign*taui*(ref(cc,ac + 1) - ref(cc,ac + ido + 1));
- ch[ah + l1*ido] = cr2 - ci3;
- ch[ah + 2*l1*ido] = cr2 + ci3;
- ch[ah + l1*ido + 1] = ci2 + cr3;
- ch[ah + 2*l1*ido + 1] = ci2 - cr3;
- }
- } else {
- for (k=1; k<=l1; k++) {
- for (i=0; i<ido-1; i+=2) {
- ac = i + (3*k - 2)*ido;
- tr2 = ref(cc,ac) + ref(cc,ac + ido);
- cr2 = ref(cc,ac - ido) + taur*tr2;
- ah = i + (k-1)*ido;
- ch[ah] = ref(cc,ac - ido) + tr2;
- ti2 = ref(cc,ac + 1) + ref(cc,ac + ido + 1);
- ci2 = ref(cc,ac - ido + 1) + taur*ti2;
- ch[ah + 1] = ref(cc,ac - ido + 1) + ti2;
- cr3 = isign*taui*(ref(cc,ac) - ref(cc,ac + ido));
- ci3 = isign*taui*(ref(cc,ac + 1) - ref(cc,ac + ido + 1));
- dr2 = cr2 - ci3;
- dr3 = cr2 + ci3;
- di2 = ci2 + cr3;
- di3 = ci2 - cr3;
- ch[ah + l1*ido + 1] = wa1[i]*di2 + isign*wa1[i+1]*dr2;
- ch[ah + l1*ido] = wa1[i]*dr2 - isign*wa1[i+1]*di2;
- ch[ah + 2*l1*ido + 1] = wa2[i]*di3 + isign*wa2[i+1]*dr3;
- ch[ah + 2*l1*ido] = wa2[i]*dr3 - isign*wa2[i+1]*di3;
- }
- }
- }
- } /* passf3 */
-
-
-static void passf4(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[], const Treal wa3[], int isign)
- /* isign == -1 for forward transform and +1 for backward transform */
- {
- int i, k, ac, ah;
- Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
- if (ido == 2) {
- for (k=0; k<l1; k++) {
- ac = 4*k*ido + 1;
- ti1 = ref(cc,ac) - ref(cc,ac + 2*ido);
- ti2 = ref(cc,ac) + ref(cc,ac + 2*ido);
- tr4 = ref(cc,ac + 3*ido) - ref(cc,ac + ido);
- ti3 = ref(cc,ac + ido) + ref(cc,ac + 3*ido);
- tr1 = ref(cc,ac - 1) - ref(cc,ac + 2*ido - 1);
- tr2 = ref(cc,ac - 1) + ref(cc,ac + 2*ido - 1);
- ti4 = ref(cc,ac + ido - 1) - ref(cc,ac + 3*ido - 1);
- tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 3*ido - 1);
- ah = k*ido;
- ch[ah] = tr2 + tr3;
- ch[ah + 2*l1*ido] = tr2 - tr3;
- ch[ah + 1] = ti2 + ti3;
- ch[ah + 2*l1*ido + 1] = ti2 - ti3;
- ch[ah + l1*ido] = tr1 + isign*tr4;
- ch[ah + 3*l1*ido] = tr1 - isign*tr4;
- ch[ah + l1*ido + 1] = ti1 + isign*ti4;
- ch[ah + 3*l1*ido + 1] = ti1 - isign*ti4;
- }
- } else {
- for (k=0; k<l1; k++) {
- for (i=0; i<ido-1; i+=2) {
- ac = i + 1 + 4*k*ido;
- ti1 = ref(cc,ac) - ref(cc,ac + 2*ido);
- ti2 = ref(cc,ac) + ref(cc,ac + 2*ido);
- ti3 = ref(cc,ac + ido) + ref(cc,ac + 3*ido);
- tr4 = ref(cc,ac + 3*ido) - ref(cc,ac + ido);
- tr1 = ref(cc,ac - 1) - ref(cc,ac + 2*ido - 1);
- tr2 = ref(cc,ac - 1) + ref(cc,ac + 2*ido - 1);
- ti4 = ref(cc,ac + ido - 1) - ref(cc,ac + 3*ido - 1);
- tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 3*ido - 1);
- ah = i + k*ido;
- ch[ah] = tr2 + tr3;
- cr3 = tr2 - tr3;
- ch[ah + 1] = ti2 + ti3;
- ci3 = ti2 - ti3;
- cr2 = tr1 + isign*tr4;
- cr4 = tr1 - isign*tr4;
- ci2 = ti1 + isign*ti4;
- ci4 = ti1 - isign*ti4;
- ch[ah + l1*ido] = wa1[i]*cr2 - isign*wa1[i + 1]*ci2;
- ch[ah + l1*ido + 1] = wa1[i]*ci2 + isign*wa1[i + 1]*cr2;
- ch[ah + 2*l1*ido] = wa2[i]*cr3 - isign*wa2[i + 1]*ci3;
- ch[ah + 2*l1*ido + 1] = wa2[i]*ci3 + isign*wa2[i + 1]*cr3;
- ch[ah + 3*l1*ido] = wa3[i]*cr4 -isign*wa3[i + 1]*ci4;
- ch[ah + 3*l1*ido + 1] = wa3[i]*ci4 + isign*wa3[i + 1]*cr4;
- }
- }
- }
- } /* passf4 */
-
-
-static void passf5(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[], int isign)
- /* isign == -1 for forward transform and +1 for backward transform */
- {
- static const Treal tr11 = 0.3090169943749474241;
- static const Treal ti11 = 0.95105651629515357212;
- static const Treal tr12 = -0.8090169943749474241;
- static const Treal ti12 = 0.58778525229247312917;
- int i, k, ac, ah;
- Treal ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
- ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
- if (ido == 2) {
- for (k = 1; k <= l1; ++k) {
- ac = (5*k - 4)*ido + 1;
- ti5 = ref(cc,ac) - ref(cc,ac + 3*ido);
- ti2 = ref(cc,ac) + ref(cc,ac + 3*ido);
- ti4 = ref(cc,ac + ido) - ref(cc,ac + 2*ido);
- ti3 = ref(cc,ac + ido) + ref(cc,ac + 2*ido);
- tr5 = ref(cc,ac - 1) - ref(cc,ac + 3*ido - 1);
- tr2 = ref(cc,ac - 1) + ref(cc,ac + 3*ido - 1);
- tr4 = ref(cc,ac + ido - 1) - ref(cc,ac + 2*ido - 1);
- tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 2*ido - 1);
- ah = (k - 1)*ido;
- ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3;
- ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3;
- cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3;
- ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3;
- cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3;
- ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3;
- cr5 = isign*(ti11*tr5 + ti12*tr4);
- ci5 = isign*(ti11*ti5 + ti12*ti4);
- cr4 = isign*(ti12*tr5 - ti11*tr4);
- ci4 = isign*(ti12*ti5 - ti11*ti4);
- ch[ah + l1*ido] = cr2 - ci5;
- ch[ah + 4*l1*ido] = cr2 + ci5;
- ch[ah + l1*ido + 1] = ci2 + cr5;
- ch[ah + 2*l1*ido + 1] = ci3 + cr4;
- ch[ah + 2*l1*ido] = cr3 - ci4;
- ch[ah + 3*l1*ido] = cr3 + ci4;
- ch[ah + 3*l1*ido + 1] = ci3 - cr4;
- ch[ah + 4*l1*ido + 1] = ci2 - cr5;
- }
- } else {
- for (k=1; k<=l1; k++) {
- for (i=0; i<ido-1; i+=2) {
- ac = i + 1 + (k*5 - 4)*ido;
- ti5 = ref(cc,ac) - ref(cc,ac + 3*ido);
- ti2 = ref(cc,ac) + ref(cc,ac + 3*ido);
- ti4 = ref(cc,ac + ido) - ref(cc,ac + 2*ido);
- ti3 = ref(cc,ac + ido) + ref(cc,ac + 2*ido);
- tr5 = ref(cc,ac - 1) - ref(cc,ac + 3*ido - 1);
- tr2 = ref(cc,ac - 1) + ref(cc,ac + 3*ido - 1);
- tr4 = ref(cc,ac + ido - 1) - ref(cc,ac + 2*ido - 1);
- tr3 = ref(cc,ac + ido - 1) + ref(cc,ac + 2*ido - 1);
- ah = i + (k - 1)*ido;
- ch[ah] = ref(cc,ac - ido - 1) + tr2 + tr3;
- ch[ah + 1] = ref(cc,ac - ido) + ti2 + ti3;
- cr2 = ref(cc,ac - ido - 1) + tr11*tr2 + tr12*tr3;
-
- ci2 = ref(cc,ac - ido) + tr11*ti2 + tr12*ti3;
- cr3 = ref(cc,ac - ido - 1) + tr12*tr2 + tr11*tr3;
-
- ci3 = ref(cc,ac - ido) + tr12*ti2 + tr11*ti3;
- cr5 = isign*(ti11*tr5 + ti12*tr4);
- ci5 = isign*(ti11*ti5 + ti12*ti4);
- cr4 = isign*(ti12*tr5 - ti11*tr4);
- ci4 = isign*(ti12*ti5 - ti11*ti4);
- dr3 = cr3 - ci4;
- dr4 = cr3 + ci4;
- di3 = ci3 + cr4;
- di4 = ci3 - cr4;
- dr5 = cr2 + ci5;
- dr2 = cr2 - ci5;
- di5 = ci2 - cr5;
- di2 = ci2 + cr5;
- ch[ah + l1*ido] = wa1[i]*dr2 - isign*wa1[i+1]*di2;
- ch[ah + l1*ido + 1] = wa1[i]*di2 + isign*wa1[i+1]*dr2;
- ch[ah + 2*l1*ido] = wa2[i]*dr3 - isign*wa2[i+1]*di3;
- ch[ah + 2*l1*ido + 1] = wa2[i]*di3 + isign*wa2[i+1]*dr3;
- ch[ah + 3*l1*ido] = wa3[i]*dr4 - isign*wa3[i+1]*di4;
- ch[ah + 3*l1*ido + 1] = wa3[i]*di4 + isign*wa3[i+1]*dr4;
- ch[ah + 4*l1*ido] = wa4[i]*dr5 - isign*wa4[i+1]*di5;
- ch[ah + 4*l1*ido + 1] = wa4[i]*di5 + isign*wa4[i+1]*dr5;
- }
- }
- }
- } /* passf5 */
-
-
-static void passf(int *nac, int ido, int ip, int l1, int idl1,
- Treal cc[], Treal ch[],
- const Treal wa[], int isign)
- /* isign is -1 for forward transform and +1 for backward transform */
- {
- int idij, idlj, idot, ipph, i, j, k, l, jc, lc, ik, idj, idl, inc,idp;
- Treal wai, war;
-
- idot = ido / 2;
- /* nt = ip*idl1;*/
- ipph = (ip + 1) / 2;
- idp = ip*ido;
- if (ido >= l1) {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (k=0; k<l1; k++) {
- for (i=0; i<ido; i++) {
- ch[i + (k + j*l1)*ido] =
- ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k*ip)*ido);
- ch[i + (k + jc*l1)*ido] =
- ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k*ip)*ido);
- }
- }
- }
- for (k=0; k<l1; k++)
- for (i=0; i<ido; i++)
- ch[i + k*ido] = ref(cc,i + k*ip*ido);
- } else {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (i=0; i<ido; i++) {
- for (k=0; k<l1; k++) {
- ch[i + (k + j*l1)*ido] = ref(cc,i + (j + k*ip)*ido) + ref(cc,i + (jc + k*
- ip)*ido);
- ch[i + (k + jc*l1)*ido] = ref(cc,i + (j + k*ip)*ido) - ref(cc,i + (jc + k*
- ip)*ido);
- }
- }
- }
- for (i=0; i<ido; i++)
- for (k=0; k<l1; k++)
- ch[i + k*ido] = ref(cc,i + k*ip*ido);
- }
-
- idl = 2 - ido;
- inc = 0;
- for (l=1; l<ipph; l++) {
- lc = ip - l;
- idl += ido;
- for (ik=0; ik<idl1; ik++) {
- cc[ik + l*idl1] = ch[ik] + wa[idl - 2]*ch[ik + idl1];
- cc[ik + lc*idl1] = isign*wa[idl-1]*ch[ik + (ip-1)*idl1];
- }
- idlj = idl;
- inc += ido;
- for (j=2; j<ipph; j++) {
- jc = ip - j;
- idlj += inc;
- if (idlj > idp) idlj -= idp;
- war = wa[idlj - 2];
- wai = wa[idlj-1];
- for (ik=0; ik<idl1; ik++) {
- cc[ik + l*idl1] += war*ch[ik + j*idl1];
- cc[ik + lc*idl1] += isign*wai*ch[ik + jc*idl1];
- }
- }
- }
- for (j=1; j<ipph; j++)
- for (ik=0; ik<idl1; ik++)
- ch[ik] += ch[ik + j*idl1];
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (ik=1; ik<idl1; ik+=2) {
- ch[ik - 1 + j*idl1] = cc[ik - 1 + j*idl1] - cc[ik + jc*idl1];
- ch[ik - 1 + jc*idl1] = cc[ik - 1 + j*idl1] + cc[ik + jc*idl1];
- ch[ik + j*idl1] = cc[ik + j*idl1] + cc[ik - 1 + jc*idl1];
- ch[ik + jc*idl1] = cc[ik + j*idl1] - cc[ik - 1 + jc*idl1];
- }
- }
- *nac = 1;
- if (ido == 2) return;
- *nac = 0;
- for (ik=0; ik<idl1; ik++)
- cc[ik] = ch[ik];
- for (j=1; j<ip; j++) {
- for (k=0; k<l1; k++) {
- cc[(k + j*l1)*ido + 0] = ch[(k + j*l1)*ido + 0];
- cc[(k + j*l1)*ido + 1] = ch[(k + j*l1)*ido + 1];
- }
- }
- if (idot <= l1) {
- idij = 0;
- for (j=1; j<ip; j++) {
- idij += 2;
- for (i=3; i<ido; i+=2) {
- idij += 2;
- for (k=0; k<l1; k++) {
- cc[i - 1 + (k + j*l1)*ido] =
- wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] -
- isign*wa[idij-1]*ch[i + (k + j*l1)*ido];
- cc[i + (k + j*l1)*ido] =
- wa[idij - 2]*ch[i + (k + j*l1)*ido] +
- isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- } else {
- idj = 2 - ido;
- for (j=1; j<ip; j++) {
- idj += ido;
- for (k = 0; k < l1; k++) {
- idij = idj;
- for (i=3; i<ido; i+=2) {
- idij += 2;
- cc[i - 1 + (k + j*l1)*ido] =
- wa[idij - 2]*ch[i - 1 + (k + j*l1)*ido] -
- isign*wa[idij-1]*ch[i + (k + j*l1)*ido];
- cc[i + (k + j*l1)*ido] =
- wa[idij - 2]*ch[i + (k + j*l1)*ido] +
- isign*wa[idij-1]*ch[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- }
- } /* passf */
-
-
- /* ----------------------------------------------------------------------
-radf2,radb2, radf3,radb3, radf4,radb4, radf5,radb5, radfg,radbg.
-Treal FFT passes fwd and bwd.
----------------------------------------------------------------------- */
-
-static void radf2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[])
- {
- int i, k, ic;
- Treal ti2, tr2;
- for (k=0; k<l1; k++) {
- ch[2*k*ido] =
- ref(cc,k*ido) + ref(cc,(k + l1)*ido);
- ch[(2*k+1)*ido + ido-1] =
- ref(cc,k*ido) - ref(cc,(k + l1)*ido);
- }
- if (ido < 2) return;
- if (ido != 2) {
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i+=2) {
- ic = ido - i;
- tr2 = wa1[i - 2]*ref(cc, i-1 + (k + l1)*ido) + wa1[i - 1]*ref(cc, i + (k + l1)*ido);
- ti2 = wa1[i - 2]*ref(cc, i + (k + l1)*ido) - wa1[i - 1]*ref(cc, i-1 + (k + l1)*ido);
- ch[i + 2*k*ido] = ref(cc,i + k*ido) + ti2;
- ch[ic + (2*k+1)*ido] = ti2 - ref(cc,i + k*ido);
- ch[i - 1 + 2*k*ido] = ref(cc,i - 1 + k*ido) + tr2;
- ch[ic - 1 + (2*k+1)*ido] = ref(cc,i - 1 + k*ido) - tr2;
- }
- }
- if (ido % 2 == 1) return;
- }
- for (k=0; k<l1; k++) {
- ch[(2*k+1)*ido] = -ref(cc,ido-1 + (k + l1)*ido);
- ch[ido-1 + 2*k*ido] = ref(cc,ido-1 + k*ido);
- }
- } /* radf2 */
-
-
-static void radb2(int ido, int l1, const Treal cc[], Treal ch[], const Treal wa1[])
- {
- int i, k, ic;
- Treal ti2, tr2;
- for (k=0; k<l1; k++) {
- ch[k*ido] =
- ref(cc,2*k*ido) + ref(cc,ido-1 + (2*k+1)*ido);
- ch[(k + l1)*ido] =
- ref(cc,2*k*ido) - ref(cc,ido-1 + (2*k+1)*ido);
- }
- if (ido < 2) return;
- if (ido != 2) {
- for (k = 0; k < l1; ++k) {
- for (i = 2; i < ido; i += 2) {
- ic = ido - i;
- ch[i-1 + k*ido] =
- ref(cc,i-1 + 2*k*ido) + ref(cc,ic-1 + (2*k+1)*ido);
- tr2 = ref(cc,i-1 + 2*k*ido) - ref(cc,ic-1 + (2*k+1)*ido);
- ch[i + k*ido] =
- ref(cc,i + 2*k*ido) - ref(cc,ic + (2*k+1)*ido);
- ti2 = ref(cc,i + (2*k)*ido) + ref(cc,ic + (2*k+1)*ido);
- ch[i-1 + (k + l1)*ido] =
- wa1[i - 2]*tr2 - wa1[i - 1]*ti2;
- ch[i + (k + l1)*ido] =
- wa1[i - 2]*ti2 + wa1[i - 1]*tr2;
- }
- }
- if (ido % 2 == 1) return;
- }
- for (k = 0; k < l1; k++) {
- ch[ido-1 + k*ido] = 2*ref(cc,ido-1 + 2*k*ido);
- ch[ido-1 + (k + l1)*ido] = -2*ref(cc,(2*k+1)*ido);
- }
- } /* radb2 */
-
-
-static void radf3(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[])
- {
- static const Treal taur = -0.5;
- static const Treal taui = 0.86602540378443864676;
- int i, k, ic;
- Treal ci2, di2, di3, cr2, dr2, dr3, ti2, ti3, tr2, tr3;
- for (k=0; k<l1; k++) {
- cr2 = ref(cc,(k + l1)*ido) + ref(cc,(k + 2*l1)*ido);
- ch[3*k*ido] = ref(cc,k*ido) + cr2;
- ch[(3*k+2)*ido] = taui*(ref(cc,(k + l1*2)*ido) - ref(cc,(k + l1)*ido));
- ch[ido-1 + (3*k + 1)*ido] = ref(cc,k*ido) + taur*cr2;
- }
- if (ido == 1) return;
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i+=2) {
- ic = ido - i;
- dr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) +
- wa1[i - 1]*ref(cc,i + (k + l1)*ido);
- di2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido);
- dr3 = wa2[i - 2]*ref(cc,i - 1 + (k + l1*2)*ido) + wa2[i - 1]*ref(cc,i + (k + l1*2)*ido);
- di3 = wa2[i - 2]*ref(cc,i + (k + l1*2)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + l1*2)*ido);
- cr2 = dr2 + dr3;
- ci2 = di2 + di3;
- ch[i - 1 + 3*k*ido] = ref(cc,i - 1 + k*ido) + cr2;
- ch[i + 3*k*ido] = ref(cc,i + k*ido) + ci2;
- tr2 = ref(cc,i - 1 + k*ido) + taur*cr2;
- ti2 = ref(cc,i + k*ido) + taur*ci2;
- tr3 = taui*(di2 - di3);
- ti3 = taui*(dr3 - dr2);
- ch[i - 1 + (3*k + 2)*ido] = tr2 + tr3;
- ch[ic - 1 + (3*k + 1)*ido] = tr2 - tr3;
- ch[i + (3*k + 2)*ido] = ti2 + ti3;
- ch[ic + (3*k + 1)*ido] = ti3 - ti2;
- }
- }
- } /* radf3 */
-
-
-static void radb3(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[])
- {
- static const Treal taur = -0.5;
- static const Treal taui = 0.86602540378443864676;
- int i, k, ic;
- Treal ci2, ci3, di2, di3, cr2, cr3, dr2, dr3, ti2, tr2;
- for (k=0; k<l1; k++) {
- tr2 = 2*ref(cc,ido-1 + (3*k + 1)*ido);
- cr2 = ref(cc,3*k*ido) + taur*tr2;
- ch[k*ido] = ref(cc,3*k*ido) + tr2;
- ci3 = 2*taui*ref(cc,(3*k + 2)*ido);
- ch[(k + l1)*ido] = cr2 - ci3;
- ch[(k + 2*l1)*ido] = cr2 + ci3;
- }
- if (ido == 1) return;
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i+=2) {
- ic = ido - i;
- tr2 = ref(cc,i - 1 + (3*k + 2)*ido) + ref(cc,ic - 1 + (3*k + 1)*ido);
- cr2 = ref(cc,i - 1 + 3*k*ido) + taur*tr2;
- ch[i - 1 + k*ido] = ref(cc,i - 1 + 3*k*ido) + tr2;
- ti2 = ref(cc,i + (3*k + 2)*ido) - ref(cc,ic + (3*k + 1)*ido);
- ci2 = ref(cc,i + 3*k*ido) + taur*ti2;
- ch[i + k*ido] = ref(cc,i + 3*k*ido) + ti2;
- cr3 = taui*(ref(cc,i - 1 + (3*k + 2)*ido) - ref(cc,ic - 1 + (3*k + 1)*ido));
- ci3 = taui*(ref(cc,i + (3*k + 2)*ido) + ref(cc,ic + (3*k + 1)*ido));
- dr2 = cr2 - ci3;
- dr3 = cr2 + ci3;
- di2 = ci2 + cr3;
- di3 = ci2 - cr3;
- ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*dr2 - wa1[i - 1]*di2;
- ch[i + (k + l1)*ido] = wa1[i - 2]*di2 + wa1[i - 1]*dr2;
- ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*dr3 - wa2[i - 1]*di3;
- ch[i + (k + 2*l1)*ido] = wa2[i - 2]*di3 + wa2[i - 1]*dr3;
- }
- }
- } /* radb3 */
-
-
-static void radf4(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[], const Treal wa3[])
- {
- static const Treal hsqt2 = 0.70710678118654752440;
- int i, k, ic;
- Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
- for (k=0; k<l1; k++) {
- tr1 = ref(cc,(k + l1)*ido) + ref(cc,(k + 3*l1)*ido);
- tr2 = ref(cc,k*ido) + ref(cc,(k + 2*l1)*ido);
- ch[4*k*ido] = tr1 + tr2;
- ch[ido-1 + (4*k + 3)*ido] = tr2 - tr1;
- ch[ido-1 + (4*k + 1)*ido] = ref(cc,k*ido) - ref(cc,(k + 2*l1)*ido);
- ch[(4*k + 2)*ido] = ref(cc,(k + 3*l1)*ido) - ref(cc,(k + l1)*ido);
- }
- if (ido < 2) return;
- if (ido != 2) {
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i += 2) {
- ic = ido - i;
- cr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + wa1[i - 1]*ref(cc,i + (k + l1)*ido);
- ci2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido);
- cr3 = wa2[i - 2]*ref(cc,i - 1 + (k + 2*l1)*ido) + wa2[i - 1]*ref(cc,i + (k + 2*l1)*
- ido);
- ci3 = wa2[i - 2]*ref(cc,i + (k + 2*l1)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + 2*l1)*
- ido);
- cr4 = wa3[i - 2]*ref(cc,i - 1 + (k + 3*l1)*ido) + wa3[i - 1]*ref(cc,i + (k + 3*l1)*
- ido);
- ci4 = wa3[i - 2]*ref(cc,i + (k + 3*l1)*ido) - wa3[i - 1]*ref(cc,i - 1 + (k + 3*l1)*
- ido);
- tr1 = cr2 + cr4;
- tr4 = cr4 - cr2;
- ti1 = ci2 + ci4;
- ti4 = ci2 - ci4;
- ti2 = ref(cc,i + k*ido) + ci3;
- ti3 = ref(cc,i + k*ido) - ci3;
- tr2 = ref(cc,i - 1 + k*ido) + cr3;
- tr3 = ref(cc,i - 1 + k*ido) - cr3;
- ch[i - 1 + 4*k*ido] = tr1 + tr2;
- ch[ic - 1 + (4*k + 3)*ido] = tr2 - tr1;
- ch[i + 4*k*ido] = ti1 + ti2;
- ch[ic + (4*k + 3)*ido] = ti1 - ti2;
- ch[i - 1 + (4*k + 2)*ido] = ti4 + tr3;
- ch[ic - 1 + (4*k + 1)*ido] = tr3 - ti4;
- ch[i + (4*k + 2)*ido] = tr4 + ti3;
- ch[ic + (4*k + 1)*ido] = tr4 - ti3;
- }
- }
- if (ido % 2 == 1) return;
- }
- for (k=0; k<l1; k++) {
- ti1 = -hsqt2*(ref(cc,ido-1 + (k + l1)*ido) + ref(cc,ido-1 + (k + 3*l1)*ido));
- tr1 = hsqt2*(ref(cc,ido-1 + (k + l1)*ido) - ref(cc,ido-1 + (k + 3*l1)*ido));
- ch[ido-1 + 4*k*ido] = tr1 + ref(cc,ido-1 + k*ido);
- ch[ido-1 + (4*k + 2)*ido] = ref(cc,ido-1 + k*ido) - tr1;
- ch[(4*k + 1)*ido] = ti1 - ref(cc,ido-1 + (k + 2*l1)*ido);
- ch[(4*k + 3)*ido] = ti1 + ref(cc,ido-1 + (k + 2*l1)*ido);
- }
- } /* radf4 */
-
-
-static void radb4(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[], const Treal wa3[])
- {
- static const Treal sqrt2 = 1.41421356237309504880;
- int i, k, ic;
- Treal ci2, ci3, ci4, cr2, cr3, cr4, ti1, ti2, ti3, ti4, tr1, tr2, tr3, tr4;
- for (k = 0; k < l1; k++) {
- tr1 = ref(cc,4*k*ido) - ref(cc,ido-1 + (4*k + 3)*ido);
- tr2 = ref(cc,4*k*ido) + ref(cc,ido-1 + (4*k + 3)*ido);
- tr3 = ref(cc,ido-1 + (4*k + 1)*ido) + ref(cc,ido-1 + (4*k + 1)*ido);
- tr4 = ref(cc,(4*k + 2)*ido) + ref(cc,(4*k + 2)*ido);
- ch[k*ido] = tr2 + tr3;
- ch[(k + l1)*ido] = tr1 - tr4;
- ch[(k + 2*l1)*ido] = tr2 - tr3;
- ch[(k + 3*l1)*ido] = tr1 + tr4;
- }
- if (ido < 2) return;
- if (ido != 2) {
- for (k = 0; k < l1; ++k) {
- for (i = 2; i < ido; i += 2) {
- ic = ido - i;
- ti1 = ref(cc,i + 4*k*ido) + ref(cc,ic + (4*k + 3)*ido);
- ti2 = ref(cc,i + 4*k*ido) - ref(cc,ic + (4*k + 3)*ido);
- ti3 = ref(cc,i + (4*k + 2)*ido) - ref(cc,ic + (4*k + 1)*ido);
- tr4 = ref(cc,i + (4*k + 2)*ido) + ref(cc,ic + (4*k + 1)*ido);
- tr1 = ref(cc,i - 1 + 4*k*ido) - ref(cc,ic - 1 + (4*k + 3)*ido);
- tr2 = ref(cc,i - 1 + 4*k*ido) + ref(cc,ic - 1 + (4*k + 3)*ido);
- ti4 = ref(cc,i - 1 + (4*k + 2)*ido) - ref(cc,ic - 1 + (4*k + 1)*ido);
- tr3 = ref(cc,i - 1 + (4*k + 2)*ido) + ref(cc,ic - 1 + (4*k + 1)*ido);
- ch[i - 1 + k*ido] = tr2 + tr3;
- cr3 = tr2 - tr3;
- ch[i + k*ido] = ti2 + ti3;
- ci3 = ti2 - ti3;
- cr2 = tr1 - tr4;
- cr4 = tr1 + tr4;
- ci2 = ti1 + ti4;
- ci4 = ti1 - ti4;
- ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*cr2 - wa1[i - 1]*ci2;
- ch[i + (k + l1)*ido] = wa1[i - 2]*ci2 + wa1[i - 1]*cr2;
- ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*cr3 - wa2[i - 1]*ci3;
- ch[i + (k + 2*l1)*ido] = wa2[i - 2]*ci3 + wa2[i - 1]*cr3;
- ch[i - 1 + (k + 3*l1)*ido] = wa3[i - 2]*cr4 - wa3[i - 1]*ci4;
- ch[i + (k + 3*l1)*ido] = wa3[i - 2]*ci4 + wa3[i - 1]*cr4;
- }
- }
- if (ido % 2 == 1) return;
- }
- for (k = 0; k < l1; k++) {
- ti1 = ref(cc,(4*k + 1)*ido) + ref(cc,(4*k + 3)*ido);
- ti2 = ref(cc,(4*k + 3)*ido) - ref(cc,(4*k + 1)*ido);
- tr1 = ref(cc,ido-1 + 4*k*ido) - ref(cc,ido-1 + (4*k + 2)*ido);
- tr2 = ref(cc,ido-1 + 4*k*ido) + ref(cc,ido-1 + (4*k + 2)*ido);
- ch[ido-1 + k*ido] = tr2 + tr2;
- ch[ido-1 + (k + l1)*ido] = sqrt2*(tr1 - ti1);
- ch[ido-1 + (k + 2*l1)*ido] = ti2 + ti2;
- ch[ido-1 + (k + 3*l1)*ido] = -sqrt2*(tr1 + ti1);
- }
- } /* radb4 */
-
-
-static void radf5(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[])
- {
- static const Treal tr11 = 0.3090169943749474241;
- static const Treal ti11 = 0.95105651629515357212;
- static const Treal tr12 = -0.8090169943749474241;
- static const Treal ti12 = 0.58778525229247312917;
- int i, k, ic;
- Treal ci2, di2, ci4, ci5, di3, di4, di5, ci3, cr2, cr3, dr2, dr3, dr4, dr5,
- cr5, cr4, ti2, ti3, ti5, ti4, tr2, tr3, tr4, tr5;
- for (k = 0; k < l1; k++) {
- cr2 = ref(cc,(k + 4*l1)*ido) + ref(cc,(k + l1)*ido);
- ci5 = ref(cc,(k + 4*l1)*ido) - ref(cc,(k + l1)*ido);
- cr3 = ref(cc,(k + 3*l1)*ido) + ref(cc,(k + 2*l1)*ido);
- ci4 = ref(cc,(k + 3*l1)*ido) - ref(cc,(k + 2*l1)*ido);
- ch[5*k*ido] = ref(cc,k*ido) + cr2 + cr3;
- ch[ido-1 + (5*k + 1)*ido] = ref(cc,k*ido) + tr11*cr2 + tr12*cr3;
- ch[(5*k + 2)*ido] = ti11*ci5 + ti12*ci4;
- ch[ido-1 + (5*k + 3)*ido] = ref(cc,k*ido) + tr12*cr2 + tr11*cr3;
- ch[(5*k + 4)*ido] = ti12*ci5 - ti11*ci4;
- }
- if (ido == 1) return;
- for (k = 0; k < l1; ++k) {
- for (i = 2; i < ido; i += 2) {
- ic = ido - i;
- dr2 = wa1[i - 2]*ref(cc,i - 1 + (k + l1)*ido) + wa1[i - 1]*ref(cc,i + (k + l1)*ido);
- di2 = wa1[i - 2]*ref(cc,i + (k + l1)*ido) - wa1[i - 1]*ref(cc,i - 1 + (k + l1)*ido);
- dr3 = wa2[i - 2]*ref(cc,i - 1 + (k + 2*l1)*ido) + wa2[i - 1]*ref(cc,i + (k + 2*l1)*ido);
- di3 = wa2[i - 2]*ref(cc,i + (k + 2*l1)*ido) - wa2[i - 1]*ref(cc,i - 1 + (k + 2*l1)*ido);
- dr4 = wa3[i - 2]*ref(cc,i - 1 + (k + 3*l1)*ido) + wa3[i - 1]*ref(cc,i + (k + 3*l1)*ido);
- di4 = wa3[i - 2]*ref(cc,i + (k + 3*l1)*ido) - wa3[i - 1]*ref(cc,i - 1 + (k + 3*l1)*ido);
- dr5 = wa4[i - 2]*ref(cc,i - 1 + (k + 4*l1)*ido) + wa4[i - 1]*ref(cc,i + (k + 4*l1)*ido);
- di5 = wa4[i - 2]*ref(cc,i + (k + 4*l1)*ido) - wa4[i - 1]*ref(cc,i - 1 + (k + 4*l1)*ido);
- cr2 = dr2 + dr5;
- ci5 = dr5 - dr2;
- cr5 = di2 - di5;
- ci2 = di2 + di5;
- cr3 = dr3 + dr4;
- ci4 = dr4 - dr3;
- cr4 = di3 - di4;
- ci3 = di3 + di4;
- ch[i - 1 + 5*k*ido] = ref(cc,i - 1 + k*ido) + cr2 + cr3;
- ch[i + 5*k*ido] = ref(cc,i + k*ido) + ci2 + ci3;
- tr2 = ref(cc,i - 1 + k*ido) + tr11*cr2 + tr12*cr3;
- ti2 = ref(cc,i + k*ido) + tr11*ci2 + tr12*ci3;
- tr3 = ref(cc,i - 1 + k*ido) + tr12*cr2 + tr11*cr3;
- ti3 = ref(cc,i + k*ido) + tr12*ci2 + tr11*ci3;
- tr5 = ti11*cr5 + ti12*cr4;
- ti5 = ti11*ci5 + ti12*ci4;
- tr4 = ti12*cr5 - ti11*cr4;
- ti4 = ti12*ci5 - ti11*ci4;
- ch[i - 1 + (5*k + 2)*ido] = tr2 + tr5;
- ch[ic - 1 + (5*k + 1)*ido] = tr2 - tr5;
- ch[i + (5*k + 2)*ido] = ti2 + ti5;
- ch[ic + (5*k + 1)*ido] = ti5 - ti2;
- ch[i - 1 + (5*k + 4)*ido] = tr3 + tr4;
- ch[ic - 1 + (5*k + 3)*ido] = tr3 - tr4;
- ch[i + (5*k + 4)*ido] = ti3 + ti4;
- ch[ic + (5*k + 3)*ido] = ti4 - ti3;
- }
- }
- } /* radf5 */
-
-
-static void radb5(int ido, int l1, const Treal cc[], Treal ch[],
- const Treal wa1[], const Treal wa2[], const Treal wa3[], const Treal wa4[])
- {
- static const Treal tr11 = 0.3090169943749474241;
- static const Treal ti11 = 0.95105651629515357212;
- static const Treal tr12 = -0.8090169943749474241;
- static const Treal ti12 = 0.58778525229247312917;
- int i, k, ic;
- Treal ci2, ci3, ci4, ci5, di3, di4, di5, di2, cr2, cr3, cr5, cr4, ti2, ti3,
- ti4, ti5, dr3, dr4, dr5, dr2, tr2, tr3, tr4, tr5;
- for (k = 0; k < l1; k++) {
- ti5 = 2*ref(cc,(5*k + 2)*ido);
- ti4 = 2*ref(cc,(5*k + 4)*ido);
- tr2 = 2*ref(cc,ido-1 + (5*k + 1)*ido);
- tr3 = 2*ref(cc,ido-1 + (5*k + 3)*ido);
- ch[k*ido] = ref(cc,5*k*ido) + tr2 + tr3;
- cr2 = ref(cc,5*k*ido) + tr11*tr2 + tr12*tr3;
- cr3 = ref(cc,5*k*ido) + tr12*tr2 + tr11*tr3;
- ci5 = ti11*ti5 + ti12*ti4;
- ci4 = ti12*ti5 - ti11*ti4;
- ch[(k + l1)*ido] = cr2 - ci5;
- ch[(k + 2*l1)*ido] = cr3 - ci4;
- ch[(k + 3*l1)*ido] = cr3 + ci4;
- ch[(k + 4*l1)*ido] = cr2 + ci5;
- }
- if (ido == 1) return;
- for (k = 0; k < l1; ++k) {
- for (i = 2; i < ido; i += 2) {
- ic = ido - i;
- ti5 = ref(cc,i + (5*k + 2)*ido) + ref(cc,ic + (5*k + 1)*ido);
- ti2 = ref(cc,i + (5*k + 2)*ido) - ref(cc,ic + (5*k + 1)*ido);
- ti4 = ref(cc,i + (5*k + 4)*ido) + ref(cc,ic + (5*k + 3)*ido);
- ti3 = ref(cc,i + (5*k + 4)*ido) - ref(cc,ic + (5*k + 3)*ido);
- tr5 = ref(cc,i - 1 + (5*k + 2)*ido) - ref(cc,ic - 1 + (5*k + 1)*ido);
- tr2 = ref(cc,i - 1 + (5*k + 2)*ido) + ref(cc,ic - 1 + (5*k + 1)*ido);
- tr4 = ref(cc,i - 1 + (5*k + 4)*ido) - ref(cc,ic - 1 + (5*k + 3)*ido);
- tr3 = ref(cc,i - 1 + (5*k + 4)*ido) + ref(cc,ic - 1 + (5*k + 3)*ido);
- ch[i - 1 + k*ido] = ref(cc,i - 1 + 5*k*ido) + tr2 + tr3;
- ch[i + k*ido] = ref(cc,i + 5*k*ido) + ti2 + ti3;
- cr2 = ref(cc,i - 1 + 5*k*ido) + tr11*tr2 + tr12*tr3;
-
- ci2 = ref(cc,i + 5*k*ido) + tr11*ti2 + tr12*ti3;
- cr3 = ref(cc,i - 1 + 5*k*ido) + tr12*tr2 + tr11*tr3;
-
- ci3 = ref(cc,i + 5*k*ido) + tr12*ti2 + tr11*ti3;
- cr5 = ti11*tr5 + ti12*tr4;
- ci5 = ti11*ti5 + ti12*ti4;
- cr4 = ti12*tr5 - ti11*tr4;
- ci4 = ti12*ti5 - ti11*ti4;
- dr3 = cr3 - ci4;
- dr4 = cr3 + ci4;
- di3 = ci3 + cr4;
- di4 = ci3 - cr4;
- dr5 = cr2 + ci5;
- dr2 = cr2 - ci5;
- di5 = ci2 - cr5;
- di2 = ci2 + cr5;
- ch[i - 1 + (k + l1)*ido] = wa1[i - 2]*dr2 - wa1[i - 1]*di2;
- ch[i + (k + l1)*ido] = wa1[i - 2]*di2 + wa1[i - 1]*dr2;
- ch[i - 1 + (k + 2*l1)*ido] = wa2[i - 2]*dr3 - wa2[i - 1]*di3;
- ch[i + (k + 2*l1)*ido] = wa2[i - 2]*di3 + wa2[i - 1]*dr3;
- ch[i - 1 + (k + 3*l1)*ido] = wa3[i - 2]*dr4 - wa3[i - 1]*di4;
- ch[i + (k + 3*l1)*ido] = wa3[i - 2]*di4 + wa3[i - 1]*dr4;
- ch[i - 1 + (k + 4*l1)*ido] = wa4[i - 2]*dr5 - wa4[i - 1]*di5;
- ch[i + (k + 4*l1)*ido] = wa4[i - 2]*di5 + wa4[i - 1]*dr5;
- }
- }
- } /* radb5 */
-
-
-static void radfg(int ido, int ip, int l1, int idl1,
- Treal cc[], Treal ch[], const Treal wa[])
- {
- int idij, ipph, i, j, k, l, j2, ic, jc, lc, ik, is, nbd;
- Treal dc2, ai1, ai2, ar1, ar2, ds2, dcp, dsp, ar1h, ar2h;
- sincos2pi(1, ip, &dsp, &dcp);
- ipph = (ip + 1) / 2;
- nbd = (ido - 1) / 2;
- if (ido != 1) {
- for (ik=0; ik<idl1; ik++) ch[ik] = cc[ik];
- for (j=1; j<ip; j++)
- for (k=0; k<l1; k++)
- ch[(k + j*l1)*ido] = cc[(k + j*l1)*ido];
- if (nbd <= l1) {
- is = -ido;
- for (j=1; j<ip; j++) {
- is += ido;
- idij = is-1;
- for (i=2; i<ido; i+=2) {
- idij += 2;
- for (k=0; k<l1; k++) {
- ch[i - 1 + (k + j*l1)*ido] =
- wa[idij - 1]*cc[i - 1 + (k + j*l1)*ido] + wa[idij]*cc[i + (k + j*l1)*ido];
- ch[i + (k + j*l1)*ido] =
- wa[idij - 1]*cc[i + (k + j*l1)*ido] - wa[idij]*cc[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- } else {
- is = -ido;
- for (j=1; j<ip; j++) {
- is += ido;
- for (k=0; k<l1; k++) {
- idij = is-1;
- for (i=2; i<ido; i+=2) {
- idij += 2;
- ch[i - 1 + (k + j*l1)*ido] =
- wa[idij - 1]*cc[i - 1 + (k + j*l1)*ido] + wa[idij]*cc[i + (k + j*l1)*ido];
- ch[i + (k + j*l1)*ido] =
- wa[idij - 1]*cc[i + (k + j*l1)*ido] - wa[idij]*cc[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- }
- if (nbd >= l1) {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i+=2) {
- cc[i - 1 + (k + j*l1)*ido] = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
- cc[i - 1 + (k + jc*l1)*ido] = ch[i + (k + j*l1)*ido] - ch[i + (k + jc*l1)*ido];
- cc[i + (k + j*l1)*ido] = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
- cc[i + (k + jc*l1)*ido] = ch[i - 1 + (k + jc*l1)*ido] - ch[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- } else {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (i=2; i<ido; i+=2) {
- for (k=0; k<l1; k++) {
- cc[i - 1 + (k + j*l1)*ido] =
- ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
- cc[i - 1 + (k + jc*l1)*ido] = ch[i + (k + j*l1)*ido] - ch[i + (k + jc*l1)*ido];
- cc[i + (k + j*l1)*ido] = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
- cc[i + (k + jc*l1)*ido] = ch[i - 1 + (k + jc*l1)*ido] - ch[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- }
- } else { /* now ido == 1 */
- for (ik=0; ik<idl1; ik++) cc[ik] = ch[ik];
- }
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (k=0; k<l1; k++) {
- cc[(k + j*l1)*ido] = ch[(k + j*l1)*ido] + ch[(k + jc*l1)*ido];
- cc[(k + jc*l1)*ido] = ch[(k + jc*l1)*ido] - ch[(k + j*l1)*ido];
- }
- }
-
- ar1 = 1;
- ai1 = 0;
- for (l=1; l<ipph; l++) {
- lc = ip - l;
- ar1h = dcp*ar1 - dsp*ai1;
- ai1 = dcp*ai1 + dsp*ar1;
- ar1 = ar1h;
- for (ik=0; ik<idl1; ik++) {
- ch[ik + l*idl1] = cc[ik] + ar1*cc[ik + idl1];
- ch[ik + lc*idl1] = ai1*cc[ik + (ip-1)*idl1];
- }
- dc2 = ar1;
- ds2 = ai1;
- ar2 = ar1;
- ai2 = ai1;
- for (j=2; j<ipph; j++) {
- jc = ip - j;
- ar2h = dc2*ar2 - ds2*ai2;
- ai2 = dc2*ai2 + ds2*ar2;
- ar2 = ar2h;
- for (ik=0; ik<idl1; ik++) {
- ch[ik + l*idl1] += ar2*cc[ik + j*idl1];
- ch[ik + lc*idl1] += ai2*cc[ik + jc*idl1];
- }
- }
- }
-
- for (j=1; j<ipph; j++)
- for (ik=0; ik<idl1; ik++)
- ch[ik] += cc[ik + j*idl1];
-
- if (ido >= l1) {
- for (k=0; k<l1; k++) {
- for (i=0; i<ido; i++) {
- ref(cc,i + k*ip*ido) = ch[i + k*ido];
- }
- }
- } else {
- for (i=0; i<ido; i++) {
- for (k=0; k<l1; k++) {
- ref(cc,i + k*ip*ido) = ch[i + k*ido];
- }
- }
- }
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- j2 = 2*j;
- for (k=0; k<l1; k++) {
- ref(cc,ido-1 + (j2 - 1 + k*ip)*ido) =
- ch[(k + j*l1)*ido];
- ref(cc,(j2 + k*ip)*ido) =
- ch[(k + jc*l1)*ido];
- }
- }
- if (ido == 1) return;
- if (nbd >= l1) {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- j2 = 2*j;
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i+=2) {
- ic = ido - i;
- ref(cc,i - 1 + (j2 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
- ref(cc,ic - 1 + (j2 - 1 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] - ch[i - 1 + (k + jc*l1)*ido];
- ref(cc,i + (j2 + k*ip)*ido) = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
- ref(cc,ic + (j2 - 1 + k*ip)*ido) = ch[i + (k + jc*l1)*ido] - ch[i + (k + j*l1)*ido];
- }
- }
- }
- } else {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- j2 = 2*j;
- for (i=2; i<ido; i+=2) {
- ic = ido - i;
- for (k=0; k<l1; k++) {
- ref(cc,i - 1 + (j2 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] + ch[i - 1 + (k + jc*l1)*ido];
- ref(cc,ic - 1 + (j2 - 1 + k*ip)*ido) = ch[i - 1 + (k + j*l1)*ido] - ch[i - 1 + (k + jc*l1)*ido];
- ref(cc,i + (j2 + k*ip)*ido) = ch[i + (k + j*l1)*ido] + ch[i + (k + jc*l1)*ido];
- ref(cc,ic + (j2 - 1 + k*ip)*ido) = ch[i + (k + jc*l1)*ido] - ch[i + (k + j*l1)*ido];
- }
- }
- }
- }
- } /* radfg */
-
-
-static void radbg(int ido, int ip, int l1, int idl1,
- Treal cc[], Treal ch[], const Treal wa[])
- {
- int idij, ipph, i, j, k, l, j2, ic, jc, lc, ik, is;
- Treal dc2, ai1, ai2, ar1, ar2, ds2;
- int nbd;
- Treal dcp, dsp, ar1h, ar2h;
- sincos2pi(1, ip, &dsp, &dcp);
- nbd = (ido - 1) / 2;
- ipph = (ip + 1) / 2;
- if (ido >= l1) {
- for (k=0; k<l1; k++) {
- for (i=0; i<ido; i++) {
- ch[i + k*ido] = ref(cc,i + k*ip*ido);
- }
- }
- } else {
- for (i=0; i<ido; i++) {
- for (k=0; k<l1; k++) {
- ch[i + k*ido] = ref(cc,i + k*ip*ido);
- }
- }
- }
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- j2 = 2*j;
- for (k=0; k<l1; k++) {
- ch[(k + j*l1)*ido] = ref(cc,ido-1 + (j2 - 1 + k*ip)*ido) + ref(cc,ido-1 + (j2 - 1 + k*ip)*
- ido);
- ch[(k + jc*l1)*ido] = ref(cc,(j2 + k*ip)*ido) + ref(cc,(j2 + k*ip)*ido);
- }
- }
-
- if (ido != 1) {
- if (nbd >= l1) {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i+=2) {
- ic = ido - i;
- ch[i - 1 + (k + j*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) + ref(cc,
- ic - 1 + (2*j - 1 + k*ip)*ido);
- ch[i - 1 + (k + jc*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) -
- ref(cc,ic - 1 + (2*j - 1 + k*ip)*ido);
- ch[i + (k + j*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) - ref(cc,ic
- + (2*j - 1 + k*ip)*ido);
- ch[i + (k + jc*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) + ref(cc,ic
- + (2*j - 1 + k*ip)*ido);
- }
- }
- }
- } else {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (i=2; i<ido; i+=2) {
- ic = ido - i;
- for (k=0; k<l1; k++) {
- ch[i - 1 + (k + j*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) + ref(cc,
- ic - 1 + (2*j - 1 + k*ip)*ido);
- ch[i - 1 + (k + jc*l1)*ido] = ref(cc,i - 1 + (2*j + k*ip)*ido) -
- ref(cc,ic - 1 + (2*j - 1 + k*ip)*ido);
- ch[i + (k + j*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) - ref(cc,ic
- + (2*j - 1 + k*ip)*ido);
- ch[i + (k + jc*l1)*ido] = ref(cc,i + (2*j + k*ip)*ido) + ref(cc,ic
- + (2*j - 1 + k*ip)*ido);
- }
- }
- }
- }
- }
-
- ar1 = 1;
- ai1 = 0;
- for (l=1; l<ipph; l++) {
- lc = ip - l;
- ar1h = dcp*ar1 - dsp*ai1;
- ai1 = dcp*ai1 + dsp*ar1;
- ar1 = ar1h;
- for (ik=0; ik<idl1; ik++) {
- cc[ik + l*idl1] = ch[ik] + ar1*ch[ik + idl1];
- cc[ik + lc*idl1] = ai1*ch[ik + (ip-1)*idl1];
- }
- dc2 = ar1;
- ds2 = ai1;
- ar2 = ar1;
- ai2 = ai1;
- for (j=2; j<ipph; j++) {
- jc = ip - j;
- ar2h = dc2*ar2 - ds2*ai2;
- ai2 = dc2*ai2 + ds2*ar2;
- ar2 = ar2h;
- for (ik=0; ik<idl1; ik++) {
- cc[ik + l*idl1] += ar2*ch[ik + j*idl1];
- cc[ik + lc*idl1] += ai2*ch[ik + jc*idl1];
- }
- }
- }
- for (j=1; j<ipph; j++) {
- for (ik=0; ik<idl1; ik++) {
- ch[ik] += ch[ik + j*idl1];
- }
- }
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (k=0; k<l1; k++) {
- ch[(k + j*l1)*ido] = cc[(k + j*l1)*ido] - cc[(k + jc*l1)*ido];
- ch[(k + jc*l1)*ido] = cc[(k + j*l1)*ido] + cc[(k + jc*l1)*ido];
- }
- }
-
- if (ido == 1) return;
- if (nbd >= l1) {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (k=0; k<l1; k++) {
- for (i=2; i<ido; i+=2) {
- ch[i - 1 + (k + j*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] - cc[i + (k + jc*l1)*ido];
- ch[i - 1 + (k + jc*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] + cc[i + (k + jc*l1)*ido];
- ch[i + (k + j*l1)*ido] = cc[i + (k + j*l1)*ido] + cc[i - 1 + (k + jc*l1)*ido];
- ch[i + (k + jc*l1)*ido] = cc[i + (k + j*l1)*ido] - cc[i - 1 + (k + jc*l1)*ido];
- }
- }
- }
- } else {
- for (j=1; j<ipph; j++) {
- jc = ip - j;
- for (i=2; i<ido; i+=2) {
- for (k=0; k<l1; k++) {
- ch[i - 1 + (k + j*l1)*ido] = cc[i - 1 + (k + j*l1)*ido] - cc[i + (k + jc*l1)*ido];
- ch[i - 1 + (k + jc*l1)*ido] = cc[i - 1 + (k + j *l1)*ido] + cc[i + (k + jc*l1)*ido];
- ch[i + (k + j*l1)*ido] = cc[i + (k + j*l1)*ido] + cc[i - 1 + (k + jc*l1)*ido];
- ch[i + (k + jc*l1)*ido] = cc[i + (k + j*l1)*ido] - cc[i - 1 + (k + jc*l1)*ido];
- }
- }
- }
- }
- for (ik=0; ik<idl1; ik++) cc[ik] = ch[ik];
- for (j=1; j<ip; j++)
- for (k=0; k<l1; k++)
- cc[(k + j*l1)*ido] = ch[(k + j*l1)*ido];
- if (nbd <= l1) {
- is = -ido;
- for (j=1; j<ip; j++) {
- is += ido;
- idij = is-1;
- for (i=2; i<ido; i+=2) {
- idij += 2;
- for (k=0; k<l1; k++) {
- cc[i - 1 + (k + j*l1)*ido] = wa[idij - 1]*ch[i - 1 + (k + j*l1)*ido] - wa[idij]*
- ch[i + (k + j*l1)*ido];
- cc[i + (k + j*l1)*ido] = wa[idij - 1]*ch[i + (k + j*l1)*ido] + wa[idij]*ch[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- } else {
- is = -ido;
- for (j=1; j<ip; j++) {
- is += ido;
- for (k=0; k<l1; k++) {
- idij = is - 1;
- for (i=2; i<ido; i+=2) {
- idij += 2;
- cc[i - 1 + (k + j*l1)*ido] = wa[idij-1]*ch[i - 1 + (k + j*l1)*ido] - wa[idij]*
- ch[i + (k + j*l1)*ido];
- cc[i + (k + j*l1)*ido] = wa[idij-1]*ch[i + (k + j*l1)*ido] + wa[idij]*ch[i - 1 + (k + j*l1)*ido];
- }
- }
- }
- }
- } /* radbg */
-
- /* ------------------------------------------------------------
-cfftf1, npy_cfftf, npy_cfftb, cffti1, npy_cffti. Complex FFTs.
---------------------------------------------------------------- */
-
-static void cfftf1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2], int isign)
- {
- int idot, i;
- int k1, l1, l2;
- int na, nf, ip, iw, ix2, ix3, ix4, nac, ido, idl1;
- Treal *cinput, *coutput;
- nf = ifac[1];
- na = 0;
- l1 = 1;
- iw = 0;
- for (k1=2; k1<=nf+1; k1++) {
- ip = ifac[k1];
- l2 = ip*l1;
- ido = n / l2;
- idot = ido + ido;
- idl1 = idot*l1;
- if (na) {
- cinput = ch;
- coutput = c;
- } else {
- cinput = c;
- coutput = ch;
- }
- switch (ip) {
- case 4:
- ix2 = iw + idot;
- ix3 = ix2 + idot;
- passf4(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], isign);
- na = !na;
- break;
- case 2:
- passf2(idot, l1, cinput, coutput, &wa[iw], isign);
- na = !na;
- break;
- case 3:
- ix2 = iw + idot;
- passf3(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], isign);
- na = !na;
- break;
- case 5:
- ix2 = iw + idot;
- ix3 = ix2 + idot;
- ix4 = ix3 + idot;
- passf5(idot, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4], isign);
- na = !na;
- break;
- default:
- passf(&nac, idot, ip, l1, idl1, cinput, coutput, &wa[iw], isign);
- if (nac != 0) na = !na;
- }
- l1 = l2;
- iw += (ip - 1)*idot;
- }
- if (na == 0) return;
- for (i=0; i<2*n; i++) c[i] = ch[i];
- } /* cfftf1 */
-
-
-NPY_VISIBILITY_HIDDEN void npy_cfftf(int n, Treal c[], Treal wsave[])
- {
- int iw1, iw2;
- if (n == 1) return;
- iw1 = 2*n;
- iw2 = iw1 + 2*n;
- cfftf1(n, c, wsave, wsave+iw1, (int*)(wsave+iw2), -1);
- } /* npy_cfftf */
-
-
-NPY_VISIBILITY_HIDDEN void npy_cfftb(int n, Treal c[], Treal wsave[])
- {
- int iw1, iw2;
- if (n == 1) return;
- iw1 = 2*n;
- iw2 = iw1 + 2*n;
- cfftf1(n, c, wsave, wsave+iw1, (int*)(wsave+iw2), +1);
- } /* npy_cfftb */
-
-
-static void factorize(int n, int ifac[MAXFAC+2], const int ntryh[NSPECIAL])
- /* Factorize n in factors in ntryh and rest. On exit,
-ifac[0] contains n and ifac[1] contains number of factors,
-the factors start from ifac[2]. */
- {
- int ntry=3, i, j=0, ib, nf=0, nl=n, nq, nr;
-startloop:
- if (j < NSPECIAL)
- ntry = ntryh[j];
- else
- ntry+= 2;
- j++;
- do {
- nq = nl / ntry;
- nr = nl - ntry*nq;
- if (nr != 0) goto startloop;
- nf++;
- ifac[nf + 1] = ntry;
- nl = nq;
- if (ntry == 2 && nf != 1) {
- for (i=2; i<=nf; i++) {
- ib = nf - i + 2;
- ifac[ib + 1] = ifac[ib];
- }
- ifac[2] = 2;
- }
- } while (nl != 1);
- ifac[0] = n;
- ifac[1] = nf;
- }
-
-
-static void cffti1(int n, Treal wa[], int ifac[MAXFAC+2])
- {
- int fi, idot, i, j;
- int i1, k1, l1, l2;
- int ld, ii, nf, ip;
- int ido, ipm;
-
- static const int ntryh[NSPECIAL] = {
- 3,4,2,5 }; /* Do not change the order of these. */
-
- factorize(n,ifac,ntryh);
- nf = ifac[1];
- i = 1;
- l1 = 1;
- for (k1=1; k1<=nf; k1++) {
- ip = ifac[k1+1];
- ld = 0;
- l2 = l1*ip;
- ido = n / l2;
- idot = ido + ido + 2;
- ipm = ip - 1;
- for (j=1; j<=ipm; j++) {
- i1 = i;
- wa[i-1] = 1;
- wa[i] = 0;
- ld += l1;
- fi = 0;
- for (ii=4; ii<=idot; ii+=2) {
- i+= 2;
- fi+= 1;
- sincos2pi(fi*ld, n, wa+i, wa+i-1);
- }
- if (ip > 5) {
- wa[i1-1] = wa[i-1];
- wa[i1] = wa[i];
- }
- }
- l1 = l2;
- }
- } /* cffti1 */
-
-
-NPY_VISIBILITY_HIDDEN void npy_cffti(int n, Treal wsave[])
- {
- int iw1, iw2;
- if (n == 1) return;
- iw1 = 2*n;
- iw2 = iw1 + 2*n;
- cffti1(n, wsave+iw1, (int*)(wsave+iw2));
- } /* npy_cffti */
-
- /* -------------------------------------------------------------------
-rfftf1, rfftb1, npy_rfftf, npy_rfftb, rffti1, npy_rffti. Treal FFTs.
----------------------------------------------------------------------- */
-
-static void rfftf1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2])
- {
- int i;
- int k1, l1, l2, na, kh, nf, ip, iw, ix2, ix3, ix4, ido, idl1;
- Treal *cinput, *coutput;
- nf = ifac[1];
- na = 1;
- l2 = n;
- iw = n-1;
- for (k1 = 1; k1 <= nf; ++k1) {
- kh = nf - k1;
- ip = ifac[kh + 2];
- l1 = l2 / ip;
- ido = n / l2;
- idl1 = ido*l1;
- iw -= (ip - 1)*ido;
- na = !na;
- if (na) {
- cinput = ch;
- coutput = c;
- } else {
- cinput = c;
- coutput = ch;
- }
- switch (ip) {
- case 4:
- ix2 = iw + ido;
- ix3 = ix2 + ido;
- radf4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]);
- break;
- case 2:
- radf2(ido, l1, cinput, coutput, &wa[iw]);
- break;
- case 3:
- ix2 = iw + ido;
- radf3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]);
- break;
- case 5:
- ix2 = iw + ido;
- ix3 = ix2 + ido;
- ix4 = ix3 + ido;
- radf5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
- break;
- default:
- if (ido == 1)
- na = !na;
- if (na == 0) {
- radfg(ido, ip, l1, idl1, c, ch, &wa[iw]);
- na = 1;
- } else {
- radfg(ido, ip, l1, idl1, ch, c, &wa[iw]);
- na = 0;
- }
- }
- l2 = l1;
- }
- if (na == 1) return;
- for (i = 0; i < n; i++) c[i] = ch[i];
- } /* rfftf1 */
-
-
-static void rfftb1(int n, Treal c[], Treal ch[], const Treal wa[], const int ifac[MAXFAC+2])
- {
- int i;
- int k1, l1, l2, na, nf, ip, iw, ix2, ix3, ix4, ido, idl1;
- Treal *cinput, *coutput;
- nf = ifac[1];
- na = 0;
- l1 = 1;
- iw = 0;
- for (k1=1; k1<=nf; k1++) {
- ip = ifac[k1 + 1];
- l2 = ip*l1;
- ido = n / l2;
- idl1 = ido*l1;
- if (na) {
- cinput = ch;
- coutput = c;
- } else {
- cinput = c;
- coutput = ch;
- }
- switch (ip) {
- case 4:
- ix2 = iw + ido;
- ix3 = ix2 + ido;
- radb4(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3]);
- na = !na;
- break;
- case 2:
- radb2(ido, l1, cinput, coutput, &wa[iw]);
- na = !na;
- break;
- case 3:
- ix2 = iw + ido;
- radb3(ido, l1, cinput, coutput, &wa[iw], &wa[ix2]);
- na = !na;
- break;
- case 5:
- ix2 = iw + ido;
- ix3 = ix2 + ido;
- ix4 = ix3 + ido;
- radb5(ido, l1, cinput, coutput, &wa[iw], &wa[ix2], &wa[ix3], &wa[ix4]);
- na = !na;
- break;
- default:
- radbg(ido, ip, l1, idl1, cinput, coutput, &wa[iw]);
- if (ido == 1) na = !na;
- }
- l1 = l2;
- iw += (ip - 1)*ido;
- }
- if (na == 0) return;
- for (i=0; i<n; i++) c[i] = ch[i];
- } /* rfftb1 */
-
-
-NPY_VISIBILITY_HIDDEN void npy_rfftf(int n, Treal r[], Treal wsave[])
- {
- if (n == 1) return;
- rfftf1(n, r, wsave, wsave+n, (int*)(wsave+2*n));
- } /* npy_rfftf */
-
-
-NPY_VISIBILITY_HIDDEN void npy_rfftb(int n, Treal r[], Treal wsave[])
- {
- if (n == 1) return;
- rfftb1(n, r, wsave, wsave+n, (int*)(wsave+2*n));
- } /* npy_rfftb */
-
-
-static void rffti1(int n, Treal wa[], int ifac[MAXFAC+2])
- {
- int fi, i, j;
- int k1, l1, l2;
- int ld, ii, nf, ip, is;
- int ido, ipm, nfm1;
- static const int ntryh[NSPECIAL] = {
- 4,2,3,5 }; /* Do not change the order of these. */
- factorize(n,ifac,ntryh);
- nf = ifac[1];
- is = 0;
- nfm1 = nf - 1;
- l1 = 1;
- if (nfm1 == 0) return;
- for (k1 = 1; k1 <= nfm1; k1++) {
- ip = ifac[k1 + 1];
- ld = 0;
- l2 = l1*ip;
- ido = n / l2;
- ipm = ip - 1;
- for (j = 1; j <= ipm; ++j) {
- ld += l1;
- i = is;
- fi = 0;
- for (ii = 3; ii <= ido; ii += 2) {
- i += 2;
- fi += 1;
- sincos2pi(fi*ld, n, wa+i-1, wa+i-2);
- }
- is += ido;
- }
- l1 = l2;
- }
- } /* rffti1 */
-
-
-NPY_VISIBILITY_HIDDEN void npy_rffti(int n, Treal wsave[])
- {
- if (n == 1) return;
- rffti1(n, wsave+n, (int*)(wsave+2*n));
- } /* npy_rffti */
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/numpy/fft/fftpack.h b/numpy/fft/fftpack.h
deleted file mode 100644
index 5e8f4631c..000000000
--- a/numpy/fft/fftpack.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is part of tela the Tensor Language.
- * Copyright (c) 1994-1995 Pekka Janhunen
- */
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#define DOUBLE
-
-#ifdef DOUBLE
-#define Treal double
-#else
-#define Treal float
-#endif
-
-extern NPY_VISIBILITY_HIDDEN void npy_cfftf(int N, Treal data[], const Treal wrk[]);
-extern NPY_VISIBILITY_HIDDEN void npy_cfftb(int N, Treal data[], const Treal wrk[]);
-extern NPY_VISIBILITY_HIDDEN void npy_cffti(int N, Treal wrk[]);
-
-extern NPY_VISIBILITY_HIDDEN void npy_rfftf(int N, Treal data[], const Treal wrk[]);
-extern NPY_VISIBILITY_HIDDEN void npy_rfftb(int N, Treal data[], const Treal wrk[]);
-extern NPY_VISIBILITY_HIDDEN void npy_rffti(int N, Treal wrk[]);
-
-#ifdef __cplusplus
-}
-#endif
diff --git a/numpy/fft/fftpack_litemodule.c b/numpy/fft/fftpack_litemodule.c
deleted file mode 100644
index bd6cfc120..000000000
--- a/numpy/fft/fftpack_litemodule.c
+++ /dev/null
@@ -1,366 +0,0 @@
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include "Python.h"
-#include "numpy/arrayobject.h"
-#include "fftpack.h"
-
-static PyObject *ErrorObject;
-
-static const char fftpack_cfftf__doc__[] = "";
-
-static PyObject *
-fftpack_cfftf(PyObject *NPY_UNUSED(self), PyObject *args)
-{
- PyObject *op1, *op2;
- PyArrayObject *data;
- PyArray_Descr *descr;
- double *wsave, *dptr;
- npy_intp nsave;
- int npts, nrepeats, i;
-
- if(!PyArg_ParseTuple(args, "OO:cfftf", &op1, &op2)) {
- return NULL;
- }
- data = (PyArrayObject *)PyArray_CopyFromObject(op1,
- NPY_CDOUBLE, 1, 0);
- if (data == NULL) {
- return NULL;
- }
- descr = PyArray_DescrFromType(NPY_DOUBLE);
- if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) {
- goto fail;
- }
- if (data == NULL) {
- goto fail;
- }
-
- npts = PyArray_DIM(data, PyArray_NDIM(data) - 1);
- if (nsave != npts*4 + 15) {
- PyErr_SetString(ErrorObject, "invalid work array for fft size");
- goto fail;
- }
-
- nrepeats = PyArray_SIZE(data)/npts;
- dptr = (double *)PyArray_DATA(data);
- Py_BEGIN_ALLOW_THREADS;
- NPY_SIGINT_ON;
- for (i = 0; i < nrepeats; i++) {
- npy_cfftf(npts, dptr, wsave);
- dptr += npts*2;
- }
- NPY_SIGINT_OFF;
- Py_END_ALLOW_THREADS;
- PyArray_Free(op2, (char *)wsave);
- return (PyObject *)data;
-
-fail:
- PyArray_Free(op2, (char *)wsave);
- Py_DECREF(data);
- return NULL;
-}
-
-static const char fftpack_cfftb__doc__[] = "";
-
-static PyObject *
-fftpack_cfftb(PyObject *NPY_UNUSED(self), PyObject *args)
-{
- PyObject *op1, *op2;
- PyArrayObject *data;
- PyArray_Descr *descr;
- double *wsave, *dptr;
- npy_intp nsave;
- int npts, nrepeats, i;
-
- if(!PyArg_ParseTuple(args, "OO:cfftb", &op1, &op2)) {
- return NULL;
- }
- data = (PyArrayObject *)PyArray_CopyFromObject(op1,
- NPY_CDOUBLE, 1, 0);
- if (data == NULL) {
- return NULL;
- }
- descr = PyArray_DescrFromType(NPY_DOUBLE);
- if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) {
- goto fail;
- }
- if (data == NULL) {
- goto fail;
- }
-
- npts = PyArray_DIM(data, PyArray_NDIM(data) - 1);
- if (nsave != npts*4 + 15) {
- PyErr_SetString(ErrorObject, "invalid work array for fft size");
- goto fail;
- }
-
- nrepeats = PyArray_SIZE(data)/npts;
- dptr = (double *)PyArray_DATA(data);
- Py_BEGIN_ALLOW_THREADS;
- NPY_SIGINT_ON;
- for (i = 0; i < nrepeats; i++) {
- npy_cfftb(npts, dptr, wsave);
- dptr += npts*2;
- }
- NPY_SIGINT_OFF;
- Py_END_ALLOW_THREADS;
- PyArray_Free(op2, (char *)wsave);
- return (PyObject *)data;
-
-fail:
- PyArray_Free(op2, (char *)wsave);
- Py_DECREF(data);
- return NULL;
-}
-
-static const char fftpack_cffti__doc__[] = "";
-
-static PyObject *
-fftpack_cffti(PyObject *NPY_UNUSED(self), PyObject *args)
-{
- PyArrayObject *op;
- npy_intp dim;
- long n;
-
- if (!PyArg_ParseTuple(args, "l:cffti", &n)) {
- return NULL;
- }
- /*Magic size needed by npy_cffti*/
- dim = 4*n + 15;
- /*Create a 1 dimensional array of dimensions of type double*/
- op = (PyArrayObject *)PyArray_SimpleNew(1, &dim, NPY_DOUBLE);
- if (op == NULL) {
- return NULL;
- }
-
- Py_BEGIN_ALLOW_THREADS;
- NPY_SIGINT_ON;
- npy_cffti(n, (double *)PyArray_DATA((PyArrayObject*)op));
- NPY_SIGINT_OFF;
- Py_END_ALLOW_THREADS;
-
- return (PyObject *)op;
-}
-
-static const char fftpack_rfftf__doc__[] = "";
-
-static PyObject *
-fftpack_rfftf(PyObject *NPY_UNUSED(self), PyObject *args)
-{
- PyObject *op1, *op2;
- PyArrayObject *data, *ret;
- PyArray_Descr *descr;
- double *wsave = NULL, *dptr, *rptr;
- npy_intp nsave;
- int npts, nrepeats, i, rstep;
-
- if(!PyArg_ParseTuple(args, "OO:rfftf", &op1, &op2)) {
- return NULL;
- }
- data = (PyArrayObject *)PyArray_ContiguousFromObject(op1,
- NPY_DOUBLE, 1, 0);
- if (data == NULL) {
- return NULL;
- }
- /* FIXME, direct access changing contents of data->dimensions */
- npts = PyArray_DIM(data, PyArray_NDIM(data) - 1);
- PyArray_DIMS(data)[PyArray_NDIM(data) - 1] = npts/2 + 1;
- ret = (PyArrayObject *)PyArray_Zeros(PyArray_NDIM(data),
- PyArray_DIMS(data), PyArray_DescrFromType(NPY_CDOUBLE), 0);
- if (ret == NULL) {
- goto fail;
- }
- PyArray_DIMS(data)[PyArray_NDIM(data) - 1] = npts;
- rstep = PyArray_DIM(ret, PyArray_NDIM(ret) - 1)*2;
-
- descr = PyArray_DescrFromType(NPY_DOUBLE);
- if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) {
- goto fail;
- }
- if (data == NULL || ret == NULL) {
- goto fail;
- }
- if (nsave != npts*2+15) {
- PyErr_SetString(ErrorObject, "invalid work array for fft size");
- goto fail;
- }
-
- nrepeats = PyArray_SIZE(data)/npts;
- rptr = (double *)PyArray_DATA(ret);
- dptr = (double *)PyArray_DATA(data);
-
- Py_BEGIN_ALLOW_THREADS;
- NPY_SIGINT_ON;
- for (i = 0; i < nrepeats; i++) {
- memcpy((char *)(rptr+1), dptr, npts*sizeof(double));
- npy_rfftf(npts, rptr+1, wsave);
- rptr[0] = rptr[1];
- rptr[1] = 0.0;
- rptr += rstep;
- dptr += npts;
- }
- NPY_SIGINT_OFF;
- Py_END_ALLOW_THREADS;
- PyArray_Free(op2, (char *)wsave);
- Py_DECREF(data);
- return (PyObject *)ret;
-
-fail:
- PyArray_Free(op2, (char *)wsave);
- Py_XDECREF(data);
- Py_XDECREF(ret);
- return NULL;
-}
-
-static const char fftpack_rfftb__doc__[] = "";
-
-static PyObject *
-fftpack_rfftb(PyObject *NPY_UNUSED(self), PyObject *args)
-{
- PyObject *op1, *op2;
- PyArrayObject *data, *ret;
- PyArray_Descr *descr;
- double *wsave, *dptr, *rptr;
- npy_intp nsave;
- int npts, nrepeats, i;
-
- if(!PyArg_ParseTuple(args, "OO:rfftb", &op1, &op2)) {
- return NULL;
- }
- data = (PyArrayObject *)PyArray_ContiguousFromObject(op1,
- NPY_CDOUBLE, 1, 0);
- if (data == NULL) {
- return NULL;
- }
- npts = PyArray_DIM(data, PyArray_NDIM(data) - 1);
- ret = (PyArrayObject *)PyArray_Zeros(PyArray_NDIM(data), PyArray_DIMS(data),
- PyArray_DescrFromType(NPY_DOUBLE), 0);
-
- descr = PyArray_DescrFromType(NPY_DOUBLE);
- if (PyArray_AsCArray(&op2, (void *)&wsave, &nsave, 1, descr) == -1) {
- goto fail;
- }
- if (data == NULL || ret == NULL) {
- goto fail;
- }
- if (nsave != npts*2 + 15) {
- PyErr_SetString(ErrorObject, "invalid work array for fft size");
- goto fail;
- }
-
- nrepeats = PyArray_SIZE(ret)/npts;
- rptr = (double *)PyArray_DATA(ret);
- dptr = (double *)PyArray_DATA(data);
-
- Py_BEGIN_ALLOW_THREADS;
- NPY_SIGINT_ON;
- for (i = 0; i < nrepeats; i++) {
- memcpy((char *)(rptr + 1), (dptr + 2), (npts - 1)*sizeof(double));
- rptr[0] = dptr[0];
- npy_rfftb(npts, rptr, wsave);
- rptr += npts;
- dptr += npts*2;
- }
- NPY_SIGINT_OFF;
- Py_END_ALLOW_THREADS;
- PyArray_Free(op2, (char *)wsave);
- Py_DECREF(data);
- return (PyObject *)ret;
-
-fail:
- PyArray_Free(op2, (char *)wsave);
- Py_XDECREF(data);
- Py_XDECREF(ret);
- return NULL;
-}
-
-static const char fftpack_rffti__doc__[] = "";
-
-static PyObject *
-fftpack_rffti(PyObject *NPY_UNUSED(self), PyObject *args)
-{
- PyArrayObject *op;
- npy_intp dim;
- long n;
-
- if (!PyArg_ParseTuple(args, "l:rffti", &n)) {
- return NULL;
- }
- /*Magic size needed by npy_rffti*/
- dim = 2*n + 15;
- /*Create a 1 dimensional array of dimensions of type double*/
- op = (PyArrayObject *)PyArray_SimpleNew(1, &dim, NPY_DOUBLE);
- if (op == NULL) {
- return NULL;
- }
- Py_BEGIN_ALLOW_THREADS;
- NPY_SIGINT_ON;
- npy_rffti(n, (double *)PyArray_DATA((PyArrayObject*)op));
- NPY_SIGINT_OFF;
- Py_END_ALLOW_THREADS;
-
- return (PyObject *)op;
-}
-
-
-/* List of methods defined in the module */
-
-static struct PyMethodDef fftpack_methods[] = {
- {"cfftf", fftpack_cfftf, 1, fftpack_cfftf__doc__},
- {"cfftb", fftpack_cfftb, 1, fftpack_cfftb__doc__},
- {"cffti", fftpack_cffti, 1, fftpack_cffti__doc__},
- {"rfftf", fftpack_rfftf, 1, fftpack_rfftf__doc__},
- {"rfftb", fftpack_rfftb, 1, fftpack_rfftb__doc__},
- {"rffti", fftpack_rffti, 1, fftpack_rffti__doc__},
- {NULL, NULL, 0, NULL} /* sentinel */
-};
-
-#if PY_MAJOR_VERSION >= 3
-static struct PyModuleDef moduledef = {
- PyModuleDef_HEAD_INIT,
- "fftpack_lite",
- NULL,
- -1,
- fftpack_methods,
- NULL,
- NULL,
- NULL,
- NULL
-};
-#endif
-
-/* Initialization function for the module */
-#if PY_MAJOR_VERSION >= 3
-#define RETVAL(x) x
-PyMODINIT_FUNC PyInit_fftpack_lite(void)
-#else
-#define RETVAL(x)
-PyMODINIT_FUNC
-initfftpack_lite(void)
-#endif
-{
- PyObject *m,*d;
-#if PY_MAJOR_VERSION >= 3
- m = PyModule_Create(&moduledef);
-#else
- static const char fftpack_module_documentation[] = "";
-
- m = Py_InitModule4("fftpack_lite", fftpack_methods,
- fftpack_module_documentation,
- (PyObject*)NULL,PYTHON_API_VERSION);
-#endif
- if (m == NULL) {
- return RETVAL(NULL);
- }
-
- /* Import the array object */
- import_array();
-
- /* Add some symbolic constants to the module */
- d = PyModule_GetDict(m);
- ErrorObject = PyErr_NewException("fftpack.error", NULL, NULL);
- PyDict_SetItemString(d, "error", ErrorObject);
-
- /* XXXX Add constants here */
-
- return RETVAL(m);
-}
diff --git a/numpy/fft/helper.py b/numpy/fft/helper.py
index 729121f31..a920a4ac0 100644
--- a/numpy/fft/helper.py
+++ b/numpy/fft/helper.py
@@ -4,13 +4,9 @@ Discrete Fourier Transforms - helper.py
"""
from __future__ import division, absolute_import, print_function
-import collections
-try:
- import threading
-except ImportError:
- import dummy_threading as threading
from numpy.compat import integer_types
from numpy.core import integer, empty, arange, asarray, roll
+from numpy.core.overrides import array_function_dispatch, set_module
# Created by Pearu Peterson, September 2002
@@ -19,6 +15,11 @@ __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq']
integer_types = integer_types + (integer,)
+def _fftshift_dispatcher(x, axes=None):
+ return (x,)
+
+
+@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def fftshift(x, axes=None):
"""
Shift the zero-frequency component to the center of the spectrum.
@@ -46,7 +47,7 @@ def fftshift(x, axes=None):
--------
>>> freqs = np.fft.fftfreq(10, 0.1)
>>> freqs
- array([ 0., 1., 2., 3., 4., -5., -4., -3., -2., -1.])
+ array([ 0., 1., 2., ..., -3., -2., -1.])
>>> np.fft.fftshift(freqs)
array([-5., -4., -3., -2., -1., 0., 1., 2., 3., 4.])
@@ -75,6 +76,7 @@ def fftshift(x, axes=None):
return roll(x, shift, axes)
+@array_function_dispatch(_fftshift_dispatcher, module='numpy.fft')
def ifftshift(x, axes=None):
"""
The inverse of `fftshift`. Although identical for even-length `x`, the
@@ -121,6 +123,7 @@ def ifftshift(x, axes=None):
return roll(x, shift, axes)
+@set_module('numpy.fft')
def fftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies.
@@ -154,7 +157,7 @@ def fftfreq(n, d=1.0):
>>> timestep = 0.1
>>> freq = np.fft.fftfreq(n, d=timestep)
>>> freq
- array([ 0. , 1.25, 2.5 , 3.75, -5. , -3.75, -2.5 , -1.25])
+ array([ 0. , 1.25, 2.5 , ..., -3.75, -2.5 , -1.25])
"""
if not isinstance(n, integer_types):
@@ -167,9 +170,9 @@ def fftfreq(n, d=1.0):
p2 = arange(-(n//2), 0, dtype=int)
results[N:] = p2
return results * val
- #return hstack((arange(0,(n-1)/2 + 1), arange(-(n/2),0))) / (n*d)
+@set_module('numpy.fft')
def rfftfreq(n, d=1.0):
"""
Return the Discrete Fourier Transform sample frequencies
@@ -207,7 +210,7 @@ def rfftfreq(n, d=1.0):
>>> sample_rate = 100
>>> freq = np.fft.fftfreq(n, d=1./sample_rate)
>>> freq
- array([ 0., 10., 20., 30., 40., -50., -40., -30., -20., -10.])
+ array([ 0., 10., 20., ..., -30., -20., -10.])
>>> freq = np.fft.rfftfreq(n, d=1./sample_rate)
>>> freq
array([ 0., 10., 20., 30., 40., 50.])
@@ -219,99 +222,3 @@ def rfftfreq(n, d=1.0):
N = n//2 + 1
results = arange(0, N, dtype=int)
return results * val
-
-
-class _FFTCache(object):
- """
- Cache for the FFT twiddle factors as an LRU (least recently used) cache.
-
- Parameters
- ----------
- max_size_in_mb : int
- Maximum memory usage of the cache before items are being evicted.
- max_item_count : int
- Maximum item count of the cache before items are being evicted.
-
- Notes
- -----
- Items will be evicted if either limit has been reached upon getting and
- setting. The maximum memory usages is not strictly the given
- ``max_size_in_mb`` but rather
- ``max(max_size_in_mb, 1.5 * size_of_largest_item)``. Thus the cache will
- never be completely cleared - at least one item will remain and a single
- large item can cause the cache to retain several smaller items even if the
- given maximum cache size has been exceeded.
- """
- def __init__(self, max_size_in_mb, max_item_count):
- self._max_size_in_bytes = max_size_in_mb * 1024 ** 2
- self._max_item_count = max_item_count
- self._dict = collections.OrderedDict()
- self._lock = threading.Lock()
-
- def put_twiddle_factors(self, n, factors):
- """
- Store twiddle factors for an FFT of length n in the cache.
-
- Putting multiple twiddle factors for a certain n will store it multiple
- times.
-
- Parameters
- ----------
- n : int
- Data length for the FFT.
- factors : ndarray
- The actual twiddle values.
- """
- with self._lock:
- # Pop + later add to move it to the end for LRU behavior.
- # Internally everything is stored in a dictionary whose values are
- # lists.
- try:
- value = self._dict.pop(n)
- except KeyError:
- value = []
- value.append(factors)
- self._dict[n] = value
- self._prune_cache()
-
- def pop_twiddle_factors(self, n):
- """
- Pop twiddle factors for an FFT of length n from the cache.
-
- Will return None if the requested twiddle factors are not available in
- the cache.
-
- Parameters
- ----------
- n : int
- Data length for the FFT.
-
- Returns
- -------
- out : ndarray or None
- The retrieved twiddle factors if available, else None.
- """
- with self._lock:
- if n not in self._dict or not self._dict[n]:
- return None
- # Pop + later add to move it to the end for LRU behavior.
- all_values = self._dict.pop(n)
- value = all_values.pop()
- # Only put pack if there are still some arrays left in the list.
- if all_values:
- self._dict[n] = all_values
- return value
-
- def _prune_cache(self):
- # Always keep at least one item.
- while len(self._dict) > 1 and (
- len(self._dict) > self._max_item_count or self._check_size()):
- self._dict.popitem(last=False)
-
- def _check_size(self):
- item_sizes = [sum(_j.nbytes for _j in _i)
- for _i in self._dict.values() if _i]
- if not item_sizes:
- return False
- max_size = max(self._max_size_in_bytes, 1.5 * max(item_sizes))
- return sum(item_sizes) > max_size
diff --git a/numpy/fft/info.py b/numpy/fft/info.py
deleted file mode 100644
index cb6526b44..000000000
--- a/numpy/fft/info.py
+++ /dev/null
@@ -1,187 +0,0 @@
-"""
-Discrete Fourier Transform (:mod:`numpy.fft`)
-=============================================
-
-.. currentmodule:: numpy.fft
-
-Standard FFTs
--------------
-
-.. autosummary::
- :toctree: generated/
-
- fft Discrete Fourier transform.
- ifft Inverse discrete Fourier transform.
- fft2 Discrete Fourier transform in two dimensions.
- ifft2 Inverse discrete Fourier transform in two dimensions.
- fftn Discrete Fourier transform in N-dimensions.
- ifftn Inverse discrete Fourier transform in N dimensions.
-
-Real FFTs
----------
-
-.. autosummary::
- :toctree: generated/
-
- rfft Real discrete Fourier transform.
- irfft Inverse real discrete Fourier transform.
- rfft2 Real discrete Fourier transform in two dimensions.
- irfft2 Inverse real discrete Fourier transform in two dimensions.
- rfftn Real discrete Fourier transform in N dimensions.
- irfftn Inverse real discrete Fourier transform in N dimensions.
-
-Hermitian FFTs
---------------
-
-.. autosummary::
- :toctree: generated/
-
- hfft Hermitian discrete Fourier transform.
- ihfft Inverse Hermitian discrete Fourier transform.
-
-Helper routines
----------------
-
-.. autosummary::
- :toctree: generated/
-
- fftfreq Discrete Fourier Transform sample frequencies.
- rfftfreq DFT sample frequencies (for usage with rfft, irfft).
- fftshift Shift zero-frequency component to center of spectrum.
- ifftshift Inverse of fftshift.
-
-
-Background information
-----------------------
-
-Fourier analysis is fundamentally a method for expressing a function as a
-sum of periodic components, and for recovering the function from those
-components. When both the function and its Fourier transform are
-replaced with discretized counterparts, it is called the discrete Fourier
-transform (DFT). The DFT has become a mainstay of numerical computing in
-part because of a very fast algorithm for computing it, called the Fast
-Fourier Transform (FFT), which was known to Gauss (1805) and was brought
-to light in its current form by Cooley and Tukey [CT]_. Press et al. [NR]_
-provide an accessible introduction to Fourier analysis and its
-applications.
-
-Because the discrete Fourier transform separates its input into
-components that contribute at discrete frequencies, it has a great number
-of applications in digital signal processing, e.g., for filtering, and in
-this context the discretized input to the transform is customarily
-referred to as a *signal*, which exists in the *time domain*. The output
-is called a *spectrum* or *transform* and exists in the *frequency
-domain*.
-
-Implementation details
-----------------------
-
-There are many ways to define the DFT, varying in the sign of the
-exponent, normalization, etc. In this implementation, the DFT is defined
-as
-
-.. math::
- A_k = \\sum_{m=0}^{n-1} a_m \\exp\\left\\{-2\\pi i{mk \\over n}\\right\\}
- \\qquad k = 0,\\ldots,n-1.
-
-The DFT is in general defined for complex inputs and outputs, and a
-single-frequency component at linear frequency :math:`f` is
-represented by a complex exponential
-:math:`a_m = \\exp\\{2\\pi i\\,f m\\Delta t\\}`, where :math:`\\Delta t`
-is the sampling interval.
-
-The values in the result follow so-called "standard" order: If ``A =
-fft(a, n)``, then ``A[0]`` contains the zero-frequency term (the sum of
-the signal), which is always purely real for real inputs. Then ``A[1:n/2]``
-contains the positive-frequency terms, and ``A[n/2+1:]`` contains the
-negative-frequency terms, in order of decreasingly negative frequency.
-For an even number of input points, ``A[n/2]`` represents both positive and
-negative Nyquist frequency, and is also purely real for real input. For
-an odd number of input points, ``A[(n-1)/2]`` contains the largest positive
-frequency, while ``A[(n+1)/2]`` contains the largest negative frequency.
-The routine ``np.fft.fftfreq(n)`` returns an array giving the frequencies
-of corresponding elements in the output. The routine
-``np.fft.fftshift(A)`` shifts transforms and their frequencies to put the
-zero-frequency components in the middle, and ``np.fft.ifftshift(A)`` undoes
-that shift.
-
-When the input `a` is a time-domain signal and ``A = fft(a)``, ``np.abs(A)``
-is its amplitude spectrum and ``np.abs(A)**2`` is its power spectrum.
-The phase spectrum is obtained by ``np.angle(A)``.
-
-The inverse DFT is defined as
-
-.. math::
- a_m = \\frac{1}{n}\\sum_{k=0}^{n-1}A_k\\exp\\left\\{2\\pi i{mk\\over n}\\right\\}
- \\qquad m = 0,\\ldots,n-1.
-
-It differs from the forward transform by the sign of the exponential
-argument and the default normalization by :math:`1/n`.
-
-Normalization
--------------
-The default normalization has the direct transforms unscaled and the inverse
-transforms are scaled by :math:`1/n`. It is possible to obtain unitary
-transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
-`None`) so that both direct and inverse transforms will be scaled by
-:math:`1/\\sqrt{n}`.
-
-Real and Hermitian transforms
------------------------------
-
-When the input is purely real, its transform is Hermitian, i.e., the
-component at frequency :math:`f_k` is the complex conjugate of the
-component at frequency :math:`-f_k`, which means that for real
-inputs there is no information in the negative frequency components that
-is not already available from the positive frequency components.
-The family of `rfft` functions is
-designed to operate on real inputs, and exploits this symmetry by
-computing only the positive frequency components, up to and including the
-Nyquist frequency. Thus, ``n`` input points produce ``n/2+1`` complex
-output points. The inverses of this family assumes the same symmetry of
-its input, and for an output of ``n`` points uses ``n/2+1`` input points.
-
-Correspondingly, when the spectrum is purely real, the signal is
-Hermitian. The `hfft` family of functions exploits this symmetry by
-using ``n/2+1`` complex points in the input (time) domain for ``n`` real
-points in the frequency domain.
-
-In higher dimensions, FFTs are used, e.g., for image analysis and
-filtering. The computational efficiency of the FFT means that it can
-also be a faster way to compute large convolutions, using the property
-that a convolution in the time domain is equivalent to a point-by-point
-multiplication in the frequency domain.
-
-Higher dimensions
------------------
-
-In two dimensions, the DFT is defined as
-
-.. math::
- A_{kl} = \\sum_{m=0}^{M-1} \\sum_{n=0}^{N-1}
- a_{mn}\\exp\\left\\{-2\\pi i \\left({mk\\over M}+{nl\\over N}\\right)\\right\\}
- \\qquad k = 0, \\ldots, M-1;\\quad l = 0, \\ldots, N-1,
-
-which extends in the obvious way to higher dimensions, and the inverses
-in higher dimensions also extend in the same way.
-
-References
-----------
-
-.. [CT] Cooley, James W., and John W. Tukey, 1965, "An algorithm for the
- machine calculation of complex Fourier series," *Math. Comput.*
- 19: 297-301.
-
-.. [NR] Press, W., Teukolsky, S., Vetterline, W.T., and Flannery, B.P.,
- 2007, *Numerical Recipes: The Art of Scientific Computing*, ch.
- 12-13. Cambridge Univ. Press, Cambridge, UK.
-
-Examples
---------
-
-For examples, see the various functions.
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core']
diff --git a/numpy/fft/setup.py b/numpy/fft/setup.py
index cd99a82d7..8c3a31557 100644
--- a/numpy/fft/setup.py
+++ b/numpy/fft/setup.py
@@ -7,9 +7,9 @@ def configuration(parent_package='',top_path=None):
config.add_data_dir('tests')
- # Configure fftpack_lite
- config.add_extension('fftpack_lite',
- sources=['fftpack_litemodule.c', 'fftpack.c']
+ # Configure pocketfft_internal
+ config.add_extension('_pocketfft_internal',
+ sources=['_pocketfft.c']
)
return config
diff --git a/numpy/fft/tests/test_fftpack.py b/numpy/fft/tests/test_fftpack.py
deleted file mode 100644
index 8d6cd8407..000000000
--- a/numpy/fft/tests/test_fftpack.py
+++ /dev/null
@@ -1,185 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-import numpy as np
-from numpy.random import random
-from numpy.testing import (
- assert_array_almost_equal, assert_array_equal, assert_raises,
- )
-import threading
-import sys
-if sys.version_info[0] >= 3:
- import queue
-else:
- import Queue as queue
-
-
-def fft1(x):
- L = len(x)
- phase = -2j*np.pi*(np.arange(L)/float(L))
- phase = np.arange(L).reshape(-1, 1) * phase
- return np.sum(x*np.exp(phase), axis=1)
-
-
-class TestFFTShift(object):
-
- def test_fft_n(self):
- assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
-
-
-class TestFFT1D(object):
-
- def test_fft(self):
- x = random(30) + 1j*random(30)
- assert_array_almost_equal(fft1(x), np.fft.fft(x))
- assert_array_almost_equal(fft1(x) / np.sqrt(30),
- np.fft.fft(x, norm="ortho"))
-
- def test_ifft(self):
- x = random(30) + 1j*random(30)
- assert_array_almost_equal(x, np.fft.ifft(np.fft.fft(x)))
- assert_array_almost_equal(
- x, np.fft.ifft(np.fft.fft(x, norm="ortho"), norm="ortho"))
-
- def test_fft2(self):
- x = random((30, 20)) + 1j*random((30, 20))
- assert_array_almost_equal(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
- np.fft.fft2(x))
- assert_array_almost_equal(np.fft.fft2(x) / np.sqrt(30 * 20),
- np.fft.fft2(x, norm="ortho"))
-
- def test_ifft2(self):
- x = random((30, 20)) + 1j*random((30, 20))
- assert_array_almost_equal(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
- np.fft.ifft2(x))
- assert_array_almost_equal(np.fft.ifft2(x) * np.sqrt(30 * 20),
- np.fft.ifft2(x, norm="ortho"))
-
- def test_fftn(self):
- x = random((30, 20, 10)) + 1j*random((30, 20, 10))
- assert_array_almost_equal(
- np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
- np.fft.fftn(x))
- assert_array_almost_equal(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
- np.fft.fftn(x, norm="ortho"))
-
- def test_ifftn(self):
- x = random((30, 20, 10)) + 1j*random((30, 20, 10))
- assert_array_almost_equal(
- np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
- np.fft.ifftn(x))
- assert_array_almost_equal(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
- np.fft.ifftn(x, norm="ortho"))
-
- def test_rfft(self):
- x = random(30)
- for n in [x.size, 2*x.size]:
- for norm in [None, 'ortho']:
- assert_array_almost_equal(
- np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
- np.fft.rfft(x, n=n, norm=norm))
- assert_array_almost_equal(np.fft.rfft(x, n=n) / np.sqrt(n),
- np.fft.rfft(x, n=n, norm="ortho"))
-
- def test_irfft(self):
- x = random(30)
- assert_array_almost_equal(x, np.fft.irfft(np.fft.rfft(x)))
- assert_array_almost_equal(
- x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"))
-
- def test_rfft2(self):
- x = random((30, 20))
- assert_array_almost_equal(np.fft.fft2(x)[:, :11], np.fft.rfft2(x))
- assert_array_almost_equal(np.fft.rfft2(x) / np.sqrt(30 * 20),
- np.fft.rfft2(x, norm="ortho"))
-
- def test_irfft2(self):
- x = random((30, 20))
- assert_array_almost_equal(x, np.fft.irfft2(np.fft.rfft2(x)))
- assert_array_almost_equal(
- x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"))
-
- def test_rfftn(self):
- x = random((30, 20, 10))
- assert_array_almost_equal(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x))
- assert_array_almost_equal(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
- np.fft.rfftn(x, norm="ortho"))
-
- def test_irfftn(self):
- x = random((30, 20, 10))
- assert_array_almost_equal(x, np.fft.irfftn(np.fft.rfftn(x)))
- assert_array_almost_equal(
- x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"))
-
- def test_hfft(self):
- x = random(14) + 1j*random(14)
- x_herm = np.concatenate((random(1), x, random(1)))
- x = np.concatenate((x_herm, x[::-1].conj()))
- assert_array_almost_equal(np.fft.fft(x), np.fft.hfft(x_herm))
- assert_array_almost_equal(np.fft.hfft(x_herm) / np.sqrt(30),
- np.fft.hfft(x_herm, norm="ortho"))
-
- def test_ihttf(self):
- x = random(14) + 1j*random(14)
- x_herm = np.concatenate((random(1), x, random(1)))
- x = np.concatenate((x_herm, x[::-1].conj()))
- assert_array_almost_equal(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)))
- assert_array_almost_equal(
- x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
- norm="ortho"))
-
- def test_all_1d_norm_preserving(self):
- # verify that round-trip transforms are norm-preserving
- x = random(30)
- x_norm = np.linalg.norm(x)
- n = x.size * 2
- func_pairs = [(np.fft.fft, np.fft.ifft),
- (np.fft.rfft, np.fft.irfft),
- # hfft: order so the first function takes x.size samples
- # (necessary for comparison to x_norm above)
- (np.fft.ihfft, np.fft.hfft),
- ]
- for forw, back in func_pairs:
- for n in [x.size, 2*x.size]:
- for norm in [None, 'ortho']:
- tmp = forw(x, n=n, norm=norm)
- tmp = back(tmp, n=n, norm=norm)
- assert_array_almost_equal(x_norm,
- np.linalg.norm(tmp))
-
-class TestFFTThreadSafe(object):
- threads = 16
- input_shape = (800, 200)
-
- def _test_mtsame(self, func, *args):
- def worker(args, q):
- q.put(func(*args))
-
- q = queue.Queue()
- expected = func(*args)
-
- # Spin off a bunch of threads to call the same function simultaneously
- t = [threading.Thread(target=worker, args=(args, q))
- for i in range(self.threads)]
- [x.start() for x in t]
-
- [x.join() for x in t]
- # Make sure all threads returned the correct value
- for i in range(self.threads):
- assert_array_equal(q.get(timeout=5), expected,
- 'Function returned wrong value in multithreaded context')
-
- def test_fft(self):
- a = np.ones(self.input_shape) * 1+0j
- self._test_mtsame(np.fft.fft, a)
-
- def test_ifft(self):
- a = np.ones(self.input_shape) * 1+0j
- self._test_mtsame(np.fft.ifft, a)
-
- def test_rfft(self):
- a = np.ones(self.input_shape)
- self._test_mtsame(np.fft.rfft, a)
-
- def test_irfft(self):
- a = np.ones(self.input_shape) * 1+0j
- self._test_mtsame(np.fft.irfft, a)
diff --git a/numpy/fft/tests/test_helper.py b/numpy/fft/tests/test_helper.py
index 8d315fa02..6613c8002 100644
--- a/numpy/fft/tests/test_helper.py
+++ b/numpy/fft/tests/test_helper.py
@@ -7,7 +7,6 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
from numpy import fft, pi
-from numpy.fft.helper import _FFTCache
class TestFFTShift(object):
@@ -168,81 +167,3 @@ class TestIRFFTN(object):
# Should not raise error
fft.irfftn(a, axes=axes)
-
-
-class TestFFTCache(object):
-
- def test_basic_behaviour(self):
- c = _FFTCache(max_size_in_mb=1, max_item_count=4)
-
- # Put
- c.put_twiddle_factors(1, np.ones(2, dtype=np.float32))
- c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32))
-
- # Get
- assert_array_almost_equal(c.pop_twiddle_factors(1),
- np.ones(2, dtype=np.float32))
- assert_array_almost_equal(c.pop_twiddle_factors(2),
- np.zeros(2, dtype=np.float32))
-
- # Nothing should be left.
- assert_equal(len(c._dict), 0)
-
- # Now put everything in twice so it can be retrieved once and each will
- # still have one item left.
- for _ in range(2):
- c.put_twiddle_factors(1, np.ones(2, dtype=np.float32))
- c.put_twiddle_factors(2, np.zeros(2, dtype=np.float32))
- assert_array_almost_equal(c.pop_twiddle_factors(1),
- np.ones(2, dtype=np.float32))
- assert_array_almost_equal(c.pop_twiddle_factors(2),
- np.zeros(2, dtype=np.float32))
- assert_equal(len(c._dict), 2)
-
- def test_automatic_pruning(self):
- # That's around 2600 single precision samples.
- c = _FFTCache(max_size_in_mb=0.01, max_item_count=4)
-
- c.put_twiddle_factors(1, np.ones(200, dtype=np.float32))
- c.put_twiddle_factors(2, np.ones(200, dtype=np.float32))
- assert_equal(list(c._dict.keys()), [1, 2])
-
- # This is larger than the limit but should still be kept.
- c.put_twiddle_factors(3, np.ones(3000, dtype=np.float32))
- assert_equal(list(c._dict.keys()), [1, 2, 3])
- # Add one more.
- c.put_twiddle_factors(4, np.ones(3000, dtype=np.float32))
- # The other three should no longer exist.
- assert_equal(list(c._dict.keys()), [4])
-
- # Now test the max item count pruning.
- c = _FFTCache(max_size_in_mb=0.01, max_item_count=2)
- c.put_twiddle_factors(2, np.empty(2))
- c.put_twiddle_factors(1, np.empty(2))
- # Can still be accessed.
- assert_equal(list(c._dict.keys()), [2, 1])
-
- c.put_twiddle_factors(3, np.empty(2))
- # 1 and 3 can still be accessed - c[2] has been touched least recently
- # and is thus evicted.
- assert_equal(list(c._dict.keys()), [1, 3])
-
- # One last test. We will add a single large item that is slightly
- # bigger then the cache size. Some small items can still be added.
- c = _FFTCache(max_size_in_mb=0.01, max_item_count=5)
- c.put_twiddle_factors(1, np.ones(3000, dtype=np.float32))
- c.put_twiddle_factors(2, np.ones(2, dtype=np.float32))
- c.put_twiddle_factors(3, np.ones(2, dtype=np.float32))
- c.put_twiddle_factors(4, np.ones(2, dtype=np.float32))
- assert_equal(list(c._dict.keys()), [1, 2, 3, 4])
-
- # One more big item. This time it is 6 smaller ones but they are
- # counted as one big item.
- for _ in range(6):
- c.put_twiddle_factors(5, np.ones(500, dtype=np.float32))
- # '1' no longer in the cache. Rest still in the cache.
- assert_equal(list(c._dict.keys()), [2, 3, 4, 5])
-
- # Another big item - should now be the only item in the cache.
- c.put_twiddle_factors(6, np.ones(4000, dtype=np.float32))
- assert_equal(list(c._dict.keys()), [6])
diff --git a/numpy/fft/tests/test_pocketfft.py b/numpy/fft/tests/test_pocketfft.py
new file mode 100644
index 000000000..453e964fa
--- /dev/null
+++ b/numpy/fft/tests/test_pocketfft.py
@@ -0,0 +1,261 @@
+from __future__ import division, absolute_import, print_function
+
+import numpy as np
+import pytest
+from numpy.random import random
+from numpy.testing import (
+ assert_array_equal, assert_raises, assert_allclose
+ )
+import threading
+import sys
+if sys.version_info[0] >= 3:
+ import queue
+else:
+ import Queue as queue
+
+
+def fft1(x):
+ L = len(x)
+ phase = -2j*np.pi*(np.arange(L)/float(L))
+ phase = np.arange(L).reshape(-1, 1) * phase
+ return np.sum(x*np.exp(phase), axis=1)
+
+
+class TestFFTShift(object):
+
+ def test_fft_n(self):
+ assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
+
+
+class TestFFT1D(object):
+
+ def test_identity(self):
+ maxlen = 512
+ x = random(maxlen) + 1j*random(maxlen)
+ xr = random(maxlen)
+ for i in range(1,maxlen):
+ assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
+ atol=1e-12)
+ assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]),i),
+ xr[0:i], atol=1e-12)
+
+ def test_fft(self):
+ x = random(30) + 1j*random(30)
+ assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
+ assert_allclose(fft1(x) / np.sqrt(30),
+ np.fft.fft(x, norm="ortho"), atol=1e-6)
+
+ @pytest.mark.parametrize('norm', (None, 'ortho'))
+ def test_ifft(self, norm):
+ x = random(30) + 1j*random(30)
+ assert_allclose(
+ x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
+ atol=1e-6)
+ # Ensure we get the correct error message
+ with pytest.raises(ValueError,
+ match='Invalid number of FFT data points'):
+ np.fft.ifft([], norm=norm)
+
+ def test_fft2(self):
+ x = random((30, 20)) + 1j*random((30, 20))
+ assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
+ np.fft.fft2(x), atol=1e-6)
+ assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
+ np.fft.fft2(x, norm="ortho"), atol=1e-6)
+
+ def test_ifft2(self):
+ x = random((30, 20)) + 1j*random((30, 20))
+ assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
+ np.fft.ifft2(x), atol=1e-6)
+ assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
+ np.fft.ifft2(x, norm="ortho"), atol=1e-6)
+
+ def test_fftn(self):
+ x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+ assert_allclose(
+ np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
+ np.fft.fftn(x), atol=1e-6)
+ assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
+ np.fft.fftn(x, norm="ortho"), atol=1e-6)
+
+ def test_ifftn(self):
+ x = random((30, 20, 10)) + 1j*random((30, 20, 10))
+ assert_allclose(
+ np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
+ np.fft.ifftn(x), atol=1e-6)
+ assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
+ np.fft.ifftn(x, norm="ortho"), atol=1e-6)
+
+ def test_rfft(self):
+ x = random(30)
+ for n in [x.size, 2*x.size]:
+ for norm in [None, 'ortho']:
+ assert_allclose(
+ np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
+ np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
+ assert_allclose(
+ np.fft.rfft(x, n=n) / np.sqrt(n),
+ np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
+
+ def test_irfft(self):
+ x = random(30)
+ assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
+ assert_allclose(
+ x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"), atol=1e-6)
+
+ def test_rfft2(self):
+ x = random((30, 20))
+ assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
+ assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
+ np.fft.rfft2(x, norm="ortho"), atol=1e-6)
+
+ def test_irfft2(self):
+ x = random((30, 20))
+ assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
+ assert_allclose(
+ x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"), atol=1e-6)
+
+ def test_rfftn(self):
+ x = random((30, 20, 10))
+ assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
+ assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
+ np.fft.rfftn(x, norm="ortho"), atol=1e-6)
+
+ def test_irfftn(self):
+ x = random((30, 20, 10))
+ assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
+ assert_allclose(
+ x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"), atol=1e-6)
+
+ def test_hfft(self):
+ x = random(14) + 1j*random(14)
+ x_herm = np.concatenate((random(1), x, random(1)))
+ x = np.concatenate((x_herm, x[::-1].conj()))
+ assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
+ assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
+ np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
+
+ def test_ihttf(self):
+ x = random(14) + 1j*random(14)
+ x_herm = np.concatenate((random(1), x, random(1)))
+ x = np.concatenate((x_herm, x[::-1].conj()))
+ assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
+ assert_allclose(
+ x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
+ norm="ortho"), atol=1e-6)
+
+ @pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
+ np.fft.rfftn, np.fft.irfftn])
+ def test_axes(self, op):
+ x = random((30, 20, 10))
+ axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
+ for a in axes:
+ op_tr = op(np.transpose(x, a))
+ tr_op = np.transpose(op(x, axes=a), a)
+ assert_allclose(op_tr, tr_op, atol=1e-6)
+
+ def test_all_1d_norm_preserving(self):
+ # verify that round-trip transforms are norm-preserving
+ x = random(30)
+ x_norm = np.linalg.norm(x)
+ n = x.size * 2
+ func_pairs = [(np.fft.fft, np.fft.ifft),
+ (np.fft.rfft, np.fft.irfft),
+ # hfft: order so the first function takes x.size samples
+ # (necessary for comparison to x_norm above)
+ (np.fft.ihfft, np.fft.hfft),
+ ]
+ for forw, back in func_pairs:
+ for n in [x.size, 2*x.size]:
+ for norm in [None, 'ortho']:
+ tmp = forw(x, n=n, norm=norm)
+ tmp = back(tmp, n=n, norm=norm)
+ assert_allclose(x_norm,
+ np.linalg.norm(tmp), atol=1e-6)
+
+ @pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
+ np.longdouble])
+ def test_dtypes(self, dtype):
+ # make sure that all input precisions are accepted and internally
+ # converted to 64bit
+ x = random(30).astype(dtype)
+ assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
+ assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
+
+
+@pytest.mark.parametrize(
+ "dtype",
+ [np.float32, np.float64, np.complex64, np.complex128])
+@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
+@pytest.mark.parametrize(
+ "fft",
+ [np.fft.fft, np.fft.fft2, np.fft.fftn,
+ np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
+def test_fft_with_order(dtype, order, fft):
+ # Check that FFT/IFFT produces identical results for C, Fortran and
+ # non contiguous arrays
+ rng = np.random.RandomState(42)
+ X = rng.rand(8, 7, 13).astype(dtype, copy=False)
+ # See discussion in pull/14178
+ _tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
+ if order == 'F':
+ Y = np.asfortranarray(X)
+ else:
+ # Make a non contiguous array
+ Y = X[::-1]
+ X = np.ascontiguousarray(X[::-1])
+
+ if fft.__name__.endswith('fft'):
+ for axis in range(3):
+ X_res = fft(X, axis=axis)
+ Y_res = fft(Y, axis=axis)
+ assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+ elif fft.__name__.endswith(('fft2', 'fftn')):
+ axes = [(0, 1), (1, 2), (0, 2)]
+ if fft.__name__.endswith('fftn'):
+ axes.extend([(0,), (1,), (2,), None])
+ for ax in axes:
+ X_res = fft(X, axes=ax)
+ Y_res = fft(Y, axes=ax)
+ assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
+ else:
+ raise ValueError()
+
+
+class TestFFTThreadSafe(object):
+ threads = 16
+ input_shape = (800, 200)
+
+ def _test_mtsame(self, func, *args):
+ def worker(args, q):
+ q.put(func(*args))
+
+ q = queue.Queue()
+ expected = func(*args)
+
+ # Spin off a bunch of threads to call the same function simultaneously
+ t = [threading.Thread(target=worker, args=(args, q))
+ for i in range(self.threads)]
+ [x.start() for x in t]
+
+ [x.join() for x in t]
+ # Make sure all threads returned the correct value
+ for i in range(self.threads):
+ assert_array_equal(q.get(timeout=5), expected,
+ 'Function returned wrong value in multithreaded context')
+
+ def test_fft(self):
+ a = np.ones(self.input_shape) * 1+0j
+ self._test_mtsame(np.fft.fft, a)
+
+ def test_ifft(self):
+ a = np.ones(self.input_shape) * 1+0j
+ self._test_mtsame(np.fft.ifft, a)
+
+ def test_rfft(self):
+ a = np.ones(self.input_shape)
+ self._test_mtsame(np.fft.rfft, a)
+
+ def test_irfft(self):
+ a = np.ones(self.input_shape) * 1+0j
+ self._test_mtsame(np.fft.irfft, a)
diff --git a/numpy/lib/__init__.py b/numpy/lib/__init__.py
index c1757150e..2db12d9a4 100644
--- a/numpy/lib/__init__.py
+++ b/numpy/lib/__init__.py
@@ -1,14 +1,31 @@
+"""
+**Note:** almost all functions in the ``numpy.lib`` namespace
+are also present in the main ``numpy`` namespace. Please use the
+functions as ``np.<funcname>`` where possible.
+
+``numpy.lib`` is mostly a space for implementing functions that don't
+belong in core or in another NumPy submodule with a clear purpose
+(e.g. ``random``, ``fft``, ``linalg``, ``ma``).
+
+Most contains basic functions that are used by several submodules and are
+useful to have in the main name-space.
+
+"""
from __future__ import division, absolute_import, print_function
import math
-from .info import __doc__
from numpy.version import version as __version__
+# Public submodules
+# Note: recfunctions and (maybe) format are public too, but not imported
+from . import mixins
+from . import scimath as emath
+
+# Private submodules
from .type_check import *
from .index_tricks import *
from .function_base import *
-from .mixins import *
from .nanfunctions import *
from .shape_base import *
from .stride_tricks import *
@@ -16,9 +33,7 @@ from .twodim_base import *
from .ufunclike import *
from .histograms import *
-from . import scimath as emath
from .polynomial import *
-#import convertcode
from .utils import *
from .arraysetops import *
from .npyio import *
@@ -28,11 +43,10 @@ from .arraypad import *
from ._version import *
from numpy.core._multiarray_umath import tracemalloc_domain
-__all__ = ['emath', 'math', 'tracemalloc_domain']
+__all__ = ['emath', 'math', 'tracemalloc_domain', 'Arrayterator']
__all__ += type_check.__all__
__all__ += index_tricks.__all__
__all__ += function_base.__all__
-__all__ += mixins.__all__
__all__ += shape_base.__all__
__all__ += stride_tricks.__all__
__all__ += twodim_base.__all__
diff --git a/numpy/lib/_datasource.py b/numpy/lib/_datasource.py
index ab00b1444..0d71375c2 100644
--- a/numpy/lib/_datasource.py
+++ b/numpy/lib/_datasource.py
@@ -20,17 +20,18 @@ gzip, bz2 and xz are supported.
Example::
>>> # Create a DataSource, use os.curdir (default) for local storage.
- >>> ds = datasource.DataSource()
+ >>> from numpy import DataSource
+ >>> ds = DataSource()
>>>
>>> # Open a remote file.
>>> # DataSource downloads the file, stores it locally in:
>>> # './www.google.com/index.html'
>>> # opens the file and returns a file object.
- >>> fp = ds.open('http://www.google.com/index.html')
+ >>> fp = ds.open('http://www.google.com/') # doctest: +SKIP
>>>
>>> # Use the file as you normally would
- >>> fp.read()
- >>> fp.close()
+ >>> fp.read() # doctest: +SKIP
+ >>> fp.close() # doctest: +SKIP
"""
from __future__ import division, absolute_import, print_function
@@ -40,9 +41,14 @@ import sys
import warnings
import shutil
import io
+from contextlib import closing
+
+from numpy.core.overrides import set_module
+
_open = open
+
def _check_mode(mode, encoding, newline):
"""Check mode and that encoding and newline are compatible.
@@ -152,6 +158,7 @@ class _FileOpeners(object):
Examples
--------
+ >>> import gzip
>>> np.lib._datasource._file_openers.keys()
[None, '.bz2', '.gz', '.xz', '.lzma']
>>> np.lib._datasource._file_openers['.gz'] is gzip.open
@@ -262,7 +269,8 @@ def open(path, mode='r', destpath=os.curdir, encoding=None, newline=None):
return ds.open(path, mode, encoding=encoding, newline=newline)
-class DataSource (object):
+@set_module('numpy')
+class DataSource(object):
"""
DataSource(destpath='.')
@@ -285,7 +293,7 @@ class DataSource (object):
URLs require a scheme string (``http://``) to be used, without it they
will fail::
- >>> repos = DataSource()
+ >>> repos = np.DataSource()
>>> repos.exists('www.google.com/index.html')
False
>>> repos.exists('http://www.google.com/index.html')
@@ -297,17 +305,17 @@ class DataSource (object):
--------
::
- >>> ds = DataSource('/home/guido')
- >>> urlname = 'http://www.google.com/index.html'
- >>> gfile = ds.open('http://www.google.com/index.html') # remote file
+ >>> ds = np.DataSource('/home/guido')
+ >>> urlname = 'http://www.google.com/'
+ >>> gfile = ds.open('http://www.google.com/')
>>> ds.abspath(urlname)
- '/home/guido/www.google.com/site/index.html'
+ '/home/guido/www.google.com/index.html'
- >>> ds = DataSource(None) # use with temporary file
+ >>> ds = np.DataSource(None) # use with temporary file
>>> ds.open('/home/guido/foobar.txt')
<open file '/home/guido.foobar.txt', mode 'r' at 0x91d4430>
>>> ds.abspath('/home/guido/foobar.txt')
- '/tmp/tmpy4pgsP/home/guido/foobar.txt'
+ '/tmp/.../home/guido/foobar.txt'
"""
@@ -323,7 +331,7 @@ class DataSource (object):
def __del__(self):
# Remove temp directories
- if self._istmpdest:
+ if hasattr(self, '_istmpdest') and self._istmpdest:
shutil.rmtree(self._destpath)
def _iszip(self, filename):
@@ -407,13 +415,9 @@ class DataSource (object):
# TODO: Doesn't handle compressed files!
if self._isurl(path):
try:
- openedurl = urlopen(path)
- f = _open(upath, 'wb')
- try:
- shutil.copyfileobj(openedurl, f)
- finally:
- f.close()
- openedurl.close()
+ with closing(urlopen(path)) as openedurl:
+ with _open(upath, 'wb') as f:
+ shutil.copyfileobj(openedurl, f)
except URLError:
raise URLError("URL not found: %s" % path)
else:
@@ -540,6 +544,11 @@ class DataSource (object):
is accessible if it exists in either location.
"""
+
+ # First test for local path
+ if os.path.exists(path):
+ return True
+
# We import this here because importing urllib2 is slow and
# a significant fraction of numpy's total import time.
if sys.version_info[0] >= 3:
@@ -549,10 +558,6 @@ class DataSource (object):
from urllib2 import urlopen
from urllib2 import URLError
- # Test local path
- if os.path.exists(path):
- return True
-
# Test cached url
upath = self.abspath(path)
if os.path.exists(upath):
diff --git a/numpy/lib/_iotools.py b/numpy/lib/_iotools.py
index b604b8c52..c392929fd 100644
--- a/numpy/lib/_iotools.py
+++ b/numpy/lib/_iotools.py
@@ -8,7 +8,7 @@ __docformat__ = "restructuredtext en"
import sys
import numpy as np
import numpy.core.numeric as nx
-from numpy.compat import asbytes, asunicode, bytes, asbytes_nested, basestring
+from numpy.compat import asbytes, asunicode, bytes, basestring
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
@@ -121,7 +121,7 @@ def has_nested_fields(ndtype):
"""
for name in ndtype.names or ():
- if ndtype[name].names:
+ if ndtype[name].names is not None:
return True
return False
@@ -146,11 +146,17 @@ def flatten_dtype(ndtype, flatten_base=False):
>>> dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
... ('block', int, (2, 3))])
>>> np.lib._iotools.flatten_dtype(dt)
- [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32')]
+ [dtype('S4'), dtype('float64'), dtype('float64'), dtype('int64')]
>>> np.lib._iotools.flatten_dtype(dt, flatten_base=True)
- [dtype('|S4'), dtype('float64'), dtype('float64'), dtype('int32'),
- dtype('int32'), dtype('int32'), dtype('int32'), dtype('int32'),
- dtype('int32')]
+ [dtype('S4'),
+ dtype('float64'),
+ dtype('float64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64'),
+ dtype('int64')]
"""
names = ndtype.names
@@ -309,13 +315,13 @@ class NameValidator(object):
--------
>>> validator = np.lib._iotools.NameValidator()
>>> validator(['file', 'field2', 'with space', 'CaSe'])
- ['file_', 'field2', 'with_space', 'CaSe']
+ ('file_', 'field2', 'with_space', 'CaSe')
>>> validator = np.lib._iotools.NameValidator(excludelist=['excl'],
- deletechars='q',
- case_sensitive='False')
+ ... deletechars='q',
+ ... case_sensitive=False)
>>> validator(['excl', 'field2', 'no_q', 'with space', 'CaSe'])
- ['excl_', 'field2', 'no_', 'with_space', 'case']
+ ('EXCL', 'FIELD2', 'NO_Q', 'WITH_SPACE', 'CASE')
"""
#
@@ -599,7 +605,7 @@ class StringConverter(object):
--------
>>> import dateutil.parser
>>> import datetime
- >>> dateparser = datetustil.parser.parse
+ >>> dateparser = dateutil.parser.parse
>>> defaultdate = datetime.date(2000, 1, 1)
>>> StringConverter.upgrade_mapper(dateparser, default=defaultdate)
"""
@@ -693,7 +699,7 @@ class StringConverter(object):
self.func = lambda x: int(float(x))
# Store the list of strings corresponding to missing values.
if missing_values is None:
- self.missing_values = set([''])
+ self.missing_values = {''}
else:
if isinstance(missing_values, basestring):
missing_values = missing_values.split(",")
@@ -925,28 +931,27 @@ def easy_dtype(ndtype, names=None, defaultfmt="f%i", **validationargs):
names = validate(names, nbfields=nbfields, defaultfmt=defaultfmt)
ndtype = np.dtype(dict(formats=ndtype, names=names))
else:
- nbtypes = len(ndtype)
# Explicit names
if names is not None:
validate = NameValidator(**validationargs)
if isinstance(names, basestring):
names = names.split(",")
# Simple dtype: repeat to match the nb of names
- if nbtypes == 0:
+ if ndtype.names is None:
formats = tuple([ndtype.type] * len(names))
names = validate(names, defaultfmt=defaultfmt)
ndtype = np.dtype(list(zip(names, formats)))
# Structured dtype: just validate the names as needed
else:
- ndtype.names = validate(names, nbfields=nbtypes,
+ ndtype.names = validate(names, nbfields=len(ndtype.names),
defaultfmt=defaultfmt)
# No implicit names
- elif (nbtypes > 0):
+ elif ndtype.names is not None:
validate = NameValidator(**validationargs)
# Default initial names : should we change the format ?
- if ((ndtype.names == tuple("f%i" % i for i in range(nbtypes))) and
+ if ((ndtype.names == tuple("f%i" % i for i in range(len(ndtype.names)))) and
(defaultfmt != "f%i")):
- ndtype.names = validate([''] * nbtypes, defaultfmt=defaultfmt)
+ ndtype.names = validate([''] * len(ndtype.names), defaultfmt=defaultfmt)
# Explicit initial names : just validate
else:
ndtype.names = validate(ndtype.names, defaultfmt=defaultfmt)
diff --git a/numpy/lib/_version.py b/numpy/lib/_version.py
index c3563a7fa..8aa999fc9 100644
--- a/numpy/lib/_version.py
+++ b/numpy/lib/_version.py
@@ -47,9 +47,12 @@ class NumpyVersion():
>>> from numpy.lib import NumpyVersion
>>> if NumpyVersion(np.__version__) < '1.7.0':
... print('skip')
- skip
+ >>> # skip
>>> NumpyVersion('1.7') # raises ValueError, add ".0"
+ Traceback (most recent call last):
+ ...
+ ValueError: Not a valid numpy version string
"""
diff --git a/numpy/lib/arraypad.py b/numpy/lib/arraypad.py
index e9ca9de4d..33e64708d 100644
--- a/numpy/lib/arraypad.py
+++ b/numpy/lib/arraypad.py
@@ -6,6 +6,8 @@ of an n-dimensional array.
from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.core.overrides import array_function_dispatch
+from numpy.lib.index_tricks import ndindex
__all__ = ['pad']
@@ -15,50 +17,7 @@ __all__ = ['pad']
# Private utility functions.
-def _arange_ndarray(arr, shape, axis, reverse=False):
- """
- Create an ndarray of `shape` with increments along specified `axis`
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- shape : tuple of ints
- Shape of desired array. Should be equivalent to `arr.shape` except
- `shape[axis]` which may have any positive value.
- axis : int
- Axis to increment along.
- reverse : bool
- If False, increment in a positive fashion from 1 to `shape[axis]`,
- inclusive. If True, the bounds are the same but the order reversed.
-
- Returns
- -------
- padarr : ndarray
- Output array sized to pad `arr` along `axis`, with linear range from
- 1 to `shape[axis]` along specified `axis`.
-
- Notes
- -----
- The range is deliberately 1-indexed for this specific use case. Think of
- this algorithm as broadcasting `np.arange` to a single `axis` of an
- arbitrarily shaped ndarray.
-
- """
- initshape = tuple(1 if i != axis else shape[axis]
- for (i, x) in enumerate(arr.shape))
- if not reverse:
- padarr = np.arange(1, shape[axis] + 1)
- else:
- padarr = np.arange(shape[axis], 0, -1)
- padarr = padarr.reshape(initshape)
- for i, dim in enumerate(shape):
- if padarr.shape[i] != dim:
- padarr = padarr.repeat(dim, axis=i)
- return padarr
-
-
-def _round_ifneeded(arr, dtype):
+def _round_if_needed(arr, dtype):
"""
Rounds arr inplace if destination dtype is integer.
@@ -68,936 +27,519 @@ def _round_ifneeded(arr, dtype):
Input array.
dtype : dtype
The dtype of the destination array.
-
"""
if np.issubdtype(dtype, np.integer):
arr.round(out=arr)
-def _slice_at_axis(shape, sl, axis):
- """
- Construct a slice tuple the length of shape, with sl at the specified axis
- """
- slice_tup = (slice(None),)
- return slice_tup * axis + (sl,) + slice_tup * (len(shape) - axis - 1)
-
-
-def _slice_first(shape, n, axis):
- """ Construct a slice tuple to take the first n elements along axis """
- return _slice_at_axis(shape, slice(0, n), axis=axis)
-
-
-def _slice_last(shape, n, axis):
- """ Construct a slice tuple to take the last n elements along axis """
- dim = shape[axis] # doing this explicitly makes n=0 work
- return _slice_at_axis(shape, slice(dim - n, dim), axis=axis)
-
-
-def _do_prepend(arr, pad_chunk, axis):
- return np.concatenate(
- (pad_chunk.astype(arr.dtype, copy=False), arr), axis=axis)
-
-
-def _do_append(arr, pad_chunk, axis):
- return np.concatenate(
- (arr, pad_chunk.astype(arr.dtype, copy=False)), axis=axis)
-
-
-def _prepend_const(arr, pad_amt, val, axis=-1):
- """
- Prepend constant `val` along `axis` of `arr`.
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to prepend.
- val : scalar
- Constant value to use. For best results should be of type `arr.dtype`;
- if not `arr.dtype` will be cast to `arr.dtype`.
- axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, with `pad_amt` constant `val` prepended along `axis`.
-
- """
- if pad_amt == 0:
- return arr
- padshape = tuple(x if i != axis else pad_amt
- for (i, x) in enumerate(arr.shape))
- return _do_prepend(arr, np.full(padshape, val, dtype=arr.dtype), axis)
-
-
-def _append_const(arr, pad_amt, val, axis=-1):
- """
- Append constant `val` along `axis` of `arr`.
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to append.
- val : scalar
- Constant value to use. For best results should be of type `arr.dtype`;
- if not `arr.dtype` will be cast to `arr.dtype`.
- axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, with `pad_amt` constant `val` appended along `axis`.
-
- """
- if pad_amt == 0:
- return arr
- padshape = tuple(x if i != axis else pad_amt
- for (i, x) in enumerate(arr.shape))
- return _do_append(arr, np.full(padshape, val, dtype=arr.dtype), axis)
-
-
-
-def _prepend_edge(arr, pad_amt, axis=-1):
- """
- Prepend `pad_amt` to `arr` along `axis` by extending edge values.
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to prepend.
- axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, extended by `pad_amt` edge values appended along `axis`.
-
- """
- if pad_amt == 0:
- return arr
-
- edge_slice = _slice_first(arr.shape, 1, axis=axis)
- edge_arr = arr[edge_slice]
- return _do_prepend(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
-
-
-def _append_edge(arr, pad_amt, axis=-1):
+def _slice_at_axis(sl, axis):
"""
- Append `pad_amt` to `arr` along `axis` by extending edge values.
+ Construct tuple of slices to slice an array in the given dimension.
Parameters
----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to append.
- axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, extended by `pad_amt` edge values prepended along
- `axis`.
-
- """
- if pad_amt == 0:
- return arr
-
- edge_slice = _slice_last(arr.shape, 1, axis=axis)
- edge_arr = arr[edge_slice]
- return _do_append(arr, edge_arr.repeat(pad_amt, axis=axis), axis)
-
-
-def _prepend_ramp(arr, pad_amt, end, axis=-1):
- """
- Prepend linear ramp along `axis`.
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to prepend.
- end : scalar
- Constal value to use. For best results should be of type `arr.dtype`;
- if not `arr.dtype` will be cast to `arr.dtype`.
+ sl : slice
+ The slice for the given dimension.
axis : int
- Axis along which to pad `arr`.
+ The axis to which `sl` is applied. All other dimensions are left
+ "unsliced".
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt` values prepended along `axis`. The
- prepended region ramps linearly from the edge value to `end`.
+ sl : tuple of slices
+ A tuple with slices matching `shape` in length.
+ Examples
+ --------
+ >>> _slice_at_axis(slice(None, 3, -1), 1)
+ (slice(None, None, None), slice(None, 3, -1), (...,))
"""
- if pad_amt == 0:
- return arr
-
- # Generate shape for final concatenated array
- padshape = tuple(x if i != axis else pad_amt
- for (i, x) in enumerate(arr.shape))
-
- # Generate an n-dimensional array incrementing along `axis`
- ramp_arr = _arange_ndarray(arr, padshape, axis,
- reverse=True).astype(np.float64)
-
- # Appropriate slicing to extract n-dimensional edge along `axis`
- edge_slice = _slice_first(arr.shape, 1, axis=axis)
-
- # Extract edge, and extend along `axis`
- edge_pad = arr[edge_slice].repeat(pad_amt, axis)
-
- # Linear ramp
- slope = (end - edge_pad) / float(pad_amt)
- ramp_arr = ramp_arr * slope
- ramp_arr += edge_pad
- _round_ifneeded(ramp_arr, arr.dtype)
-
- # Ramp values will most likely be float, cast them to the same type as arr
- return _do_prepend(arr, ramp_arr, axis)
-
-
-def _append_ramp(arr, pad_amt, end, axis=-1):
- """
- Append linear ramp along `axis`.
+ return (slice(None),) * axis + (sl,) + (...,)
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to append.
- end : scalar
- Constal value to use. For best results should be of type `arr.dtype`;
- if not `arr.dtype` will be cast to `arr.dtype`.
- axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, with `pad_amt` values appended along `axis`. The
- appended region ramps linearly from the edge value to `end`.
+def _view_roi(array, original_area_slice, axis):
"""
- if pad_amt == 0:
- return arr
+ Get a view of the current region of interest during iterative padding.
- # Generate shape for final concatenated array
- padshape = tuple(x if i != axis else pad_amt
- for (i, x) in enumerate(arr.shape))
-
- # Generate an n-dimensional array incrementing along `axis`
- ramp_arr = _arange_ndarray(arr, padshape, axis,
- reverse=False).astype(np.float64)
-
- # Slice a chunk from the edge to calculate stats on
- edge_slice = _slice_last(arr.shape, 1, axis=axis)
-
- # Extract edge, and extend along `axis`
- edge_pad = arr[edge_slice].repeat(pad_amt, axis)
-
- # Linear ramp
- slope = (end - edge_pad) / float(pad_amt)
- ramp_arr = ramp_arr * slope
- ramp_arr += edge_pad
- _round_ifneeded(ramp_arr, arr.dtype)
-
- # Ramp values will most likely be float, cast them to the same type as arr
- return _do_append(arr, ramp_arr, axis)
-
-
-def _prepend_max(arr, pad_amt, num, axis=-1):
- """
- Prepend `pad_amt` maximum values along `axis`.
+ When padding multiple dimensions iteratively corner values are
+ unnecessarily overwritten multiple times. This function reduces the
+ working area for the first dimensions so that corners are excluded.
Parameters
----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to prepend.
- num : int
- Depth into `arr` along `axis` to calculate maximum.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
+ array : ndarray
+ The array with the region of interest.
+ original_area_slice : tuple of slices
+ Denotes the area with original values of the unpadded array.
axis : int
- Axis along which to pad `arr`.
+ The currently padded dimension assuming that `axis` is padded before
+ `axis` + 1.
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt` values appended along `axis`. The
- prepended region is the maximum of the first `num` values along
- `axis`.
-
+ roi : ndarray
+ The region of interest of the original `array`.
"""
- if pad_amt == 0:
- return arr
-
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _prepend_edge(arr, pad_amt, axis)
-
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
-
- # Slice a chunk from the edge to calculate stats on
- max_slice = _slice_first(arr.shape, num, axis=axis)
+ axis += 1
+ sl = (slice(None),) * axis + original_area_slice[axis:]
+ return array[sl]
- # Extract slice, calculate max
- max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
- # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return _do_prepend(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
-
-
-def _append_max(arr, pad_amt, num, axis=-1):
+def _pad_simple(array, pad_width, fill_value=None):
"""
- Pad one `axis` of `arr` with the maximum of the last `num` elements.
+ Pad array on all sides with either a single value or undefined values.
Parameters
----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to append.
- num : int
- Depth into `arr` along `axis` to calculate maximum.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
- axis : int
- Axis along which to pad `arr`.
+ array : ndarray
+ Array to grow.
+ pad_width : sequence of tuple[int, int]
+ Pad width on both sides for each dimension in `arr`.
+ fill_value : scalar, optional
+ If provided the padded area is filled with this value, otherwise
+ the pad area left undefined.
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt` values appended along `axis`. The
- appended region is the maximum of the final `num` values along `axis`.
-
+ padded : ndarray
+ The padded array with the same dtype as`array`. Its order will default
+ to C-style if `array` is not F-contiguous.
+ original_area_slice : tuple
+ A tuple of slices pointing to the area of the original array.
"""
- if pad_amt == 0:
- return arr
-
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _append_edge(arr, pad_amt, axis)
+ # Allocate grown array
+ new_shape = tuple(
+ left + size + right
+ for size, (left, right) in zip(array.shape, pad_width)
+ )
+ order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order
+ padded = np.empty(new_shape, dtype=array.dtype, order=order)
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
-
- # Slice a chunk from the edge to calculate stats on
- if num is not None:
- max_slice = _slice_last(arr.shape, num, axis=axis)
- else:
- max_slice = tuple(slice(None) for x in arr.shape)
+ if fill_value is not None:
+ padded.fill(fill_value)
- # Extract slice, calculate max
- max_chunk = arr[max_slice].max(axis=axis, keepdims=True)
+ # Copy old array into correct space
+ original_area_slice = tuple(
+ slice(left, left + size)
+ for size, (left, right) in zip(array.shape, pad_width)
+ )
+ padded[original_area_slice] = array
- # Concatenate `arr` with `max_chunk`, extended along `axis` by `pad_amt`
- return _do_append(arr, max_chunk.repeat(pad_amt, axis=axis), axis)
+ return padded, original_area_slice
-def _prepend_mean(arr, pad_amt, num, axis=-1):
+def _set_pad_area(padded, axis, width_pair, value_pair):
"""
- Prepend `pad_amt` mean values along `axis`.
+ Set empty-padded area in given dimension.
Parameters
----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to prepend.
- num : int
- Depth into `arr` along `axis` to calculate mean.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
+ padded : ndarray
+ Array with the pad area which is modified inplace.
axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, with `pad_amt` values prepended along `axis`. The
- prepended region is the mean of the first `num` values along `axis`.
-
+ Dimension with the pad area to set.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ value_pair : tuple of scalars or ndarrays
+ Values inserted into the pad area on each side. It must match or be
+ broadcastable to the shape of `arr`.
"""
- if pad_amt == 0:
- return arr
-
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _prepend_edge(arr, pad_amt, axis)
-
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
-
- # Slice a chunk from the edge to calculate stats on
- mean_slice = _slice_first(arr.shape, num, axis=axis)
+ left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
+ padded[left_slice] = value_pair[0]
- # Extract slice, calculate mean
- mean_chunk = arr[mean_slice].mean(axis, keepdims=True)
- _round_ifneeded(mean_chunk, arr.dtype)
+ right_slice = _slice_at_axis(
+ slice(padded.shape[axis] - width_pair[1], None), axis)
+ padded[right_slice] = value_pair[1]
- # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return _do_prepend(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
-
-def _append_mean(arr, pad_amt, num, axis=-1):
+def _get_edges(padded, axis, width_pair):
"""
- Append `pad_amt` mean values along `axis`.
+ Retrieve edge values from empty-padded array in given dimension.
Parameters
----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to append.
- num : int
- Depth into `arr` along `axis` to calculate mean.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
+ padded : ndarray
+ Empty-padded array.
axis : int
- Axis along which to pad `arr`.
+ Dimension in which the edges are considered.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt` values appended along `axis`. The
- appended region is the maximum of the final `num` values along `axis`.
-
+ left_edge, right_edge : ndarray
+ Edge values of the valid area in `padded` in the given dimension. Its
+ shape will always match `padded` except for the dimension given by
+ `axis` which will have a length of 1.
"""
- if pad_amt == 0:
- return arr
-
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _append_edge(arr, pad_amt, axis)
-
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
-
- # Slice a chunk from the edge to calculate stats on
- if num is not None:
- mean_slice = _slice_last(arr.shape, num, axis=axis)
- else:
- mean_slice = tuple(slice(None) for x in arr.shape)
+ left_index = width_pair[0]
+ left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
+ left_edge = padded[left_slice]
- # Extract slice, calculate mean
- mean_chunk = arr[mean_slice].mean(axis=axis, keepdims=True)
- _round_ifneeded(mean_chunk, arr.dtype)
+ right_index = padded.shape[axis] - width_pair[1]
+ right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
+ right_edge = padded[right_slice]
- # Concatenate `arr` with `mean_chunk`, extended along `axis` by `pad_amt`
- return _do_append(arr, mean_chunk.repeat(pad_amt, axis), axis=axis)
+ return left_edge, right_edge
-def _prepend_med(arr, pad_amt, num, axis=-1):
+def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
"""
- Prepend `pad_amt` median values along `axis`.
+ Construct linear ramps for empty-padded array in given dimension.
Parameters
----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to prepend.
- num : int
- Depth into `arr` along `axis` to calculate median.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
+ padded : ndarray
+ Empty-padded array.
axis : int
- Axis along which to pad `arr`.
+ Dimension in which the ramps are constructed.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ end_value_pair : (scalar, scalar)
+ End values for the linear ramps which form the edge of the fully padded
+ array. These values are included in the linear ramps.
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt` values prepended along `axis`. The
- prepended region is the median of the first `num` values along `axis`.
-
+ left_ramp, right_ramp : ndarray
+ Linear ramps to set on both sides of `padded`.
"""
- if pad_amt == 0:
- return arr
-
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _prepend_edge(arr, pad_amt, axis)
+ edge_pair = _get_edges(padded, axis, width_pair)
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
+ left_ramp = np.linspace(
+ start=end_value_pair[0],
+ stop=edge_pair[0].squeeze(axis), # Dimension is replaced by linspace
+ num=width_pair[0],
+ endpoint=False,
+ dtype=padded.dtype,
+ axis=axis,
+ )
- # Slice a chunk from the edge to calculate stats on
- med_slice = _slice_first(arr.shape, num, axis=axis)
+ right_ramp = np.linspace(
+ start=end_value_pair[1],
+ stop=edge_pair[1].squeeze(axis), # Dimension is replaced by linspace
+ num=width_pair[1],
+ endpoint=False,
+ dtype=padded.dtype,
+ axis=axis,
+ )
+ # Reverse linear space in appropriate dimension
+ right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
- # Extract slice, calculate median
- med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
- _round_ifneeded(med_chunk, arr.dtype)
+ return left_ramp, right_ramp
- # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return _do_prepend(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
-
-def _append_med(arr, pad_amt, num, axis=-1):
+def _get_stats(padded, axis, width_pair, length_pair, stat_func):
"""
- Append `pad_amt` median values along `axis`.
+ Calculate statistic for the empty-padded array in given dimension.
Parameters
----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to append.
- num : int
- Depth into `arr` along `axis` to calculate median.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
+ padded : ndarray
+ Empty-padded array.
axis : int
- Axis along which to pad `arr`.
+ Dimension in which the statistic is calculated.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
+ length_pair : 2-element sequence of None or int
+ Gives the number of values in valid area from each side that is
+ taken into account when calculating the statistic. If None the entire
+ valid area in `padded` is considered.
+ stat_func : function
+ Function to compute statistic. The expected signature is
+ ``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt` values appended along `axis`. The
- appended region is the median of the final `num` values along `axis`.
-
+ left_stat, right_stat : ndarray
+ Calculated statistic for both sides of `padded`.
"""
- if pad_amt == 0:
- return arr
+ # Calculate indices of the edges of the area with original values
+ left_index = width_pair[0]
+ right_index = padded.shape[axis] - width_pair[1]
+ # as well as its length
+ max_length = right_index - left_index
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _append_edge(arr, pad_amt, axis)
+ # Limit stat_lengths to max_length
+ left_length, right_length = length_pair
+ if left_length is None or max_length < left_length:
+ left_length = max_length
+ if right_length is None or max_length < right_length:
+ right_length = max_length
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
+ if (left_length == 0 or right_length == 0) \
+ and stat_func in {np.amax, np.amin}:
+ # amax and amin can't operate on an empty array,
+ # raise a more descriptive error here instead of the default one
+ raise ValueError("stat_length of 0 yields no value for padding")
- # Slice a chunk from the edge to calculate stats on
- if num is not None:
- med_slice = _slice_last(arr.shape, num, axis=axis)
- else:
- med_slice = tuple(slice(None) for x in arr.shape)
+ # Calculate statistic for the left side
+ left_slice = _slice_at_axis(
+ slice(left_index, left_index + left_length), axis)
+ left_chunk = padded[left_slice]
+ left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
+ _round_if_needed(left_stat, padded.dtype)
- # Extract slice, calculate median
- med_chunk = np.median(arr[med_slice], axis=axis, keepdims=True)
- _round_ifneeded(med_chunk, arr.dtype)
+ if left_length == right_length == max_length:
+ # return early as right_stat must be identical to left_stat
+ return left_stat, left_stat
- # Concatenate `arr` with `med_chunk`, extended along `axis` by `pad_amt`
- return _do_append(arr, med_chunk.repeat(pad_amt, axis), axis=axis)
+ # Calculate statistic for the right side
+ right_slice = _slice_at_axis(
+ slice(right_index - right_length, right_index), axis)
+ right_chunk = padded[right_slice]
+ right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
+ _round_if_needed(right_stat, padded.dtype)
+ return left_stat, right_stat
-def _prepend_min(arr, pad_amt, num, axis=-1):
- """
- Prepend `pad_amt` minimum values along `axis`.
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to prepend.
- num : int
- Depth into `arr` along `axis` to calculate minimum.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
- axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, with `pad_amt` values prepended along `axis`. The
- prepended region is the minimum of the first `num` values along
- `axis`.
+def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
"""
- if pad_amt == 0:
- return arr
-
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _prepend_edge(arr, pad_amt, axis)
-
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
-
- # Slice a chunk from the edge to calculate stats on
- min_slice = _slice_first(arr.shape, num, axis=axis)
-
- # Extract slice, calculate min
- min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
-
- # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return _do_prepend(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
-
-
-def _append_min(arr, pad_amt, num, axis=-1):
- """
- Append `pad_amt` median values along `axis`.
+ Pad `axis` of `arr` with reflection.
Parameters
----------
- arr : ndarray
+ padded : ndarray
Input array of arbitrary shape.
- pad_amt : int
- Amount of padding to append.
- num : int
- Depth into `arr` along `axis` to calculate minimum.
- Range: [1, `arr.shape[axis]`] or None (entire axis)
axis : int
Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, with `pad_amt` values appended along `axis`. The
- appended region is the minimum of the final `num` values along `axis`.
-
- """
- if pad_amt == 0:
- return arr
-
- # Equivalent to edge padding for single value, so do that instead
- if num == 1:
- return _append_edge(arr, pad_amt, axis)
-
- # Use entire array if `num` is too large
- if num is not None:
- if num >= arr.shape[axis]:
- num = None
-
- # Slice a chunk from the edge to calculate stats on
- if num is not None:
- min_slice = _slice_last(arr.shape, num, axis=axis)
- else:
- min_slice = tuple(slice(None) for x in arr.shape)
-
- # Extract slice, calculate min
- min_chunk = arr[min_slice].min(axis=axis, keepdims=True)
-
- # Concatenate `arr` with `min_chunk`, extended along `axis` by `pad_amt`
- return _do_append(arr, min_chunk.repeat(pad_amt, axis), axis=axis)
-
-
-def _pad_ref(arr, pad_amt, method, axis=-1):
- """
- Pad `axis` of `arr` by reflection.
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
- pad_amt : tuple of ints, length 2
- Padding to (prepend, append) along `axis`.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
method : str
Controls method of reflection; options are 'even' or 'odd'.
- axis : int
- Axis along which to pad `arr`.
+ include_edge : bool
+ If true, edge value is included in reflection, otherwise the edge
+ value forms the symmetric axis to the reflection.
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
- values appended along `axis`. Both regions are padded with reflected
- values from the original array.
-
- Notes
- -----
- This algorithm does not pad with repetition, i.e. the edges are not
- repeated in the reflection. For that behavior, use `mode='symmetric'`.
-
- The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
- single function, lest the indexing tricks in non-integer multiples of the
- original shape would violate repetition in the final iteration.
-
- """
- # Implicit booleanness to test for zero (or None) in any scalar type
- if pad_amt[0] == 0 and pad_amt[1] == 0:
- return arr
-
- ##########################################################################
- # Prepended region
-
- # Slice off a reverse indexed chunk from near edge to pad `arr` before
- ref_slice = _slice_at_axis(arr.shape, slice(pad_amt[0], 0, -1), axis=axis)
-
- ref_chunk1 = arr[ref_slice]
-
- # Memory/computationally more expensive, only do this if `method='odd'`
- if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
- edge_chunk = arr[edge_slice1]
- ref_chunk1 = 2 * edge_chunk - ref_chunk1
- del edge_chunk
-
- ##########################################################################
- # Appended region
-
- # Slice off a reverse indexed chunk from far edge to pad `arr` after
- start = arr.shape[axis] - pad_amt[1] - 1
- end = arr.shape[axis] - 1
- ref_slice = _slice_at_axis(arr.shape, slice(start, end), axis=axis)
- rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
- ref_chunk2 = arr[ref_slice][rev_idx]
-
- if 'odd' in method:
- edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
- edge_chunk = arr[edge_slice2]
- ref_chunk2 = 2 * edge_chunk - ref_chunk2
- del edge_chunk
-
- # Concatenate `arr` with both chunks, extending along `axis`
- return np.concatenate((ref_chunk1, arr, ref_chunk2), axis=axis)
-
-
-def _pad_sym(arr, pad_amt, method, axis=-1):
- """
- Pad `axis` of `arr` by symmetry.
-
- Parameters
- ----------
- arr : ndarray
- Input array of arbitrary shape.
pad_amt : tuple of ints, length 2
- Padding to (prepend, append) along `axis`.
- method : str
- Controls method of symmetry; options are 'even' or 'odd'.
- axis : int
- Axis along which to pad `arr`.
-
- Returns
- -------
- padarr : ndarray
- Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
- values appended along `axis`. Both regions are padded with symmetric
- values from the original array.
-
- Notes
- -----
- This algorithm DOES pad with repetition, i.e. the edges are repeated.
- For padding without repeated edges, use `mode='reflect'`.
-
- The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
- single function, lest the indexing tricks in non-integer multiples of the
- original shape would violate repetition in the final iteration.
-
+ New index positions of padding to do along the `axis`. If these are
+ both 0, padding is done in this dimension.
"""
- # Implicit booleanness to test for zero (or None) in any scalar type
- if pad_amt[0] == 0 and pad_amt[1] == 0:
- return arr
-
- ##########################################################################
- # Prepended region
-
- # Slice off a reverse indexed chunk from near edge to pad `arr` before
- sym_slice = _slice_first(arr.shape, pad_amt[0], axis=axis)
- rev_idx = _slice_at_axis(arr.shape, slice(None, None, -1), axis=axis)
- sym_chunk1 = arr[sym_slice][rev_idx]
-
- # Memory/computationally more expensive, only do this if `method='odd'`
- if 'odd' in method and pad_amt[0] > 0:
- edge_slice1 = _slice_first(arr.shape, 1, axis=axis)
- edge_chunk = arr[edge_slice1]
- sym_chunk1 = 2 * edge_chunk - sym_chunk1
- del edge_chunk
-
- ##########################################################################
- # Appended region
-
- # Slice off a reverse indexed chunk from far edge to pad `arr` after
- sym_slice = _slice_last(arr.shape, pad_amt[1], axis=axis)
- sym_chunk2 = arr[sym_slice][rev_idx]
-
- if 'odd' in method:
- edge_slice2 = _slice_last(arr.shape, 1, axis=axis)
- edge_chunk = arr[edge_slice2]
- sym_chunk2 = 2 * edge_chunk - sym_chunk2
- del edge_chunk
-
- # Concatenate `arr` with both chunks, extending along `axis`
- return np.concatenate((sym_chunk1, arr, sym_chunk2), axis=axis)
+ left_pad, right_pad = width_pair
+ old_length = padded.shape[axis] - right_pad - left_pad
-
-def _pad_wrap(arr, pad_amt, axis=-1):
- """
- Pad `axis` of `arr` via wrapping.
+ if include_edge:
+ # Edge is included, we need to offset the pad amount by 1
+ edge_offset = 1
+ else:
+ edge_offset = 0 # Edge is not included, no need to offset pad amount
+ old_length -= 1 # but must be omitted from the chunk
+
+ if left_pad > 0:
+ # Pad with reflected values on left side:
+ # First limit chunk size which can't be larger than pad area
+ chunk_length = min(old_length, left_pad)
+ # Slice right to left, stop on or next to edge, start relative to stop
+ stop = left_pad - edge_offset
+ start = stop + chunk_length
+ left_slice = _slice_at_axis(slice(start, stop, -1), axis)
+ left_chunk = padded[left_slice]
+
+ if method == "odd":
+ # Negate chunk and align with edge
+ edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
+ left_chunk = 2 * padded[edge_slice] - left_chunk
+
+ # Insert chunk into padded area
+ start = left_pad - chunk_length
+ stop = left_pad
+ pad_area = _slice_at_axis(slice(start, stop), axis)
+ padded[pad_area] = left_chunk
+ # Adjust pointer to left edge for next iteration
+ left_pad -= chunk_length
+
+ if right_pad > 0:
+ # Pad with reflected values on right side:
+ # First limit chunk size which can't be larger than pad area
+ chunk_length = min(old_length, right_pad)
+ # Slice right to left, start on or next to edge, stop relative to start
+ start = -right_pad + edge_offset - 2
+ stop = start - chunk_length
+ right_slice = _slice_at_axis(slice(start, stop, -1), axis)
+ right_chunk = padded[right_slice]
+
+ if method == "odd":
+ # Negate chunk and align with edge
+ edge_slice = _slice_at_axis(
+ slice(-right_pad - 1, -right_pad), axis)
+ right_chunk = 2 * padded[edge_slice] - right_chunk
+
+ # Insert chunk into padded area
+ start = padded.shape[axis] - right_pad
+ stop = start + chunk_length
+ pad_area = _slice_at_axis(slice(start, stop), axis)
+ padded[pad_area] = right_chunk
+ # Adjust pointer to right edge for next iteration
+ right_pad -= chunk_length
+
+ return left_pad, right_pad
+
+
+def _set_wrap_both(padded, axis, width_pair):
+ """
+ Pad `axis` of `arr` with wrapped values.
Parameters
----------
- arr : ndarray
+ padded : ndarray
Input array of arbitrary shape.
- pad_amt : tuple of ints, length 2
- Padding to (prepend, append) along `axis`.
axis : int
Axis along which to pad `arr`.
+ width_pair : (int, int)
+ Pair of widths that mark the pad area on both sides in the given
+ dimension.
Returns
-------
- padarr : ndarray
- Output array, with `pad_amt[0]` values prepended and `pad_amt[1]`
- values appended along `axis`. Both regions are padded wrapped values
- from the opposite end of `axis`.
-
- Notes
- -----
- This method of padding is also known as 'tile' or 'tiling'.
-
- The modes 'reflect', 'symmetric', and 'wrap' must be padded with a
- single function, lest the indexing tricks in non-integer multiples of the
- original shape would violate repetition in the final iteration.
-
- """
- # Implicit booleanness to test for zero (or None) in any scalar type
- if pad_amt[0] == 0 and pad_amt[1] == 0:
- return arr
-
- ##########################################################################
- # Prepended region
-
- # Slice off a reverse indexed chunk from near edge to pad `arr` before
- wrap_slice = _slice_last(arr.shape, pad_amt[0], axis=axis)
- wrap_chunk1 = arr[wrap_slice]
-
- ##########################################################################
- # Appended region
-
- # Slice off a reverse indexed chunk from far edge to pad `arr` after
- wrap_slice = _slice_first(arr.shape, pad_amt[1], axis=axis)
- wrap_chunk2 = arr[wrap_slice]
-
- # Concatenate `arr` with both chunks, extending along `axis`
- return np.concatenate((wrap_chunk1, arr, wrap_chunk2), axis=axis)
-
-
-def _normalize_shape(ndarray, shape, cast_to_int=True):
- """
- Private function which does some checks and normalizes the possibly
- much simpler representations of 'pad_width', 'stat_length',
- 'constant_values', 'end_values'.
-
- Parameters
- ----------
- narray : ndarray
- Input ndarray
- shape : {sequence, array_like, float, int}, optional
- The width of padding (pad_width), the number of elements on the
- edge of the narray used for statistics (stat_length), the constant
- value(s) to use when filling padded regions (constant_values), or the
- endpoint target(s) for linear ramps (end_values).
- ((before_1, after_1), ... (before_N, after_N)) unique number of
- elements for each axis where `N` is rank of `narray`.
- ((before, after),) yields same before and after constants for each
- axis.
- (constant,) or val is a shortcut for before = after = constant for
- all axes.
- cast_to_int : bool, optional
- Controls if values in ``shape`` will be rounded and cast to int
- before being returned.
-
- Returns
- -------
- normalized_shape : tuple of tuples
- val => ((val, val), (val, val), ...)
- [[val1, val2], [val3, val4], ...] => ((val1, val2), (val3, val4), ...)
- ((val1, val2), (val3, val4), ...) => no change
- [[val1, val2], ] => ((val1, val2), (val1, val2), ...)
- ((val1, val2), ) => ((val1, val2), (val1, val2), ...)
- [[val , ], ] => ((val, val), (val, val), ...)
- ((val , ), ) => ((val, val), (val, val), ...)
-
- """
- ndims = ndarray.ndim
-
- # Shortcut shape=None
- if shape is None:
- return ((None, None), ) * ndims
-
- # Convert any input `info` to a NumPy array
- shape_arr = np.asarray(shape)
-
- try:
- shape_arr = np.broadcast_to(shape_arr, (ndims, 2))
- except ValueError:
- fmt = "Unable to create correctly shaped tuple from %s"
- raise ValueError(fmt % (shape,))
-
- # Cast if necessary
- if cast_to_int is True:
- shape_arr = np.round(shape_arr).astype(int)
-
- # Convert list of lists to tuple of tuples
- return tuple(tuple(axis) for axis in shape_arr.tolist())
-
-
-def _validate_lengths(narray, number_elements):
- """
- Private function which does some checks and reformats pad_width and
- stat_length using _normalize_shape.
+ pad_amt : tuple of ints, length 2
+ New index positions of padding to do along the `axis`. If these are
+ both 0, padding is done in this dimension.
+ """
+ left_pad, right_pad = width_pair
+ period = padded.shape[axis] - right_pad - left_pad
+
+ # If the current dimension of `arr` doesn't contain enough valid values
+ # (not part of the undefined pad area) we need to pad multiple times.
+ # Each time the pad area shrinks on both sides which is communicated with
+ # these variables.
+ new_left_pad = 0
+ new_right_pad = 0
+
+ if left_pad > 0:
+ # Pad with wrapped values on left side
+ # First slice chunk from right side of the non-pad area.
+ # Use min(period, left_pad) to ensure that chunk is not larger than
+ # pad area
+ right_slice = _slice_at_axis(
+ slice(-right_pad - min(period, left_pad),
+ -right_pad if right_pad != 0 else None),
+ axis
+ )
+ right_chunk = padded[right_slice]
+
+ if left_pad > period:
+ # Chunk is smaller than pad area
+ pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
+ new_left_pad = left_pad - period
+ else:
+ # Chunk matches pad area
+ pad_area = _slice_at_axis(slice(None, left_pad), axis)
+ padded[pad_area] = right_chunk
+
+ if right_pad > 0:
+ # Pad with wrapped values on right side
+ # First slice chunk from left side of the non-pad area.
+ # Use min(period, right_pad) to ensure that chunk is not larger than
+ # pad area
+ left_slice = _slice_at_axis(
+ slice(left_pad, left_pad + min(period, right_pad),), axis)
+ left_chunk = padded[left_slice]
+
+ if right_pad > period:
+ # Chunk is smaller than pad area
+ pad_area = _slice_at_axis(
+ slice(-right_pad, -right_pad + period), axis)
+ new_right_pad = right_pad - period
+ else:
+ # Chunk matches pad area
+ pad_area = _slice_at_axis(slice(-right_pad, None), axis)
+ padded[pad_area] = left_chunk
+
+ return new_left_pad, new_right_pad
+
+
+def _as_pairs(x, ndim, as_index=False):
+ """
+ Broadcast `x` to an array with the shape (`ndim`, 2).
+
+ A helper function for `pad` that prepares and validates arguments like
+ `pad_width` for iteration in pairs.
Parameters
----------
- narray : ndarray
- Input ndarray
- number_elements : {sequence, int}, optional
- The width of padding (pad_width) or the number of elements on the edge
- of the narray used for statistics (stat_length).
- ((before_1, after_1), ... (before_N, after_N)) unique number of
- elements for each axis.
- ((before, after),) yields same before and after constants for each
- axis.
- (constant,) or int is a shortcut for before = after = constant for all
- axes.
+ x : {None, scalar, array-like}
+ The object to broadcast to the shape (`ndim`, 2).
+ ndim : int
+ Number of pairs the broadcasted `x` will have.
+ as_index : bool, optional
+ If `x` is not None, try to round each element of `x` to an integer
+ (dtype `np.intp`) and ensure every element is positive.
Returns
-------
- _validate_lengths : tuple of tuples
- int => ((int, int), (int, int), ...)
- [[int1, int2], [int3, int4], ...] => ((int1, int2), (int3, int4), ...)
- ((int1, int2), (int3, int4), ...) => no change
- [[int1, int2], ] => ((int1, int2), (int1, int2), ...)
- ((int1, int2), ) => ((int1, int2), (int1, int2), ...)
- [[int , ], ] => ((int, int), (int, int), ...)
- ((int , ), ) => ((int, int), (int, int), ...)
-
- """
- normshp = _normalize_shape(narray, number_elements)
- for i in normshp:
- chk = [1 if x is None else x for x in i]
- chk = [1 if x >= 0 else -1 for x in chk]
- if (chk[0] < 0) or (chk[1] < 0):
- fmt = "%s cannot contain negative values."
- raise ValueError(fmt % (number_elements,))
- return normshp
+ pairs : nested iterables, shape (`ndim`, 2)
+ The broadcasted version of `x`.
+
+ Raises
+ ------
+ ValueError
+ If `as_index` is True and `x` contains negative elements.
+ Or if `x` is not broadcastable to the shape (`ndim`, 2).
+ """
+ if x is None:
+ # Pass through None as a special case, otherwise np.round(x) fails
+ # with an AttributeError
+ return ((None, None),) * ndim
+
+ x = np.array(x)
+ if as_index:
+ x = np.round(x).astype(np.intp, copy=False)
+
+ if x.ndim < 3:
+ # Optimization: Possibly use faster paths for cases where `x` has
+ # only 1 or 2 elements. `np.broadcast_to` could handle these as well
+ # but is currently slower
+
+ if x.size == 1:
+ # x was supplied as a single value
+ x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
+ if as_index and x < 0:
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[0]),) * ndim
+
+ if x.size == 2 and x.shape != (2, 1):
+ # x was supplied with a single value for each side
+ # but except case when each dimension has a single value
+ # which should be broadcasted to a pair,
+ # e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
+ x = x.ravel() # Ensure x[0], x[1] works
+ if as_index and (x[0] < 0 or x[1] < 0):
+ raise ValueError("index can't contain negative values")
+ return ((x[0], x[1]),) * ndim
+
+ if as_index and x.min() < 0:
+ raise ValueError("index can't contain negative values")
+
+ # Converting the array with `tolist` seems to improve performance
+ # when iterating and indexing the result (see usage in `pad`)
+ return np.broadcast_to(x, (ndim, 2)).tolist()
+
+
+def _pad_dispatcher(array, pad_width, mode=None, **kwargs):
+ return (array,)
###############################################################################
# Public functions
-def pad(array, pad_width, mode, **kwargs):
+@array_function_dispatch(_pad_dispatcher, module='numpy')
+def pad(array, pad_width, mode='constant', **kwargs):
"""
- Pads an array.
+ Pad an array.
Parameters
----------
array : array_like of rank N
- Input array
+ The array to pad.
pad_width : {sequence, array_like, int}
Number of values padded to the edges of each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad widths
@@ -1005,10 +547,10 @@ def pad(array, pad_width, mode, **kwargs):
((before, after),) yields same before and after pad for each axis.
(pad,) or int is a shortcut for before = after = pad width for all
axes.
- mode : str or function
+ mode : str or function, optional
One of the following string values or a user supplied function.
- 'constant'
+ 'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
@@ -1038,6 +580,11 @@ def pad(array, pad_width, mode, **kwargs):
Pads with the wrap of the vector along the axis.
The first values are used to pad the end and the
end values are used to pad the beginning.
+ 'empty'
+ Pads with undefined values.
+
+ .. versionadded:: 1.17
+
<function>
Padding function, see Notes.
stat_length : sequence or int, optional
@@ -1054,31 +601,31 @@ def pad(array, pad_width, mode, **kwargs):
length for all axes.
Default is ``None``, to use the entire axis.
- constant_values : sequence or int, optional
+ constant_values : sequence or scalar, optional
Used in 'constant'. The values to set the padded values for each
axis.
- ((before_1, after_1), ... (before_N, after_N)) unique pad constants
+ ``((before_1, after_1), ... (before_N, after_N))`` unique pad constants
for each axis.
- ((before, after),) yields same before and after constants for each
+ ``((before, after),)`` yields same before and after constants for each
axis.
- (constant,) or int is a shortcut for before = after = constant for
+ ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all axes.
Default is 0.
- end_values : sequence or int, optional
+ end_values : sequence or scalar, optional
Used in 'linear_ramp'. The values used for the ending value of the
linear_ramp and that will form the edge of the padded array.
- ((before_1, after_1), ... (before_N, after_N)) unique end values
+ ``((before_1, after_1), ... (before_N, after_N))`` unique end values
for each axis.
- ((before, after),) yields same before and after end values for each
+ ``((before, after),)`` yields same before and after end values for each
axis.
- (constant,) or int is a shortcut for before = after = end value for
+ ``(constant,)`` or ``constant`` is a shortcut for ``before = after = constant`` for
all axes.
Default is 0.
@@ -1103,9 +650,8 @@ def pad(array, pad_width, mode, **kwargs):
think about with a rank 2 array where the corners of the padded array
are calculated by using padded values from the first axis.
- The padding function, if used, should return a rank 1 array equal in
- length to the vector argument with padded values replaced. It has the
- following signature::
+ The padding function, if used, should modify a rank 1 array in-place. It
+ has the following signature::
padding_func(vector, iaxis_pad_width, iaxis, kwargs)
@@ -1113,7 +659,7 @@ def pad(array, pad_width, mode, **kwargs):
vector : ndarray
A rank 1 array already padded with zeros. Padded values are
- vector[:pad_tuple[0]] and vector[-pad_tuple[1]:].
+ vector[:iaxis_pad_width[0]] and vector[-iaxis_pad_width[1]:].
iaxis_pad_width : tuple
A 2-tuple of ints, iaxis_pad_width[0] represents the number of
values padded at the beginning of vector where
@@ -1127,11 +673,11 @@ def pad(array, pad_width, mode, **kwargs):
Examples
--------
>>> a = [1, 2, 3, 4, 5]
- >>> np.pad(a, (2,3), 'constant', constant_values=(4, 6))
- array([4, 4, 1, 2, 3, 4, 5, 6, 6, 6])
+ >>> np.pad(a, (2, 3), 'constant', constant_values=(4, 6))
+ array([4, 4, 1, ..., 6, 6, 6])
>>> np.pad(a, (2, 3), 'edge')
- array([1, 1, 1, 2, 3, 4, 5, 5, 5, 5])
+ array([1, 1, 1, ..., 5, 5, 5])
>>> np.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
@@ -1175,7 +721,6 @@ def pad(array, pad_width, mode, **kwargs):
... pad_value = kwargs.get('padder', 10)
... vector[:pad_width[0]] = pad_value
... vector[-pad_width[1]:] = pad_value
- ... return vector
>>> a = np.arange(6)
>>> a = a.reshape((2, 3))
>>> np.pad(a, 2, pad_with)
@@ -1193,15 +738,42 @@ def pad(array, pad_width, mode, **kwargs):
[100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100]])
"""
- if not np.asarray(pad_width).dtype.kind == 'i':
+ array = np.asarray(array)
+ pad_width = np.asarray(pad_width)
+
+ if not pad_width.dtype.kind == 'i':
raise TypeError('`pad_width` must be of integral type.')
- narray = np.array(array)
- pad_width = _validate_lengths(narray, pad_width)
+ # Broadcast to shape (array.ndim, 2)
+ pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
+
+ if callable(mode):
+ # Old behavior: Use user-supplied function with np.apply_along_axis
+ function = mode
+ # Create a new zero padded array
+ padded, _ = _pad_simple(array, pad_width, fill_value=0)
+ # And apply along each axis
+
+ for axis in range(padded.ndim):
+ # Iterate using ndindex as in apply_along_axis, but assuming that
+ # function operates inplace on the padded array.
- allowedkwargs = {
+ # view with the iteration axis at the end
+ view = np.moveaxis(padded, axis, -1)
+
+ # compute indices for the iteration axes, and append a trailing
+ # ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
+ inds = ndindex(view.shape[:-1])
+ inds = (ind + (Ellipsis,) for ind in inds)
+ for ind in inds:
+ function(view[ind], pad_width[axis], axis, kwargs)
+
+ return padded
+
+ # Make sure that no unsupported keywords were passed for the current mode
+ allowed_kwargs = {
+ 'empty': [], 'edge': [], 'wrap': [],
'constant': ['constant_values'],
- 'edge': [],
'linear_ramp': ['end_values'],
'maximum': ['stat_length'],
'mean': ['stat_length'],
@@ -1209,176 +781,101 @@ def pad(array, pad_width, mode, **kwargs):
'minimum': ['stat_length'],
'reflect': ['reflect_type'],
'symmetric': ['reflect_type'],
- 'wrap': [],
- }
-
- kwdefaults = {
- 'stat_length': None,
- 'constant_values': 0,
- 'end_values': 0,
- 'reflect_type': 'even',
- }
-
- if isinstance(mode, np.compat.basestring):
- # Make sure have allowed kwargs appropriate for mode
- for key in kwargs:
- if key not in allowedkwargs[mode]:
- raise ValueError('%s keyword not in allowed keywords %s' %
- (key, allowedkwargs[mode]))
-
- # Set kwarg defaults
- for kw in allowedkwargs[mode]:
- kwargs.setdefault(kw, kwdefaults[kw])
-
- # Need to only normalize particular keywords.
- for i in kwargs:
- if i == 'stat_length':
- kwargs[i] = _validate_lengths(narray, kwargs[i])
- if i in ['end_values', 'constant_values']:
- kwargs[i] = _normalize_shape(narray, kwargs[i],
- cast_to_int=False)
- else:
- # Drop back to old, slower np.apply_along_axis mode for user-supplied
- # vector function
- function = mode
-
- # Create a new padded array
- rank = list(range(narray.ndim))
- total_dim_increase = [np.sum(pad_width[i]) for i in rank]
- offset_slices = tuple(
- slice(pad_width[i][0], pad_width[i][0] + narray.shape[i])
- for i in rank)
- new_shape = np.array(narray.shape) + total_dim_increase
- newmat = np.zeros(new_shape, narray.dtype)
-
- # Insert the original array into the padded array
- newmat[offset_slices] = narray
-
- # This is the core of pad ...
- for iaxis in rank:
- np.apply_along_axis(function,
- iaxis,
- newmat,
- pad_width[iaxis],
- iaxis,
- kwargs)
- return newmat
-
- # If we get here, use new padding method
- newmat = narray.copy()
-
- # API preserved, but completely new algorithm which pads by building the
- # entire block to pad before/after `arr` with in one step, for each axis.
- if mode == 'constant':
- for axis, ((pad_before, pad_after), (before_val, after_val)) \
- in enumerate(zip(pad_width, kwargs['constant_values'])):
- newmat = _prepend_const(newmat, pad_before, before_val, axis)
- newmat = _append_const(newmat, pad_after, after_val, axis)
-
- elif mode == 'edge':
- for axis, (pad_before, pad_after) in enumerate(pad_width):
- newmat = _prepend_edge(newmat, pad_before, axis)
- newmat = _append_edge(newmat, pad_after, axis)
-
- elif mode == 'linear_ramp':
- for axis, ((pad_before, pad_after), (before_val, after_val)) \
- in enumerate(zip(pad_width, kwargs['end_values'])):
- newmat = _prepend_ramp(newmat, pad_before, before_val, axis)
- newmat = _append_ramp(newmat, pad_after, after_val, axis)
-
- elif mode == 'maximum':
- for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
- in enumerate(zip(pad_width, kwargs['stat_length'])):
- newmat = _prepend_max(newmat, pad_before, chunk_before, axis)
- newmat = _append_max(newmat, pad_after, chunk_after, axis)
-
- elif mode == 'mean':
- for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
- in enumerate(zip(pad_width, kwargs['stat_length'])):
- newmat = _prepend_mean(newmat, pad_before, chunk_before, axis)
- newmat = _append_mean(newmat, pad_after, chunk_after, axis)
-
- elif mode == 'median':
- for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
- in enumerate(zip(pad_width, kwargs['stat_length'])):
- newmat = _prepend_med(newmat, pad_before, chunk_before, axis)
- newmat = _append_med(newmat, pad_after, chunk_after, axis)
-
- elif mode == 'minimum':
- for axis, ((pad_before, pad_after), (chunk_before, chunk_after)) \
- in enumerate(zip(pad_width, kwargs['stat_length'])):
- newmat = _prepend_min(newmat, pad_before, chunk_before, axis)
- newmat = _append_min(newmat, pad_after, chunk_after, axis)
-
- elif mode == 'reflect':
- for axis, (pad_before, pad_after) in enumerate(pad_width):
- if narray.shape[axis] == 0:
- # Axes with non-zero padding cannot be empty.
- if pad_before > 0 or pad_after > 0:
- raise ValueError("There aren't any elements to reflect"
- " in axis {} of `array`".format(axis))
- # Skip zero padding on empty axes.
- continue
-
- # Recursive padding along any axis where `pad_amt` is too large
- # for indexing tricks. We can only safely pad the original axis
- # length, to keep the period of the reflections consistent.
- if ((pad_before > 0) or
- (pad_after > 0)) and newmat.shape[axis] == 1:
+ }
+ try:
+ unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
+ except KeyError:
+ raise ValueError("mode '{}' is not supported".format(mode))
+ if unsupported_kwargs:
+ raise ValueError("unsupported keyword arguments for mode '{}': {}"
+ .format(mode, unsupported_kwargs))
+
+ stat_functions = {"maximum": np.amax, "minimum": np.amin,
+ "mean": np.mean, "median": np.median}
+
+ # Create array with final shape and original values
+ # (padded area is undefined)
+ padded, original_area_slice = _pad_simple(array, pad_width)
+ # And prepare iteration over all dimensions
+ # (zipping may be more readable than using enumerate)
+ axes = range(padded.ndim)
+
+ if mode == "constant":
+ values = kwargs.get("constant_values", 0)
+ values = _as_pairs(values, padded.ndim)
+ for axis, width_pair, value_pair in zip(axes, pad_width, values):
+ roi = _view_roi(padded, original_area_slice, axis)
+ _set_pad_area(roi, axis, width_pair, value_pair)
+
+ elif mode == "empty":
+ pass # Do nothing as _pad_simple already returned the correct result
+
+ elif array.size == 0:
+ # Only modes "constant" and "empty" can extend empty axes, all other
+ # modes depend on `array` not being empty
+ # -> ensure every empty axis is only "padded with 0"
+ for axis, width_pair in zip(axes, pad_width):
+ if array.shape[axis] == 0 and any(width_pair):
+ raise ValueError(
+ "can't extend empty axis {} using modes other than "
+ "'constant' or 'empty'".format(axis)
+ )
+ # passed, don't need to do anything more as _pad_simple already
+ # returned the correct result
+
+ elif mode == "edge":
+ for axis, width_pair in zip(axes, pad_width):
+ roi = _view_roi(padded, original_area_slice, axis)
+ edge_pair = _get_edges(roi, axis, width_pair)
+ _set_pad_area(roi, axis, width_pair, edge_pair)
+
+ elif mode == "linear_ramp":
+ end_values = kwargs.get("end_values", 0)
+ end_values = _as_pairs(end_values, padded.ndim)
+ for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
+ roi = _view_roi(padded, original_area_slice, axis)
+ ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
+ _set_pad_area(roi, axis, width_pair, ramp_pair)
+
+ elif mode in stat_functions:
+ func = stat_functions[mode]
+ length = kwargs.get("stat_length", None)
+ length = _as_pairs(length, padded.ndim, as_index=True)
+ for axis, width_pair, length_pair in zip(axes, pad_width, length):
+ roi = _view_roi(padded, original_area_slice, axis)
+ stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
+ _set_pad_area(roi, axis, width_pair, stat_pair)
+
+ elif mode in {"reflect", "symmetric"}:
+ method = kwargs.get("reflect_type", "even")
+ include_edge = True if mode == "symmetric" else False
+ for axis, (left_index, right_index) in zip(axes, pad_width):
+ if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
# Extending singleton dimension for 'reflect' is legacy
# behavior; it really should raise an error.
- newmat = _prepend_edge(newmat, pad_before, axis)
- newmat = _append_edge(newmat, pad_after, axis)
+ edge_pair = _get_edges(padded, axis, (left_index, right_index))
+ _set_pad_area(
+ padded, axis, (left_index, right_index), edge_pair)
continue
- method = kwargs['reflect_type']
- safe_pad = newmat.shape[axis] - 1
- while ((pad_before > safe_pad) or (pad_after > safe_pad)):
- pad_iter_b = min(safe_pad,
- safe_pad * (pad_before // safe_pad))
- pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
- newmat = _pad_ref(newmat, (pad_iter_b,
- pad_iter_a), method, axis)
- pad_before -= pad_iter_b
- pad_after -= pad_iter_a
- safe_pad += pad_iter_b + pad_iter_a
- newmat = _pad_ref(newmat, (pad_before, pad_after), method, axis)
-
- elif mode == 'symmetric':
- for axis, (pad_before, pad_after) in enumerate(pad_width):
- # Recursive padding along any axis where `pad_amt` is too large
- # for indexing tricks. We can only safely pad the original axis
- # length, to keep the period of the reflections consistent.
- method = kwargs['reflect_type']
- safe_pad = newmat.shape[axis]
- while ((pad_before > safe_pad) or
- (pad_after > safe_pad)):
- pad_iter_b = min(safe_pad,
- safe_pad * (pad_before // safe_pad))
- pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
- newmat = _pad_sym(newmat, (pad_iter_b,
- pad_iter_a), method, axis)
- pad_before -= pad_iter_b
- pad_after -= pad_iter_a
- safe_pad += pad_iter_b + pad_iter_a
- newmat = _pad_sym(newmat, (pad_before, pad_after), method, axis)
-
- elif mode == 'wrap':
- for axis, (pad_before, pad_after) in enumerate(pad_width):
- # Recursive padding along any axis where `pad_amt` is too large
- # for indexing tricks. We can only safely pad the original axis
- # length, to keep the period of the reflections consistent.
- safe_pad = newmat.shape[axis]
- while ((pad_before > safe_pad) or
- (pad_after > safe_pad)):
- pad_iter_b = min(safe_pad,
- safe_pad * (pad_before // safe_pad))
- pad_iter_a = min(safe_pad, safe_pad * (pad_after // safe_pad))
- newmat = _pad_wrap(newmat, (pad_iter_b, pad_iter_a), axis)
-
- pad_before -= pad_iter_b
- pad_after -= pad_iter_a
- safe_pad += pad_iter_b + pad_iter_a
- newmat = _pad_wrap(newmat, (pad_before, pad_after), axis)
-
- return newmat
+ roi = _view_roi(padded, original_area_slice, axis)
+ while left_index > 0 or right_index > 0:
+ # Iteratively pad until dimension is filled with reflected
+ # values. This is necessary if the pad area is larger than
+ # the length of the original values in the current dimension.
+ left_index, right_index = _set_reflect_both(
+ roi, axis, (left_index, right_index),
+ method, include_edge
+ )
+
+ elif mode == "wrap":
+ for axis, (left_index, right_index) in zip(axes, pad_width):
+ roi = _view_roi(padded, original_area_slice, axis)
+ while left_index > 0 or right_index > 0:
+ # Iteratively pad until dimension is filled with wrapped
+ # values. This is necessary if the pad area is larger than
+ # the length of the original values in the current dimension.
+ left_index, right_index = _set_wrap_both(
+ roi, axis, (left_index, right_index))
+
+ return padded
diff --git a/numpy/lib/arraysetops.py b/numpy/lib/arraysetops.py
index 62e9b6d50..2309f7e42 100644
--- a/numpy/lib/arraysetops.py
+++ b/numpy/lib/arraysetops.py
@@ -27,7 +27,14 @@ To do: Optionally return indices analogously to unique for all functions.
"""
from __future__ import division, absolute_import, print_function
+import functools
+
import numpy as np
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
__all__ = [
@@ -36,6 +43,11 @@ __all__ = [
]
+def _ediff1d_dispatcher(ary, to_end=None, to_begin=None):
+ return (ary, to_end, to_begin)
+
+
+@array_function_dispatch(_ediff1d_dispatcher)
def ediff1d(ary, to_end=None, to_begin=None):
"""
The differences between consecutive elements of an array.
@@ -70,7 +82,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
array([ 1, 2, 3, -7])
>>> np.ediff1d(x, to_begin=-99, to_end=np.array([88, 99]))
- array([-99, 1, 2, 3, -7, 88, 99])
+ array([-99, 1, 2, ..., -7, 88, 99])
The returned array is always 1D.
@@ -82,8 +94,7 @@ def ediff1d(ary, to_end=None, to_begin=None):
# force a 1d array
ary = np.asanyarray(ary).ravel()
- # we have unit tests enforcing
- # propagation of the dtype of input
+ # enforce propagation of the dtype of input
# ary to returned result
dtype_req = ary.dtype
@@ -94,23 +105,22 @@ def ediff1d(ary, to_end=None, to_begin=None):
if to_begin is None:
l_begin = 0
else:
- to_begin = np.asanyarray(to_begin)
- if not np.can_cast(to_begin, dtype_req):
- raise TypeError("dtype of to_begin must be compatible "
- "with input ary")
-
- to_begin = to_begin.ravel()
+ _to_begin = np.asanyarray(to_begin, dtype=dtype_req)
+ if not np.all(_to_begin == to_begin):
+ raise ValueError("cannot convert 'to_begin' to array with dtype "
+ "'%r' as required for input ary" % dtype_req)
+ to_begin = _to_begin.ravel()
l_begin = len(to_begin)
if to_end is None:
l_end = 0
else:
- to_end = np.asanyarray(to_end)
- if not np.can_cast(to_end, dtype_req):
- raise TypeError("dtype of to_end must be compatible "
- "with input ary")
-
- to_end = to_end.ravel()
+ _to_end = np.asanyarray(to_end, dtype=dtype_req)
+ # check that casting has not overflowed
+ if not np.all(_to_end == to_end):
+ raise ValueError("cannot convert 'to_end' to array with dtype "
+ "'%r' as required for input ary" % dtype_req)
+ to_end = _to_end.ravel()
l_end = len(to_end)
# do the calculation in place and copy to_begin and to_end
@@ -133,6 +143,12 @@ def _unpack_tuple(x):
return x
+def _unique_dispatcher(ar, return_index=None, return_inverse=None,
+ return_counts=None, axis=None):
+ return (ar,)
+
+
+@array_function_dispatch(_unique_dispatcher)
def unique(ar, return_index=False, return_inverse=False,
return_counts=False, axis=None):
"""
@@ -197,6 +213,7 @@ def unique(ar, return_index=False, return_inverse=False,
-----
When an axis is specified the subarrays indexed by the axis are sorted.
This is done by making the specified axis the first dimension of the array
+ (move the axis to the first dimension to keep the order of the other axes)
and then flattening the subarrays in C order. The flattened subarrays are
then viewed as a structured type with each element given a label, with the
effect that we end up with a 1-D array of structured types that can be
@@ -223,13 +240,11 @@ def unique(ar, return_index=False, return_inverse=False,
>>> a = np.array(['a', 'b', 'b', 'c', 'a'])
>>> u, indices = np.unique(a, return_index=True)
>>> u
- array(['a', 'b', 'c'],
- dtype='|S1')
+ array(['a', 'b', 'c'], dtype='<U1')
>>> indices
array([0, 1, 3])
>>> a[indices]
- array(['a', 'b', 'c'],
- dtype='|S1')
+ array(['a', 'b', 'c'], dtype='<U1')
Reconstruct the input array from the unique values:
@@ -238,9 +253,9 @@ def unique(ar, return_index=False, return_inverse=False,
>>> u
array([1, 2, 3, 4, 6])
>>> indices
- array([0, 1, 4, 3, 1, 2, 1])
+ array([0, 1, 4, ..., 1, 2, 1])
>>> u[indices]
- array([1, 2, 6, 4, 2, 3, 2])
+ array([1, 2, 6, ..., 2, 3, 2])
"""
ar = np.asanyarray(ar)
@@ -250,7 +265,7 @@ def unique(ar, return_index=False, return_inverse=False,
# axis was specified and not None
try:
- ar = np.swapaxes(ar, axis, 0)
+ ar = np.moveaxis(ar, axis, 0)
except np.AxisError:
# this removes the "axis1" or "axis2" prefix from the error message
raise np.AxisError(axis, ar.ndim)
@@ -271,7 +286,7 @@ def unique(ar, return_index=False, return_inverse=False,
def reshape_uniq(uniq):
uniq = uniq.view(orig_dtype)
uniq = uniq.reshape(-1, *orig_shape[1:])
- uniq = np.swapaxes(uniq, 0, axis)
+ uniq = np.moveaxis(uniq, 0, axis)
return uniq
output = _unique1d(consolidated, return_index,
@@ -313,6 +328,12 @@ def _unique1d(ar, return_index=False, return_inverse=False,
return ret
+def _intersect1d_dispatcher(
+ ar1, ar2, assume_unique=None, return_indices=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_intersect1d_dispatcher)
def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
"""
Find the intersection of two arrays.
@@ -363,6 +384,7 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
To return the indices of the values common to the input arrays
along with the intersected values:
+
>>> x = np.array([1, 1, 2, 3, 4])
>>> y = np.array([2, 1, 4, 6])
>>> xy, x_ind, y_ind = np.intersect1d(x, y, return_indices=True)
@@ -408,6 +430,11 @@ def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
return int1d
+def _setxor1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setxor1d_dispatcher)
def setxor1d(ar1, ar2, assume_unique=False):
"""
Find the set exclusive-or of two arrays.
@@ -450,6 +477,11 @@ def setxor1d(ar1, ar2, assume_unique=False):
return aux[flag[1:] & flag[:-1]]
+def _in1d_dispatcher(ar1, ar2, assume_unique=None, invert=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_in1d_dispatcher)
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of a 1-D array is also present in a second array.
@@ -562,6 +594,11 @@ def in1d(ar1, ar2, assume_unique=False, invert=False):
return ret[rev_idx]
+def _isin_dispatcher(element, test_elements, assume_unique=None, invert=None):
+ return (element, test_elements)
+
+
+@array_function_dispatch(_isin_dispatcher)
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over `element` only.
@@ -622,8 +659,8 @@ def isin(element, test_elements, assume_unique=False, invert=False):
>>> test_elements = [1, 2, 4, 8]
>>> mask = np.isin(element, test_elements)
>>> mask
- array([[ False, True],
- [ True, False]])
+ array([[False, True],
+ [ True, False]])
>>> element[mask]
array([2, 4])
@@ -637,7 +674,7 @@ def isin(element, test_elements, assume_unique=False, invert=False):
>>> mask = np.isin(element, test_elements, invert=True)
>>> mask
array([[ True, False],
- [ False, True]])
+ [False, True]])
>>> element[mask]
array([0, 6])
@@ -646,20 +683,25 @@ def isin(element, test_elements, assume_unique=False, invert=False):
>>> test_set = {1, 2, 4, 8}
>>> np.isin(element, test_set)
- array([[ False, False],
- [ False, False]])
+ array([[False, False],
+ [False, False]])
Casting the set to a list gives the expected result:
>>> np.isin(element, list(test_set))
- array([[ False, True],
- [ True, False]])
+ array([[False, True],
+ [ True, False]])
"""
element = np.asarray(element)
return in1d(element, test_elements, assume_unique=assume_unique,
invert=invert).reshape(element.shape)
+def _union1d_dispatcher(ar1, ar2):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_union1d_dispatcher)
def union1d(ar1, ar2):
"""
Find the union of two arrays.
@@ -695,11 +737,17 @@ def union1d(ar1, ar2):
"""
return unique(np.concatenate((ar1, ar2), axis=None))
+
+def _setdiff1d_dispatcher(ar1, ar2, assume_unique=None):
+ return (ar1, ar2)
+
+
+@array_function_dispatch(_setdiff1d_dispatcher)
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Find the set difference of two arrays.
- Return the sorted, unique values in `ar1` that are not in `ar2`.
+ Return the unique values in `ar1` that are not in `ar2`.
Parameters
----------
@@ -714,7 +762,9 @@ def setdiff1d(ar1, ar2, assume_unique=False):
Returns
-------
setdiff1d : ndarray
- Sorted 1D array of values in `ar1` that are not in `ar2`.
+ 1D array of values in `ar1` that are not in `ar2`. The result
+ is sorted when `assume_unique=False`, but otherwise only sorted
+ if the input is sorted.
See Also
--------
diff --git a/numpy/lib/arrayterator.py b/numpy/lib/arrayterator.py
index f2d4fe9fd..c16668582 100644
--- a/numpy/lib/arrayterator.py
+++ b/numpy/lib/arrayterator.py
@@ -80,9 +80,8 @@ class Arrayterator(object):
>>> for subarr in a_itor:
... if not subarr.all():
- ... print(subarr, subarr.shape)
- ...
- [[[[0 1]]]] (1, 1, 1, 2)
+ ... print(subarr, subarr.shape) # doctest: +SKIP
+ >>> # [[[[0 1]]]] (1, 1, 1, 2)
"""
@@ -160,7 +159,7 @@ class Arrayterator(object):
... if not subarr:
... print(subarr, type(subarr))
...
- 0 <type 'numpy.int32'>
+ 0 <class 'numpy.int64'>
"""
for block in self:
diff --git a/numpy/lib/financial.py b/numpy/lib/financial.py
index 06fa1bd92..d72384e99 100644
--- a/numpy/lib/financial.py
+++ b/numpy/lib/financial.py
@@ -13,8 +13,15 @@ otherwise stated.
from __future__ import division, absolute_import, print_function
from decimal import Decimal
+import functools
import numpy as np
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
__all__ = ['fv', 'pmt', 'nper', 'ipmt', 'ppmt', 'pv', 'rate',
'irr', 'npv', 'mirr']
@@ -36,6 +43,12 @@ def _convert_when(when):
except (KeyError, TypeError):
return [_when_to_num[x] for x in when]
+
+def _fv_dispatcher(rate, nper, pmt, pv, when=None):
+ return (rate, nper, pmt, pv)
+
+
+@array_function_dispatch(_fv_dispatcher)
def fv(rate, nper, pmt, pv, when='end'):
"""
Compute the future value.
@@ -114,7 +127,7 @@ def fv(rate, nper, pmt, pv, when='end'):
>>> a = np.array((0.05, 0.06, 0.07))/12
>>> np.fv(a, 10*12, -100, -100)
- array([ 15692.92889434, 16569.87435405, 17509.44688102])
+ array([ 15692.92889434, 16569.87435405, 17509.44688102]) # may vary
"""
when = _convert_when(when)
@@ -124,6 +137,12 @@ def fv(rate, nper, pmt, pv, when='end'):
(1 + rate*when)*(temp - 1)/rate)
return -(pv*temp + pmt*fact)
+
+def _pmt_dispatcher(rate, nper, pv, fv=None, when=None):
+ return (rate, nper, pv, fv)
+
+
+@array_function_dispatch(_pmt_dispatcher)
def pmt(rate, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal plus interest.
@@ -216,6 +235,12 @@ def pmt(rate, nper, pv, fv=0, when='end'):
(1 + masked_rate*when)*(temp - 1)/masked_rate)
return -(fv + pv*temp) / fact
+
+def _nper_dispatcher(rate, pmt, pv, fv=None, when=None):
+ return (rate, pmt, pv, fv)
+
+
+@array_function_dispatch(_nper_dispatcher)
def nper(rate, pmt, pv, fv=0, when='end'):
"""
Compute the number of periodic payments.
@@ -250,7 +275,7 @@ def nper(rate, pmt, pv, fv=0, when='end'):
If you only had $150/month to pay towards the loan, how long would it take
to pay-off a loan of $8,000 at 7% annual interest?
- >>> print(round(np.nper(0.07/12, -150, 8000), 5))
+ >>> print(np.round(np.nper(0.07/12, -150, 8000), 5))
64.07335
So, over 64 months would be required to pay off the loan.
@@ -261,10 +286,10 @@ def nper(rate, pmt, pv, fv=0, when='end'):
>>> np.nper(*(np.ogrid[0.07/12: 0.08/12: 0.01/12,
... -150 : -99 : 50 ,
... 8000 : 9001 : 1000]))
- array([[[ 64.07334877, 74.06368256],
- [ 108.07548412, 127.99022654]],
- [[ 66.12443902, 76.87897353],
- [ 114.70165583, 137.90124779]]])
+ array([[[ 64.07334877, 74.06368256],
+ [108.07548412, 127.99022654]],
+ [[ 66.12443902, 76.87897353],
+ [114.70165583, 137.90124779]]])
"""
when = _convert_when(when)
@@ -284,6 +309,12 @@ def nper(rate, pmt, pv, fv=0, when='end'):
B = np.log((-fv+z) / (pv+z))/np.log(1+rate)
return np.where(rate == 0, A, B)
+
+def _ipmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ return (rate, per, nper, pv, fv)
+
+
+@array_function_dispatch(_ipmt_dispatcher)
def ipmt(rate, per, nper, pv, fv=0, when='end'):
"""
Compute the interest portion of a payment.
@@ -379,6 +410,7 @@ def ipmt(rate, per, nper, pv, fv=0, when='end'):
pass
return ipmt
+
def _rbl(rate, per, pmt, pv, when):
"""
This function is here to simply have a different name for the 'fv'
@@ -388,6 +420,12 @@ def _rbl(rate, per, pmt, pv, when):
"""
return fv(rate, (per - 1), pmt, pv, when)
+
+def _ppmt_dispatcher(rate, per, nper, pv, fv=None, when=None):
+ return (rate, per, nper, pv, fv)
+
+
+@array_function_dispatch(_ppmt_dispatcher)
def ppmt(rate, per, nper, pv, fv=0, when='end'):
"""
Compute the payment against loan principal.
@@ -416,6 +454,12 @@ def ppmt(rate, per, nper, pv, fv=0, when='end'):
total = pmt(rate, nper, pv, fv, when)
return total - ipmt(rate, per, nper, pv, fv, when)
+
+def _pv_dispatcher(rate, nper, pmt, fv=None, when=None):
+ return (rate, nper, nper, pv, fv)
+
+
+@array_function_dispatch(_pv_dispatcher)
def pv(rate, nper, pmt, fv=0, when='end'):
"""
Compute the present value.
@@ -495,7 +539,7 @@ def pv(rate, nper, pmt, fv=0, when='end'):
>>> a = np.array((0.05, 0.04, 0.03))/12
>>> np.pv(a, 10*12, -100, 15692.93)
- array([ -100.00067132, -649.26771385, -1273.78633713])
+ array([ -100.00067132, -649.26771385, -1273.78633713]) # may vary
So, to end up with the same $15692.93 under the same $100 per month
"savings plan," for annual interest rates of 4% and 3%, one would
@@ -520,6 +564,12 @@ def _g_div_gp(r, n, p, x, y, w):
(n*t2*x - p*(t1 - 1)*(r*w + 1)/(r**2) + n*p*t2*(r*w + 1)/r +
p*(t1 - 1)*w/r))
+
+def _rate_dispatcher(nper, pmt, pv, fv, when=None, guess=None, tol=None,
+ maxiter=None):
+ return (nper, pmt, pv, fv)
+
+
# Use Newton's iteration until the change is less than 1e-6
# for all values or a maximum of 100 iterations is reached.
# Newton's rule is
@@ -527,6 +577,7 @@ def _g_div_gp(r, n, p, x, y, w):
# where
# g(r) is the formula
# g'(r) is the derivative with respect to r.
+@array_function_dispatch(_rate_dispatcher)
def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
"""
Compute the rate of interest per period.
@@ -598,6 +649,12 @@ def rate(nper, pmt, pv, fv, when='end', guess=None, tol=None, maxiter=100):
else:
return rn
+
+def _irr_dispatcher(values):
+ return (values,)
+
+
+@array_function_dispatch(_irr_dispatcher)
def irr(values):
"""
Return the Internal Rate of Return (IRR).
@@ -647,19 +704,17 @@ def irr(values):
Examples
--------
- >>> round(irr([-100, 39, 59, 55, 20]), 5)
+ >>> round(np.irr([-100, 39, 59, 55, 20]), 5)
0.28095
- >>> round(irr([-100, 0, 0, 74]), 5)
+ >>> round(np.irr([-100, 0, 0, 74]), 5)
-0.0955
- >>> round(irr([-100, 100, 0, -7]), 5)
+ >>> round(np.irr([-100, 100, 0, -7]), 5)
-0.0833
- >>> round(irr([-100, 100, 0, 7]), 5)
+ >>> round(np.irr([-100, 100, 0, 7]), 5)
0.06206
- >>> round(irr([-5, 10.5, 1, -8, 1]), 5)
+ >>> round(np.irr([-5, 10.5, 1, -8, 1]), 5)
0.0886
- (Compare with the Example given for numpy.lib.financial.npv)
-
"""
# `np.roots` call is why this function does not support Decimal type.
#
@@ -677,6 +732,12 @@ def irr(values):
rate = rate.item(np.argmin(np.abs(rate)))
return rate
+
+def _npv_dispatcher(rate, values):
+ return (values,)
+
+
+@array_function_dispatch(_npv_dispatcher)
def npv(rate, values):
"""
Returns the NPV (Net Present Value) of a cash flow series.
@@ -700,6 +761,15 @@ def npv(rate, values):
The NPV of the input cash flow series `values` at the discount
`rate`.
+ Warnings
+ --------
+ ``npv`` considers a series of cashflows starting in the present (t = 0).
+ NPV can also be defined with a series of future cashflows, paid at the
+ end, rather than the start, of each period. If future cashflows are used,
+ the first cashflow `values[0]` must be zeroed and added to the net
+ present value of the future cashflows. This is demonstrated in the
+ examples.
+
Notes
-----
Returns the result of: [G]_
@@ -713,15 +783,35 @@ def npv(rate, values):
Examples
--------
- >>> np.npv(0.281,[-100, 39, 59, 55, 20])
- -0.0084785916384548798
+ Consider a potential project with an initial investment of $40 000 and
+ projected cashflows of $5 000, $8 000, $12 000 and $30 000 at the end of
+ each period discounted at a rate of 8% per period. To find the project's
+ net present value:
+
+ >>> rate, cashflows = 0.08, [-40_000, 5_000, 8_000, 12_000, 30_000]
+ >>> np.npv(rate, cashflows).round(5)
+ 3065.22267
- (Compare with the Example given for numpy.lib.financial.irr)
+ It may be preferable to split the projected cashflow into an initial
+ investment and expected future cashflows. In this case, the value of
+ the initial cashflow is zero and the initial investment is later added
+ to the future cashflows net present value:
+
+ >>> initial_cashflow = cashflows[0]
+ >>> cashflows[0] = 0
+ >>> np.round(np.npv(rate, cashflows) + initial_cashflow, 5)
+ 3065.22267
"""
values = np.asarray(values)
return (values / (1+rate)**np.arange(0, len(values))).sum(axis=0)
+
+def _mirr_dispatcher(values, finance_rate, reinvest_rate):
+ return (values,)
+
+
+@array_function_dispatch(_mirr_dispatcher)
def mirr(values, finance_rate, reinvest_rate):
"""
Modified internal rate of return.
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index ef5ec57e3..1ecd72815 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -146,10 +146,17 @@ The description of the fourth element of the header therefore has become:
"The next 4 bytes form a little-endian unsigned int: the length of the header
data HEADER_LEN."
+Format Version 3.0
+------------------
+
+This version replaces the ASCII string (which in practice was latin1) with
+a utf8-encoded string, so supports structured types with any unicode field
+names.
+
Notes
-----
The ``.npy`` format, including motivation for creating it and a comparison of
-alternatives, is described in the `"npy-format" NEP
+alternatives, is described in the `"npy-format" NEP
<https://www.numpy.org/neps/nep-0001-npy-format.html>`_, however details have
evolved with time and this document is more current.
@@ -161,12 +168,13 @@ import sys
import io
import warnings
from numpy.lib.utils import safe_eval
-from numpy.compat import asbytes, asstr, isfileobj, long, basestring
+from numpy.compat import (
+ isfileobj, long, os_fspath, pickle
+ )
+
+
+__all__ = []
-if sys.version_info[0] >= 3:
- import pickle
-else:
- import cPickle as pickle
MAGIC_PREFIX = b'\x93NUMPY'
MAGIC_LEN = len(MAGIC_PREFIX) + 2
@@ -175,10 +183,16 @@ BUFFER_SIZE = 2**18 # size of buffer for reading npz files in bytes
# difference between version 1.0 and 2.0 is a 4 byte (I) header length
# instead of 2 bytes (H) allowing storage of large structured arrays
+_header_size_info = {
+ (1, 0): ('<H', 'latin1'),
+ (2, 0): ('<I', 'latin1'),
+ (3, 0): ('<I', 'utf8'),
+}
+
def _check_version(version):
- if version not in [(1, 0), (2, 0), None]:
- msg = "we only support format version (1,0) and (2, 0), not %s"
+ if version not in [(1, 0), (2, 0), (3, 0), None]:
+ msg = "we only support format version (1,0), (2,0), and (3,0), not %s"
raise ValueError(msg % (version,))
def magic(major, minor):
@@ -260,6 +274,47 @@ def dtype_to_descr(dtype):
else:
return dtype.str
+def descr_to_dtype(descr):
+ '''
+ descr may be stored as dtype.descr, which is a list of
+ (name, format, [shape]) tuples where format may be a str or a tuple.
+ Offsets are not explicitly saved, rather empty fields with
+ name, format == '', '|Vn' are added as padding.
+
+ This function reverses the process, eliminating the empty padding fields.
+ '''
+ if isinstance(descr, str):
+ # No padding removal needed
+ return numpy.dtype(descr)
+ elif isinstance(descr, tuple):
+ # subtype, will always have a shape descr[1]
+ dt = descr_to_dtype(descr[0])
+ return numpy.dtype((dt, descr[1]))
+ fields = []
+ offset = 0
+ for field in descr:
+ if len(field) == 2:
+ name, descr_str = field
+ dt = descr_to_dtype(descr_str)
+ else:
+ name, descr_str, shape = field
+ dt = numpy.dtype((descr_to_dtype(descr_str), shape))
+
+ # Ignore padding bytes, which will be void bytes with '' as name
+        # Once support for blank names is removed, only "if name == ''" needed
+ is_pad = (name == '' and dt.type is numpy.void and dt.names is None)
+ if not is_pad:
+ fields.append((name, dt, offset))
+
+ offset += dt.itemsize
+
+ names, formats, offsets = zip(*fields)
+ # names may be (title, names) tuples
+ nametups = (n if isinstance(n, tuple) else (None, n) for n in names)
+ titles, names = zip(*nametups)
+ return numpy.dtype({'names': names, 'formats': formats, 'titles': titles,
+ 'offsets': offsets, 'itemsize': offset})
+
def header_data_from_array_1_0(array):
""" Get the dictionary of header metadata from a numpy.ndarray.
@@ -287,6 +342,56 @@ def header_data_from_array_1_0(array):
d['descr'] = dtype_to_descr(array.dtype)
return d
+
+def _wrap_header(header, version):
+ """
+ Takes a stringified header, and attaches the prefix and padding to it
+ """
+ import struct
+ assert version is not None
+ fmt, encoding = _header_size_info[version]
+ if not isinstance(header, bytes): # always true on python 3
+ header = header.encode(encoding)
+ hlen = len(header) + 1
+ padlen = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize(fmt) + hlen) % ARRAY_ALIGN)
+ try:
+ header_prefix = magic(*version) + struct.pack(fmt, hlen + padlen)
+ except struct.error:
+ msg = "Header length {} too big for version={}".format(hlen, version)
+ raise ValueError(msg)
+
+ # Pad the header with spaces and a final newline such that the magic
+ # string, the header-length short and the header are aligned on a
+ # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
+ # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
+ # offset must be page-aligned (i.e. the beginning of the file).
+ return header_prefix + header + b' '*padlen + b'\n'
+
+
+def _wrap_header_guess_version(header):
+ """
+ Like `_wrap_header`, but chooses an appropriate version given the contents
+ """
+ try:
+ return _wrap_header(header, (1, 0))
+ except ValueError:
+ pass
+
+ try:
+ ret = _wrap_header(header, (2, 0))
+ except UnicodeEncodeError:
+ pass
+ else:
+        warnings.warn("Stored array in format 2.0. It can only be "
+ "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ return ret
+
+ header = _wrap_header(header, (3, 0))
+ warnings.warn("Stored array in format 3.0. It can only be "
+ "read by NumPy >= 1.17", UserWarning, stacklevel=2)
+ return header
+
+
def _write_array_header(fp, d, version=None):
""" Write the header for an array and returns the version used
@@ -300,48 +405,19 @@ def _write_array_header(fp, d, version=None):
None means use oldest that works
explicit version will raise a ValueError if the format does not
allow saving this data. Default: None
- Returns
- -------
- version : tuple of int
- the file version which needs to be used to store the data
"""
- import struct
header = ["{"]
for key, value in sorted(d.items()):
# Need to use repr here, since we eval these when reading
header.append("'%s': %s, " % (key, repr(value)))
header.append("}")
header = "".join(header)
- header = asbytes(_filter_header(header))
-
- hlen = len(header) + 1 # 1 for newline
- padlen_v1 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<H') + hlen) % ARRAY_ALIGN)
- padlen_v2 = ARRAY_ALIGN - ((MAGIC_LEN + struct.calcsize('<I') + hlen) % ARRAY_ALIGN)
-
- # Which version(s) we write depends on the total header size; v1 has a max of 65535
- if hlen + padlen_v1 < 2**16 and version in (None, (1, 0)):
- version = (1, 0)
- header_prefix = magic(1, 0) + struct.pack('<H', hlen + padlen_v1)
- topad = padlen_v1
- elif hlen + padlen_v2 < 2**32 and version in (None, (2, 0)):
- version = (2, 0)
- header_prefix = magic(2, 0) + struct.pack('<I', hlen + padlen_v2)
- topad = padlen_v2
+ header = _filter_header(header)
+ if version is None:
+ header = _wrap_header_guess_version(header)
else:
- msg = "Header length %s too big for version=%s"
- msg %= (hlen, version)
- raise ValueError(msg)
-
- # Pad the header with spaces and a final newline such that the magic
- # string, the header-length short and the header are aligned on a
- # ARRAY_ALIGN byte boundary. This supports memory mapping of dtypes
- # aligned up to ARRAY_ALIGN on systems like Linux where mmap()
- # offset must be page-aligned (i.e. the beginning of the file).
- header = header + b' '*topad + b'\n'
-
- fp.write(header_prefix)
+ header = _wrap_header(header, version)
fp.write(header)
- return version
def write_array_header_1_0(fp, d):
""" Write the header for an array using the 1.0 format.
@@ -444,7 +520,7 @@ def _filter_header(s):
Parameters
----------
- s : byte string
+ s : string
Npy file header.
Returns
@@ -462,7 +538,7 @@ def _filter_header(s):
tokens = []
last_token_was_number = False
# adding newline as python 2.7.5 workaround
- string = asstr(s) + "\n"
+ string = s + "\n"
for token in tokenize.generate_tokens(StringIO(string).readline):
token_type = token[0]
token_string = token[1]
@@ -484,16 +560,15 @@ def _read_array_header(fp, version):
# Read an unsigned, little-endian short int which has the length of the
# header.
import struct
- if version == (1, 0):
- hlength_type = '<H'
- elif version == (2, 0):
- hlength_type = '<I'
- else:
- raise ValueError("Invalid version %r" % version)
+ hinfo = _header_size_info.get(version)
+ if hinfo is None:
+ raise ValueError("Invalid version {!r}".format(version))
+ hlength_type, encoding = hinfo
hlength_str = _read_bytes(fp, struct.calcsize(hlength_type), "array header length")
header_length = struct.unpack(hlength_type, hlength_str)[0]
header = _read_bytes(fp, header_length, "array header")
+ header = header.decode(encoding)
# The header is a pretty-printed string representation of a literal
# Python dictionary with trailing newlines padded to a ARRAY_ALIGN byte
@@ -505,29 +580,29 @@ def _read_array_header(fp, version):
try:
d = safe_eval(header)
except SyntaxError as e:
- msg = "Cannot parse header: %r\nException: %r"
- raise ValueError(msg % (header, e))
+ msg = "Cannot parse header: {!r}\nException: {!r}"
+ raise ValueError(msg.format(header, e))
if not isinstance(d, dict):
- msg = "Header is not a dictionary: %r"
- raise ValueError(msg % d)
+ msg = "Header is not a dictionary: {!r}"
+ raise ValueError(msg.format(d))
keys = sorted(d.keys())
if keys != ['descr', 'fortran_order', 'shape']:
- msg = "Header does not contain the correct keys: %r"
- raise ValueError(msg % (keys,))
+ msg = "Header does not contain the correct keys: {!r}"
+ raise ValueError(msg.format(keys))
# Sanity-check the values.
if (not isinstance(d['shape'], tuple) or
not numpy.all([isinstance(x, (int, long)) for x in d['shape']])):
- msg = "shape is not valid: %r"
- raise ValueError(msg % (d['shape'],))
+ msg = "shape is not valid: {!r}"
+ raise ValueError(msg.format(d['shape']))
if not isinstance(d['fortran_order'], bool):
- msg = "fortran_order is not a valid bool: %r"
- raise ValueError(msg % (d['fortran_order'],))
+ msg = "fortran_order is not a valid bool: {!r}"
+ raise ValueError(msg.format(d['fortran_order']))
try:
- dtype = numpy.dtype(d['descr'])
+ dtype = descr_to_dtype(d['descr'])
except TypeError as e:
- msg = "descr is not a valid dtype descriptor: %r"
- raise ValueError(msg % (d['descr'],))
+ msg = "descr is not a valid dtype descriptor: {!r}"
+ raise ValueError(msg.format(d['descr']))
return d['shape'], d['fortran_order'], dtype
@@ -568,12 +643,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
"""
_check_version(version)
- used_ver = _write_array_header(fp, header_data_from_array_1_0(array),
- version)
- # this warning can be removed when 1.9 has aged enough
- if version != (2, 0) and used_ver == (2, 0):
- warnings.warn("Stored array in format 2.0. It can only be"
- "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ _write_array_header(fp, header_data_from_array_1_0(array), version)
if array.itemsize == 0:
buffersize = 0
@@ -583,14 +653,13 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
if array.dtype.hasobject:
# We contain Python objects so we cannot write out the data
- # directly. Instead, we will pickle it out with version 2 of the
- # pickle protocol.
+ # directly. Instead, we will pickle it out
if not allow_pickle:
raise ValueError("Object arrays cannot be saved when "
"allow_pickle=False")
if pickle_kwargs is None:
pickle_kwargs = {}
- pickle.dump(array, fp, protocol=2, **pickle_kwargs)
+ pickle.dump(array, fp, protocol=3, **pickle_kwargs)
elif array.flags.f_contiguous and not array.flags.c_contiguous:
if isfileobj(fp):
array.T.tofile(fp)
@@ -609,7 +678,7 @@ def write_array(fp, array, version=None, allow_pickle=True, pickle_kwargs=None):
fp.write(chunk.tobytes('C'))
-def read_array(fp, allow_pickle=True, pickle_kwargs=None):
+def read_array(fp, allow_pickle=False, pickle_kwargs=None):
"""
Read an array from an NPY file.
@@ -619,7 +688,11 @@ def read_array(fp, allow_pickle=True, pickle_kwargs=None):
If this is not a real file object, then this may take extra memory
and time.
allow_pickle : bool, optional
- Whether to allow reading pickled data. Default: True
+        Whether to allow reading pickled data. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
pickle_kwargs : dict
Additional keyword arguments to pass to pickle.load. These are only
useful when loading object arrays saved on Python 2 when using
@@ -709,7 +782,7 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
Parameters
----------
- filename : str
+ filename : str or path-like
The name of the file on disk. This may *not* be a file-like
object.
mode : str, optional
@@ -750,9 +823,9 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
memmap
"""
- if not isinstance(filename, basestring):
- raise ValueError("Filename must be a string. Memmap cannot use"
- " existing file handles.")
+ if isfileobj(filename):
+ raise ValueError("Filename must be a string or a path-like object."
+ " Memmap cannot use existing file handles.")
if 'w' in mode:
# We are creating the file, not reading it.
@@ -770,20 +843,12 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
shape=shape,
)
# If we got here, then it should be safe to create the file.
- fp = open(filename, mode+'b')
- try:
- used_ver = _write_array_header(fp, d, version)
- # this warning can be removed when 1.9 has aged enough
- if version != (2, 0) and used_ver == (2, 0):
- warnings.warn("Stored array in format 2.0. It can only be"
- "read by NumPy >= 1.9", UserWarning, stacklevel=2)
+ with open(os_fspath(filename), mode+'b') as fp:
+ _write_array_header(fp, d, version)
offset = fp.tell()
- finally:
- fp.close()
else:
# Read the header of the file first.
- fp = open(filename, 'rb')
- try:
+ with open(os_fspath(filename), 'rb') as fp:
version = read_magic(fp)
_check_version(version)
@@ -792,8 +857,6 @@ def open_memmap(filename, mode='r+', dtype=None, shape=None,
msg = "Array can't be memory-mapped: Python objects in dtype."
raise ValueError(msg)
offset = fp.tell()
- finally:
- fp.close()
if fortran_order:
order = 'F'
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index e2a8f4bc2..c39c2eea1 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1,11 +1,12 @@
from __future__ import division, absolute_import, print_function
try:
- # Accessing collections abstact classes from collections
+ # Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc
+import functools
import re
import sys
import warnings
@@ -26,9 +27,10 @@ from numpy.core.fromnumeric import (
ravel, nonzero, partition, mean, any, sum
)
from numpy.core.numerictypes import typecodes
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from numpy.core.function_base import add_newdoc
from numpy.lib.twodim_base import diag
-from .utils import deprecate
from numpy.core.multiarray import (
_insert, add_docstring, bincount, normalize_axis_index, _monotonicity,
interp as compiled_interp, interp_complex as compiled_interp_complex
@@ -43,6 +45,11 @@ if sys.version_info[0] < 3:
else:
import builtins
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
# needed in this module for compatibility
from numpy.lib.histograms import histogram, histogramdd
@@ -58,6 +65,11 @@ __all__ = [
]
+def _rot90_dispatcher(m, k=None, axes=None):
+ return (m,)
+
+
+@array_function_dispatch(_rot90_dispatcher)
def rot90(m, k=1, axes=(0,1)):
"""
Rotate an array by 90 degrees in the plane specified by axes.
@@ -144,6 +156,11 @@ def rot90(m, k=1, axes=(0,1)):
return flip(transpose(m, axes_list), axes[1])
+def _flip_dispatcher(m, axis=None):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
def flip(m, axis=None):
"""
Reverse the order of elements in an array along the given axis.
@@ -200,12 +217,12 @@ def flip(m, axis=None):
[2, 3]],
[[4, 5],
[6, 7]]])
- >>> flip(A, 0)
+ >>> np.flip(A, 0)
array([[[4, 5],
[6, 7]],
[[0, 1],
[2, 3]]])
- >>> flip(A, 1)
+ >>> np.flip(A, 1)
array([[[2, 3],
[0, 1]],
[[6, 7],
@@ -221,7 +238,7 @@ def flip(m, axis=None):
[[1, 0],
[3, 2]]])
>>> A = np.random.randn(3,4,5)
- >>> np.all(flip(A,2) == A[:,:,::-1,...])
+ >>> np.all(np.flip(A,2) == A[:,:,::-1,...])
True
"""
if not hasattr(m, 'ndim'):
@@ -237,6 +254,7 @@ def flip(m, axis=None):
return m[indexer]
+@set_module('numpy')
def iterable(y):
"""
Check whether or not an object can be iterated over.
@@ -268,6 +286,11 @@ def iterable(y):
return True
+def _average_dispatcher(a, axis=None, weights=None, returned=None):
+ return (a, weights)
+
+
+@array_function_dispatch(_average_dispatcher)
def average(a, axis=None, weights=None, returned=False):
"""
Compute the weighted average along the specified axis.
@@ -293,14 +316,17 @@ def average(a, axis=None, weights=None, returned=False):
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If `weights=None`, then all data in `a` are assumed to have a
- weight equal to one.
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`)
is returned, otherwise only the average is returned.
If `weights=None`, `sum_of_weights` is equivalent to the number of
elements over which the average is taken.
-
Returns
-------
retval, [sum_of_weights] : array_type or double
@@ -335,12 +361,12 @@ def average(a, axis=None, weights=None, returned=False):
Examples
--------
- >>> data = range(1,5)
+ >>> data = np.arange(1, 5)
>>> data
- [1, 2, 3, 4]
+ array([1, 2, 3, 4])
>>> np.average(data)
2.5
- >>> np.average(range(1,11), weights=range(10,0,-1))
+ >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
4.0
>>> data = np.arange(6).reshape((3,2))
@@ -349,13 +375,12 @@ def average(a, axis=None, weights=None, returned=False):
[2, 3],
[4, 5]])
>>> np.average(data, axis=1, weights=[1./4, 3./4])
- array([ 0.75, 2.75, 4.75])
+ array([0.75, 2.75, 4.75])
>>> np.average(data, weights=[1./4, 3./4])
-
Traceback (most recent call last):
- ...
+ ...
TypeError: Axis must be specified when shapes of a and weights differ.
-
+
>>> a = np.ones(5, dtype=np.float128)
>>> w = np.ones(5, dtype=np.complex64)
>>> avg = np.average(a, weights=w)
@@ -407,6 +432,7 @@ def average(a, axis=None, weights=None, returned=False):
return avg
+@set_module('numpy')
def asarray_chkfinite(a, dtype=None, order=None):
"""Convert the input to an array, checking for NaNs or Infs.
@@ -474,6 +500,15 @@ def asarray_chkfinite(a, dtype=None, order=None):
return a
+def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
+ yield x
+ # support the undocumented behavior of allowing scalars
+ if np.iterable(condlist):
+ for c in condlist:
+ yield c
+
+
+@array_function_dispatch(_piecewise_dispatcher)
def piecewise(x, condlist, funclist, *args, **kw):
"""
Evaluate a piecewise-defined function.
@@ -552,7 +587,7 @@ def piecewise(x, condlist, funclist, *args, **kw):
``x >= 0``.
>>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x])
- array([ 2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
+ array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5])
Apply the same function to a scalar value.
@@ -595,6 +630,14 @@ def piecewise(x, condlist, funclist, *args, **kw):
return y
+def _select_dispatcher(condlist, choicelist, default=None):
+ for c in condlist:
+ yield c
+ for c in choicelist:
+ yield c
+
+
+@array_function_dispatch(_select_dispatcher)
def select(condlist, choicelist, default=0):
"""
Return an array drawn from elements in choicelist, depending on conditions.
@@ -629,7 +672,7 @@ def select(condlist, choicelist, default=0):
>>> condlist = [x<3, x>5]
>>> choicelist = [x, x**2]
>>> np.select(condlist, choicelist)
- array([ 0, 1, 2, 0, 0, 0, 36, 49, 64, 81])
+ array([ 0, 1, 2, ..., 49, 64, 81])
"""
# Check the size of condlist and choicelist are the same, or abort.
@@ -639,11 +682,7 @@ def select(condlist, choicelist, default=0):
# Now that the dtype is known, handle the deprecated select([], []) case
if len(condlist) == 0:
- # 2014-02-24, 1.9
- warnings.warn("select with an empty condition list is not possible"
- "and will be deprecated",
- DeprecationWarning, stacklevel=2)
- return np.asarray(default)[()]
+ raise ValueError("select with an empty condition list is not possible")
choicelist = [np.asarray(choice) for choice in choicelist]
choicelist.append(np.asarray(default))
@@ -659,25 +698,11 @@ def select(condlist, choicelist, default=0):
choicelist = np.broadcast_arrays(*choicelist)
# If cond array is not an ndarray in boolean format or scalar bool, abort.
- deprecated_ints = False
for i in range(len(condlist)):
cond = condlist[i]
if cond.dtype.type is not np.bool_:
- if np.issubdtype(cond.dtype, np.integer):
- # A previous implementation accepted int ndarrays accidentally.
- # Supported here deliberately, but deprecated.
- condlist[i] = condlist[i].astype(bool)
- deprecated_ints = True
- else:
- raise ValueError(
- 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
-
- if deprecated_ints:
- # 2014-02-24, 1.9
- msg = "select condlists containing integer ndarrays is deprecated " \
- "and will be removed in the future. Use `.astype(bool)` to " \
- "convert to bools."
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ raise TypeError(
+ 'invalid entry {} in condlist: should be boolean ndarray'.format(i))
if choicelist[0].ndim == 0:
# This may be common, so avoid the call.
@@ -698,6 +723,11 @@ def select(condlist, choicelist, default=0):
return result
+def _copy_dispatcher(a, order=None):
+ return (a,)
+
+
+@array_function_dispatch(_copy_dispatcher)
def copy(a, order='K'):
"""
Return an array copy of the given object.
@@ -747,6 +777,13 @@ def copy(a, order='K'):
# Basic operations
+def _gradient_dispatcher(f, *varargs, **kwargs):
+ yield f
+ for v in varargs:
+ yield v
+
+
+@array_function_dispatch(_gradient_dispatcher)
def gradient(f, *varargs, **kwargs):
"""
Return the gradient of an N-dimensional array.
@@ -800,9 +837,9 @@ def gradient(f, *varargs, **kwargs):
--------
>>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
>>> np.gradient(f)
- array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+ array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
>>> np.gradient(f, 2)
- array([ 0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
+ array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])
Spacing can be also specified with an array that represents the coordinates
of the values F along the dimensions.
@@ -810,13 +847,13 @@ def gradient(f, *varargs, **kwargs):
>>> x = np.arange(f.size)
>>> np.gradient(f, x)
- array([ 1. , 1.5, 2.5, 3.5, 4.5, 5. ])
+ array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
Or a non uniform one:
>>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float)
>>> np.gradient(f, x)
- array([ 1. , 3. , 3.5, 6.7, 6.9, 2.5])
+ array([1. , 3. , 3.5, 6.7, 6.9, 2.5])
For two dimensional arrays, the return will be two arrays ordered by
axis. In this example the first array stands for the gradient in
@@ -824,8 +861,8 @@ def gradient(f, *varargs, **kwargs):
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
[array([[ 2., 2., -1.],
- [ 2., 2., -1.]]), array([[ 1. , 2.5, 4. ],
- [ 1. , 1. , 1. ]])]
+ [ 2., 2., -1.]]), array([[1. , 2.5, 4. ],
+ [1. , 1. , 1. ]])]
In this example the spacing is also specified:
uniform for axis=0 and non uniform for axis=1
@@ -834,17 +871,17 @@ def gradient(f, *varargs, **kwargs):
>>> y = [1., 1.5, 3.5]
>>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y)
[array([[ 1. , 1. , -0.5],
- [ 1. , 1. , -0.5]]), array([[ 2. , 2. , 2. ],
- [ 2. , 1.7, 0.5]])]
+ [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ],
+ [2. , 1.7, 0.5]])]
It is possible to specify how boundaries are treated using `edge_order`
>>> x = np.array([0, 1, 2, 3, 4])
>>> f = x**2
>>> np.gradient(f, edge_order=1)
- array([ 1., 2., 4., 6., 7.])
+ array([1., 2., 4., 6., 7.])
>>> np.gradient(f, edge_order=2)
- array([-0., 2., 4., 6., 8.])
+ array([0., 2., 4., 6., 8.])
The `axis` keyword can be used to specify a subset of axes of which the
gradient is calculated
@@ -1088,11 +1125,16 @@ def gradient(f, *varargs, **kwargs):
return outvals
+def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):
+ return (a, prepend, append)
+
+
+@array_function_dispatch(_diff_dispatcher)
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
"""
Calculate the n-th discrete difference along the given axis.
- The first difference is given by ``out[n] = a[n+1] - a[n]`` along
+ The first difference is given by ``out[i] = a[i+1] - a[i]`` along
the given axis, higher differences are calculated by using `diff`
recursively.
@@ -1107,11 +1149,13 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
The axis along which the difference is taken, default is the
last axis.
prepend, append : array_like, optional
- Values to prepend or append to "a" along axis prior to
+ Values to prepend or append to `a` along axis prior to
performing the difference. Scalar values are expanded to
arrays with length 1 in the direction of axis and the shape
of the input array in along all other axes. Otherwise the
- dimension and shape must match "a" except along axis.
+ dimension and shape must match `a` except along axis.
+
+ .. versionadded:: 1.16.0
Returns
-------
@@ -1141,7 +1185,7 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
>>> np.diff(u8_arr)
array([255], dtype=uint8)
>>> u8_arr[1,...] - u8_arr[0,...]
- array(255, np.uint8)
+ 255
If this is not desirable, then the array should be cast to a larger
integer type first:
@@ -1178,6 +1222,8 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
a = asanyarray(a)
nd = a.ndim
+ if nd == 0:
+ raise ValueError("diff requires input that is at least one dimensional")
axis = normalize_axis_index(axis, nd)
combined = []
@@ -1216,6 +1262,11 @@ def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
return a
+def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None):
+ return (x, xp, fp)
+
+
+@array_function_dispatch(_interp_dispatcher)
def interp(x, xp, fp, left=None, right=None, period=None):
"""
One-dimensional linear interpolation.
@@ -1263,9 +1314,13 @@ def interp(x, xp, fp, left=None, right=None, period=None):
Notes
-----
- Does not check that the x-coordinate sequence `xp` is increasing.
- If `xp` is not increasing, the results are nonsense.
- A simple check for increasing is::
+ The x-coordinate sequence is expected to be increasing, but this is not
+ explicitly enforced. However, if the sequence `xp` is non-increasing,
+ interpolation results are meaningless.
+
+ Note that, since NaN is unsortable, `xp` also cannot contain NaNs.
+
+ A simple check for `xp` being strictly increasing is::
np.all(np.diff(xp) > 0)
@@ -1276,7 +1331,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
>>> np.interp(2.5, xp, fp)
1.0
>>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
- array([ 3. , 3. , 2.5 , 0.56, 0. ])
+ array([3. , 3. , 2.5 , 0.56, 0. ])
>>> UNDEF = -99.0
>>> np.interp(3.14, xp, fp, right=UNDEF)
-99.0
@@ -1300,7 +1355,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
>>> xp = [190, -190, 350, -350]
>>> fp = [5, 10, 3, 4]
>>> np.interp(x, xp, fp, period=360)
- array([7.5, 5., 8.75, 6.25, 3., 3.25, 3.5, 3.75])
+ array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75])
Complex interpolation:
@@ -1308,7 +1363,7 @@ def interp(x, xp, fp, left=None, right=None, period=None):
>>> xp = [2,3,5]
>>> fp = [1.0j, 0, 2+3j]
>>> np.interp(x, xp, fp)
- array([ 0.+1.j , 1.+1.5j])
+ array([0.+1.j , 1.+1.5j])
"""
@@ -1348,6 +1403,11 @@ def interp(x, xp, fp, left=None, right=None, period=None):
return interp_func(x, xp, fp, left, right)
+def _angle_dispatcher(z, deg=None):
+ return (z,)
+
+
+@array_function_dispatch(_angle_dispatcher)
def angle(z, deg=False):
"""
Return the angle of the complex argument.
@@ -1362,9 +1422,9 @@ def angle(z, deg=False):
Returns
-------
angle : ndarray or scalar
- The counterclockwise angle from the positive real axis on
- the complex plane, with dtype as numpy.float64.
-
+ The counterclockwise angle from the positive real axis on the complex
+ plane in the range ``(-pi, pi]``, with dtype as numpy.float64.
+
..versionchanged:: 1.16.0
This function works on subclasses of ndarray like `ma.array`.
@@ -1376,7 +1436,7 @@ def angle(z, deg=False):
Examples
--------
>>> np.angle([1.0, 1.0j, 1+1j]) # in radians
- array([ 0. , 1.57079633, 0.78539816])
+ array([ 0. , 1.57079633, 0.78539816]) # may vary
>>> np.angle(1+1j, deg=True) # in degrees
45.0
@@ -1395,6 +1455,11 @@ def angle(z, deg=False):
return a
+def _unwrap_dispatcher(p, discont=None, axis=None):
+ return (p,)
+
+
+@array_function_dispatch(_unwrap_dispatcher)
def unwrap(p, discont=pi, axis=-1):
"""
Unwrap by changing deltas between values to 2*pi complement.
@@ -1431,9 +1496,9 @@ def unwrap(p, discont=pi, axis=-1):
>>> phase = np.linspace(0, np.pi, num=5)
>>> phase[3:] += np.pi
>>> phase
- array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531])
+ array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary
>>> np.unwrap(phase)
- array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ])
+ array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary
"""
p = asarray(p)
@@ -1451,6 +1516,11 @@ def unwrap(p, discont=pi, axis=-1):
return up
+def _sort_complex(a):
+ return (a,)
+
+
+@array_function_dispatch(_sort_complex)
def sort_complex(a):
"""
Sort a complex array using the real part first, then the imaginary part.
@@ -1468,10 +1538,10 @@ def sort_complex(a):
Examples
--------
>>> np.sort_complex([5, 3, 6, 2, 1])
- array([ 1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
+ array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])
>>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
- array([ 1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
+ array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
"""
b = array(a, copy=True)
@@ -1487,6 +1557,11 @@ def sort_complex(a):
return b
+def _trim_zeros(filt, trim=None):
+ return (filt,)
+
+
+@array_function_dispatch(_trim_zeros)
def trim_zeros(filt, trim='fb'):
"""
Trim the leading and/or trailing zeros from a 1-D array or sequence.
@@ -1512,7 +1587,7 @@ def trim_zeros(filt, trim='fb'):
array([1, 2, 3, 0, 2, 1])
>>> np.trim_zeros(a, 'b')
- array([0, 0, 0, 1, 2, 3, 0, 2, 1])
+ array([0, 0, 0, ..., 0, 2, 1])
The input data type is preserved, list/tuple in means list/tuple out.
@@ -1537,25 +1612,11 @@ def trim_zeros(filt, trim='fb'):
last = last - 1
return filt[first:last]
-
-@deprecate
-def unique(x):
- """
- This function is deprecated. Use numpy.lib.arraysetops.unique()
- instead.
- """
- try:
- tmp = x.flatten()
- if tmp.size == 0:
- return tmp
- tmp.sort()
- idx = concatenate(([True], tmp[1:] != tmp[:-1]))
- return tmp[idx]
- except AttributeError:
- items = sorted(set(x))
- return asarray(items)
+def _extract_dispatcher(condition, arr):
+ return (condition, arr)
+@array_function_dispatch(_extract_dispatcher)
def extract(condition, arr):
"""
Return the elements of an array that satisfy some condition.
@@ -1607,6 +1668,11 @@ def extract(condition, arr):
return _nx.take(ravel(arr), nonzero(ravel(condition))[0])
+def _place_dispatcher(arr, mask, vals):
+ return (arr, mask, vals)
+
+
+@array_function_dispatch(_place_dispatcher)
def place(arr, mask, vals):
"""
Change elements of an array based on conditional and input values.
@@ -1800,6 +1866,7 @@ def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes):
return arrays
+@set_module('numpy')
class vectorize(object):
"""
vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False,
@@ -1855,6 +1922,30 @@ class vectorize(object):
vectorized : callable
Vectorized function.
+ See Also
+ --------
+ frompyfunc : Takes an arbitrary Python function and returns a ufunc
+
+ Notes
+ -----
+ The `vectorize` function is provided primarily for convenience, not for
+ performance. The implementation is essentially a for loop.
+
+ If `otypes` is not specified, then a call to the function with the
+ first argument will be used to determine the number of outputs. The
+ results of this call will be cached if `cache` is `True` to prevent
+ calling the function twice. However, to implement the cache, the
+ original function must be wrapped which will slow down subsequent
+ calls, so only do this if your function is expensive.
+
+ The new keyword argument interface and `excluded` argument support
+ further degrades performance.
+
+ References
+ ----------
+ .. [1] NumPy Reference, section `Generalized Universal Function API
+ <https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
+
Examples
--------
>>> def myfunc(a, b):
@@ -1882,11 +1973,11 @@ class vectorize(object):
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
- <type 'numpy.int32'>
+ <class 'numpy.int64'>
>>> vfunc = np.vectorize(myfunc, otypes=[float])
>>> out = vfunc([1, 2, 3, 4], 2)
>>> type(out[0])
- <type 'numpy.float64'>
+ <class 'numpy.float64'>
The `excluded` argument can be used to prevent vectorizing over certain
arguments. This can be useful for array-like arguments of a fixed length
@@ -1914,7 +2005,7 @@ class vectorize(object):
>>> import scipy.stats
>>> pearsonr = np.vectorize(scipy.stats.pearsonr,
- ... signature='(n),(n)->(),()')
+ ... signature='(n),(n)->(),()')
>>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
(array([ 1., -1.]), array([ 0., 0.]))
@@ -1922,36 +2013,12 @@ class vectorize(object):
>>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
>>> convolve(np.eye(4), [1, 2, 1])
- array([[ 1., 2., 1., 0., 0., 0.],
- [ 0., 1., 2., 1., 0., 0.],
- [ 0., 0., 1., 2., 1., 0.],
- [ 0., 0., 0., 1., 2., 1.]])
-
- See Also
- --------
- frompyfunc : Takes an arbitrary Python function and returns a ufunc
-
- Notes
- -----
- The `vectorize` function is provided primarily for convenience, not for
- performance. The implementation is essentially a for loop.
+ array([[1., 2., 1., 0., 0., 0.],
+ [0., 1., 2., 1., 0., 0.],
+ [0., 0., 1., 2., 1., 0.],
+ [0., 0., 0., 1., 2., 1.]])
- If `otypes` is not specified, then a call to the function with the
- first argument will be used to determine the number of outputs. The
- results of this call will be cached if `cache` is `True` to prevent
- calling the function twice. However, to implement the cache, the
- original function must be wrapped which will slow down subsequent
- calls, so only do this if your function is expensive.
-
- The new keyword argument interface and `excluded` argument support
- further degrades performance.
-
- References
- ----------
- .. [1] NumPy Reference, section `Generalized Universal Function API
- <https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html>`_.
"""
-
def __init__(self, pyfunc, otypes=None, doc=None, excluded=None,
cache=False, signature=None):
self.pyfunc = pyfunc
@@ -2161,6 +2228,12 @@ class vectorize(object):
return outputs[0] if nout == 1 else outputs
+def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None,
+ fweights=None, aweights=None):
+ return (m, y, fweights, aweights)
+
+
+@array_function_dispatch(_cov_dispatcher)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
aweights=None):
"""
@@ -2229,10 +2302,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
steps to compute the weighted covariance are as follows::
+ >>> m = np.arange(10, dtype=np.float64)
+ >>> f = np.arange(10) * 2
+ >>> a = np.arange(10) ** 2.
+ >>> ddof = 9 # N - 1
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
- >>> m -= np.sum(m * w, axis=1, keepdims=True) / v1
+ >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1
>>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)
Note that when ``a == 1``, the normalization factor
@@ -2264,14 +2341,14 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
>>> x = [-2.1, -1, 4.3]
>>> y = [3, 1.1, 0.12]
>>> X = np.stack((x, y), axis=0)
- >>> print(np.cov(X))
- [[ 11.71 -4.286 ]
- [ -4.286 2.14413333]]
- >>> print(np.cov(x, y))
- [[ 11.71 -4.286 ]
- [ -4.286 2.14413333]]
- >>> print(np.cov(x))
- 11.71
+ >>> np.cov(X)
+ array([[11.71 , -4.286 ], # may vary
+ [-4.286 , 2.144133]])
+ >>> np.cov(x, y)
+ array([[11.71 , -4.286 ], # may vary
+ [-4.286 , 2.144133]])
+ >>> np.cov(x)
+ array(11.71)
"""
# Check inputs
@@ -2357,7 +2434,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
if fact <= 0:
warnings.warn("Degrees of freedom <= 0 for slice",
- RuntimeWarning, stacklevel=2)
+ RuntimeWarning, stacklevel=3)
fact = 0.0
X -= avg[:, None]
@@ -2370,6 +2447,11 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
return c.squeeze()
+def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None):
+ return (x, y)
+
+
+@array_function_dispatch(_corrcoef_dispatcher)
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
@@ -2431,7 +2513,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn('bias and ddof have no effect and are deprecated',
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
c = cov(x, y, rowvar)
try:
d = diag(c)
@@ -2453,6 +2535,7 @@ def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue):
return c
+@set_module('numpy')
def blackman(M):
"""
Return the Blackman window.
@@ -2502,12 +2585,12 @@ def blackman(M):
Examples
--------
+ >>> import matplotlib.pyplot as plt
>>> np.blackman(12)
- array([ -1.38777878e-17, 3.26064346e-02, 1.59903635e-01,
- 4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
- 9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
- 1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
-
+ array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary
+ 4.14397981e-01, 7.36045180e-01, 9.67046769e-01,
+ 9.67046769e-01, 7.36045180e-01, 4.14397981e-01,
+ 1.59903635e-01, 3.26064346e-02, -1.38777878e-17])
Plot the window and the frequency response:
@@ -2516,30 +2599,31 @@ def blackman(M):
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Blackman window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Blackman window')
>>> plt.ylabel("Amplitude")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Sample')
>>> plt.show()
>>> plt.figure()
- <matplotlib.figure.Figure object at 0x...>
+ <Figure size 640x480 with 0 Axes>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
- >>> response = 20 * np.log10(mag)
+ >>> with np.errstate(divide='ignore', invalid='ignore'):
+ ... response = 20 * np.log10(mag)
+ ...
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Blackman window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Frequency response of Blackman window')
>>> plt.ylabel("Magnitude [dB]")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Magnitude [dB]')
>>> plt.xlabel("Normalized frequency [cycles per sample]")
- <matplotlib.text.Text object at 0x...>
- >>> plt.axis('tight')
- (-0.5, 0.5, -100.0, ...)
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> _ = plt.axis('tight')
>>> plt.show()
"""
@@ -2551,6 +2635,7 @@ def blackman(M):
return 0.42 - 0.5*cos(2.0*pi*n/(M-1)) + 0.08*cos(4.0*pi*n/(M-1))
+@set_module('numpy')
def bartlett(M):
"""
Return the Bartlett window.
@@ -2610,8 +2695,9 @@ def bartlett(M):
Examples
--------
+ >>> import matplotlib.pyplot as plt
>>> np.bartlett(12)
- array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273,
+ array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary
0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636,
0.18181818, 0. ])
@@ -2622,30 +2708,31 @@ def bartlett(M):
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Bartlett window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Bartlett window')
>>> plt.ylabel("Amplitude")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Sample')
>>> plt.show()
>>> plt.figure()
- <matplotlib.figure.Figure object at 0x...>
+ <Figure size 640x480 with 0 Axes>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
- >>> response = 20 * np.log10(mag)
+ >>> with np.errstate(divide='ignore', invalid='ignore'):
+ ... response = 20 * np.log10(mag)
+ ...
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Bartlett window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Frequency response of Bartlett window')
>>> plt.ylabel("Magnitude [dB]")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Magnitude [dB]')
>>> plt.xlabel("Normalized frequency [cycles per sample]")
- <matplotlib.text.Text object at 0x...>
- >>> plt.axis('tight')
- (-0.5, 0.5, -100.0, ...)
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
+ >>> _ = plt.axis('tight')
>>> plt.show()
"""
@@ -2657,6 +2744,7 @@ def bartlett(M):
return where(less_equal(n, (M-1)/2.0), 2.0*n/(M-1), 2.0 - 2.0*n/(M-1))
+@set_module('numpy')
def hanning(M):
"""
Return the Hanning window.
@@ -2711,41 +2799,44 @@ def hanning(M):
Examples
--------
>>> np.hanning(12)
- array([ 0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
- 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
- 0.07937323, 0. ])
+ array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037,
+ 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249,
+ 0.07937323, 0. ])
Plot the window and its frequency response:
+ >>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, fftshift
>>> window = np.hanning(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hann window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Hann window')
>>> plt.ylabel("Amplitude")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Sample')
>>> plt.show()
>>> plt.figure()
- <matplotlib.figure.Figure object at 0x...>
+ <Figure size 640x480 with 0 Axes>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
- >>> response = 20 * np.log10(mag)
+ >>> with np.errstate(divide='ignore', invalid='ignore'):
+ ... response = 20 * np.log10(mag)
+ ...
>>> response = np.clip(response, -100, 100)
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of the Hann window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Frequency response of the Hann window')
>>> plt.ylabel("Magnitude [dB]")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Magnitude [dB]')
>>> plt.xlabel("Normalized frequency [cycles per sample]")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
>>> plt.axis('tight')
- (-0.5, 0.5, -100.0, ...)
+ ...
>>> plt.show()
"""
@@ -2757,6 +2848,7 @@ def hanning(M):
return 0.5 - 0.5*cos(2.0*pi*n/(M-1))
+@set_module('numpy')
def hamming(M):
"""
Return the Hamming window.
@@ -2809,26 +2901,27 @@ def hamming(M):
Examples
--------
>>> np.hamming(12)
- array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594,
+ array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary
0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909,
0.15302337, 0.08 ])
Plot the window and the frequency response:
+ >>> import matplotlib.pyplot as plt
>>> from numpy.fft import fft, fftshift
>>> window = np.hamming(51)
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Hamming window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Hamming window')
>>> plt.ylabel("Amplitude")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Sample')
>>> plt.show()
>>> plt.figure()
- <matplotlib.figure.Figure object at 0x...>
+ <Figure size 640x480 with 0 Axes>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
@@ -2837,13 +2930,13 @@ def hamming(M):
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Hamming window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Frequency response of Hamming window')
>>> plt.ylabel("Magnitude [dB]")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Magnitude [dB]')
>>> plt.xlabel("Normalized frequency [cycles per sample]")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
>>> plt.axis('tight')
- (-0.5, 0.5, -100.0, ...)
+ ...
>>> plt.show()
"""
@@ -2938,6 +3031,11 @@ def _i0_2(x):
return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x)
+def _i0_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_i0_dispatcher)
def i0(x):
"""
Modified Bessel function of the first kind, order 0.
@@ -2963,10 +3061,13 @@ def i0(x):
See Also
--------
- scipy.special.iv, scipy.special.ive
+ scipy.special.i0, scipy.special.iv, scipy.special.ive
Notes
-----
+ The scipy implementation is recommended over this function: it is a
+ proper ufunc written in C, and more than an order of magnitude faster.
+
We use the algorithm published by Clenshaw [1]_ and referenced by
Abramowitz and Stegun [2]_, for which the function domain is
partitioned into the two intervals [0,8] and (8,inf), and Chebyshev
@@ -2986,25 +3087,20 @@ def i0(x):
Examples
--------
- >>> np.i0([0.])
- array(1.0)
+ >>> np.i0(0.)
+ array(1.0) # may vary
>>> np.i0([0., 1. + 2j])
- array([ 1.00000000+0.j , 0.18785373+0.64616944j])
+ array([ 1.00000000+0.j , 0.18785373+0.64616944j]) # may vary
"""
- x = atleast_1d(x).copy()
- y = empty_like(x)
- ind = (x < 0)
- x[ind] = -x[ind]
- ind = (x <= 8.0)
- y[ind] = _i0_1(x[ind])
- ind2 = ~ind
- y[ind2] = _i0_2(x[ind2])
- return y.squeeze()
+ x = np.asanyarray(x)
+ x = np.abs(x)
+ return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])
## End of cephes code for i0
+@set_module('numpy')
def kaiser(M, beta):
"""
Return the Kaiser window.
@@ -3083,11 +3179,12 @@ def kaiser(M, beta):
Examples
--------
+ >>> import matplotlib.pyplot as plt
>>> np.kaiser(12, 14)
- array([ 7.72686684e-06, 3.46009194e-03, 4.65200189e-02,
- 2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
- 9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
- 4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
+ array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary
+ 2.29737120e-01, 5.99885316e-01, 9.45674898e-01,
+ 9.45674898e-01, 5.99885316e-01, 2.29737120e-01,
+ 4.65200189e-02, 3.46009194e-03, 7.72686684e-06])
Plot the window and the frequency response:
@@ -3097,15 +3194,15 @@ def kaiser(M, beta):
>>> plt.plot(window)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Kaiser window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Kaiser window')
>>> plt.ylabel("Amplitude")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("Sample")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Sample')
>>> plt.show()
>>> plt.figure()
- <matplotlib.figure.Figure object at 0x...>
+ <Figure size 640x480 with 0 Axes>
>>> A = fft(window, 2048) / 25.5
>>> mag = np.abs(fftshift(A))
>>> freq = np.linspace(-0.5, 0.5, len(A))
@@ -3114,13 +3211,13 @@ def kaiser(M, beta):
>>> plt.plot(freq, response)
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Frequency response of Kaiser window")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Frequency response of Kaiser window')
>>> plt.ylabel("Magnitude [dB]")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Magnitude [dB]')
>>> plt.xlabel("Normalized frequency [cycles per sample]")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'Normalized frequency [cycles per sample]')
>>> plt.axis('tight')
- (-0.5, 0.5, -100.0, ...)
+ (-0.5, 0.5, -100.0, ...) # may vary
>>> plt.show()
"""
@@ -3132,6 +3229,11 @@ def kaiser(M, beta):
return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta))
+def _sinc_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_sinc_dispatcher)
def sinc(x):
"""
Return the sinc function.
@@ -3171,46 +3273,45 @@ def sinc(x):
Examples
--------
+ >>> import matplotlib.pyplot as plt
>>> x = np.linspace(-4, 4, 41)
>>> np.sinc(x)
- array([ -3.89804309e-17, -4.92362781e-02, -8.40918587e-02,
+ array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary
-8.90384387e-02, -5.84680802e-02, 3.89804309e-17,
- 6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
- 8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
+ 6.68206631e-02, 1.16434881e-01, 1.26137788e-01,
+ 8.50444803e-02, -3.89804309e-17, -1.03943254e-01,
-1.89206682e-01, -2.16236208e-01, -1.55914881e-01,
- 3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
- 7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
- 9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
- 2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
- -2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
- -3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
- 1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
+ 3.89804309e-17, 2.33872321e-01, 5.04551152e-01,
+ 7.56826729e-01, 9.35489284e-01, 1.00000000e+00,
+ 9.35489284e-01, 7.56826729e-01, 5.04551152e-01,
+ 2.33872321e-01, 3.89804309e-17, -1.55914881e-01,
+ -2.16236208e-01, -1.89206682e-01, -1.03943254e-01,
+ -3.89804309e-17, 8.50444803e-02, 1.26137788e-01,
+ 1.16434881e-01, 6.68206631e-02, 3.89804309e-17,
-5.84680802e-02, -8.90384387e-02, -8.40918587e-02,
-4.92362781e-02, -3.89804309e-17])
>>> plt.plot(x, np.sinc(x))
[<matplotlib.lines.Line2D object at 0x...>]
>>> plt.title("Sinc Function")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 1.0, 'Sinc Function')
>>> plt.ylabel("Amplitude")
- <matplotlib.text.Text object at 0x...>
+ Text(0, 0.5, 'Amplitude')
>>> plt.xlabel("X")
- <matplotlib.text.Text object at 0x...>
+ Text(0.5, 0, 'X')
>>> plt.show()
- It works in 2-D as well:
-
- >>> x = np.linspace(-4, 4, 401)
- >>> xx = np.outer(x, x)
- >>> plt.imshow(np.sinc(xx))
- <matplotlib.image.AxesImage object at 0x...>
-
"""
x = np.asanyarray(x)
y = pi * where(x == 0, 1.0e-20, x)
return sin(y)/y
+def _msort_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_msort_dispatcher)
def msort(a):
"""
Return a copy of an array sorted along the first axis.
@@ -3294,6 +3395,12 @@ def _ureduce(a, func, **kwargs):
return r, keepdim
+def _median_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_median_dispatcher)
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
@@ -3356,18 +3463,18 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
>>> np.median(a)
3.5
>>> np.median(a, axis=0)
- array([ 6.5, 4.5, 2.5])
+ array([6.5, 4.5, 2.5])
>>> np.median(a, axis=1)
- array([ 7., 2.])
+ array([7., 2.])
>>> m = np.median(a, axis=0)
>>> out = np.zeros_like(m)
>>> np.median(a, axis=0, out=m)
- array([ 6.5, 4.5, 2.5])
+ array([6.5, 4.5, 2.5])
>>> m
- array([ 6.5, 4.5, 2.5])
+ array([6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.median(b, axis=1, overwrite_input=True)
- array([ 7., 2.])
+ array([7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.median(b, axis=None, overwrite_input=True)
@@ -3438,6 +3545,12 @@ def _median(a, axis=None, out=None, overwrite_input=False):
return mean(part[indexer], axis=axis, out=out)
+def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_percentile_dispatcher)
def percentile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
@@ -3528,23 +3641,23 @@ def percentile(a, q, axis=None, out=None,
>>> np.percentile(a, 50)
3.5
>>> np.percentile(a, 50, axis=0)
- array([[ 6.5, 4.5, 2.5]])
+ array([6.5, 4.5, 2.5])
>>> np.percentile(a, 50, axis=1)
- array([ 7., 2.])
+ array([7., 2.])
>>> np.percentile(a, 50, axis=1, keepdims=True)
- array([[ 7.],
- [ 2.]])
+ array([[7.],
+ [2.]])
>>> m = np.percentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.percentile(a, 50, axis=0, out=out)
- array([[ 6.5, 4.5, 2.5]])
+ array([6.5, 4.5, 2.5])
>>> m
- array([[ 6.5, 4.5, 2.5]])
+ array([6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.percentile(b, 50, axis=1, overwrite_input=True)
- array([ 7., 2.])
+ array([7., 2.])
>>> assert not np.all(a == b)
The different types of interpolation can be visualized graphically:
@@ -3576,18 +3689,26 @@ def percentile(a, q, axis=None, out=None,
plt.show()
"""
- q = np.true_divide(q, 100.0) # handles the asarray for us too
+ q = np.true_divide(q, 100)
+ q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105)
if not _quantile_is_valid(q):
raise ValueError("Percentiles must be in the range [0, 100]")
return _quantile_unchecked(
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_quantile_dispatcher)
def quantile(a, q, axis=None, out=None,
overwrite_input=False, interpolation='linear', keepdims=False):
"""
Compute the q-th quantile of the data along the specified axis.
- ..versionadded:: 1.15.0
+
+ .. versionadded:: 1.15.0
Parameters
----------
@@ -3664,21 +3785,21 @@ def quantile(a, q, axis=None, out=None,
>>> np.quantile(a, 0.5)
3.5
>>> np.quantile(a, 0.5, axis=0)
- array([[ 6.5, 4.5, 2.5]])
+ array([6.5, 4.5, 2.5])
>>> np.quantile(a, 0.5, axis=1)
- array([ 7., 2.])
+ array([7., 2.])
>>> np.quantile(a, 0.5, axis=1, keepdims=True)
- array([[ 7.],
- [ 2.]])
+ array([[7.],
+ [2.]])
>>> m = np.quantile(a, 0.5, axis=0)
>>> out = np.zeros_like(m)
>>> np.quantile(a, 0.5, axis=0, out=out)
- array([[ 6.5, 4.5, 2.5]])
+ array([6.5, 4.5, 2.5])
>>> m
- array([[ 6.5, 4.5, 2.5]])
+ array([6.5, 4.5, 2.5])
>>> b = a.copy()
>>> np.quantile(b, 0.5, axis=1, overwrite_input=True)
- array([ 7., 2.])
+ array([7., 2.])
>>> assert not np.all(a == b)
"""
q = np.asanyarray(q)
@@ -3788,7 +3909,7 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
indices_above = concatenate((indices_above, [-1]))
weights_above = indices - indices_below
- weights_below = 1.0 - weights_above
+ weights_below = 1 - weights_above
weights_shape = [1, ] * ap.ndim
weights_shape[axis] = len(indices)
@@ -3825,8 +3946,6 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
r = add(x1, x2)
if np.any(n):
- warnings.warn("Invalid value encountered in percentile",
- RuntimeWarning, stacklevel=3)
if zerod:
if ap.ndim == 1:
if out is not None:
@@ -3845,6 +3964,11 @@ def _quantile_ureduce_func(a, q, axis=None, out=None, overwrite_input=False,
return r
+def _trapz_dispatcher(y, x=None, dx=None, axis=None):
+ return (y, x)
+
+
+@array_function_dispatch(_trapz_dispatcher)
def trapz(y, x=None, dx=1.0, axis=-1):
"""
Integrate along the given axis using the composite trapezoidal rule.
@@ -3902,9 +4026,9 @@ def trapz(y, x=None, dx=1.0, axis=-1):
array([[0, 1, 2],
[3, 4, 5]])
>>> np.trapz(a, axis=0)
- array([ 1.5, 2.5, 3.5])
+ array([1.5, 2.5, 3.5])
>>> np.trapz(a, axis=1)
- array([ 2., 8.])
+ array([2., 8.])
"""
y = asanyarray(y)
@@ -3935,7 +4059,12 @@ def trapz(y, x=None, dx=1.0, axis=-1):
return ret
+def _meshgrid_dispatcher(*xi, **kwargs):
+ return xi
+
+
# Based on scitools meshgrid
+@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, **kwargs):
"""
Return coordinate matrices from coordinate vectors.
@@ -4017,17 +4146,17 @@ def meshgrid(*xi, **kwargs):
>>> y = np.linspace(0, 1, ny)
>>> xv, yv = np.meshgrid(x, y)
>>> xv
- array([[ 0. , 0.5, 1. ],
- [ 0. , 0.5, 1. ]])
+ array([[0. , 0.5, 1. ],
+ [0. , 0.5, 1. ]])
>>> yv
- array([[ 0., 0., 0.],
- [ 1., 1., 1.]])
+ array([[0., 0., 0.],
+ [1., 1., 1.]])
>>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays
>>> xv
- array([[ 0. , 0.5, 1. ]])
+ array([[0. , 0.5, 1. ]])
>>> yv
- array([[ 0.],
- [ 1.]])
+ array([[0.],
+ [1.]])
`meshgrid` is very useful to evaluate functions on a grid.
@@ -4073,6 +4202,11 @@ def meshgrid(*xi, **kwargs):
return output
+def _delete_dispatcher(arr, obj, axis=None):
+ return (arr, obj)
+
+
+@array_function_dispatch(_delete_dispatcher)
def delete(arr, obj, axis=None):
"""
Return a new array with sub-arrays along an axis deleted. For a one
@@ -4084,7 +4218,7 @@ def delete(arr, obj, axis=None):
arr : array_like
Input array.
obj : slice, int or array of ints
- Indicate which sub-arrays to remove.
+ Indicate indices of sub-arrays to remove along the specified axis.
axis : int, optional
The axis along which to delete the subarray defined by `obj`.
If `axis` is None, `obj` is applied to the flattened array.
@@ -4105,6 +4239,7 @@ def delete(arr, obj, axis=None):
-----
Often it is preferable to use a boolean mask. For example:
+ >>> arr = np.arange(12) + 1
>>> mask = np.ones(len(arr), dtype=bool)
>>> mask[[0,2,4]] = False
>>> result = arr[mask,...]
@@ -4151,7 +4286,7 @@ def delete(arr, obj, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
- "from delete and raise an error", DeprecationWarning, stacklevel=2)
+ "from delete and raise an error", DeprecationWarning, stacklevel=3)
if wrap:
return wrap(arr)
else:
@@ -4188,7 +4323,7 @@ def delete(arr, obj, axis=None):
else:
slobj[axis] = slice(None, start)
new[tuple(slobj)] = arr[tuple(slobj)]
- # copy end chunck
+ # copy end chunk
if stop == N:
pass
else:
@@ -4220,7 +4355,7 @@ def delete(arr, obj, axis=None):
if obj.dtype == bool:
warnings.warn("in the future insert will treat boolean arrays and "
"array-likes as boolean index instead of casting it "
- "to integer", FutureWarning, stacklevel=2)
+ "to integer", FutureWarning, stacklevel=3)
obj = obj.astype(intp)
if isinstance(_obj, (int, long, integer)):
# optimization for a single value
@@ -4248,7 +4383,7 @@ def delete(arr, obj, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in delete will result in an "
- "error in the future", DeprecationWarning, stacklevel=2)
+ "error in the future", DeprecationWarning, stacklevel=3)
obj = obj.astype(intp)
keep = ones(N, dtype=bool)
@@ -4259,13 +4394,13 @@ def delete(arr, obj, axis=None):
warnings.warn(
"in the future out of bounds indices will raise an error "
"instead of being ignored by `numpy.delete`.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
obj = obj[inside_bounds]
positive_indices = obj >= 0
if not positive_indices.all():
warnings.warn(
"in the future negative indices will not be ignored by "
- "`numpy.delete`.", FutureWarning, stacklevel=2)
+ "`numpy.delete`.", FutureWarning, stacklevel=3)
obj = obj[positive_indices]
keep[obj, ] = False
@@ -4278,6 +4413,11 @@ def delete(arr, obj, axis=None):
return new
+def _insert_dispatcher(arr, obj, values, axis=None):
+ return (arr, obj, values)
+
+
+@array_function_dispatch(_insert_dispatcher)
def insert(arr, obj, values, axis=None):
"""
Insert values along the given axis before the given indices.
@@ -4331,7 +4471,7 @@ def insert(arr, obj, values, axis=None):
[2, 2],
[3, 3]])
>>> np.insert(a, 1, 5)
- array([1, 5, 1, 2, 2, 3, 3])
+ array([1, 5, 1, ..., 2, 3, 3])
>>> np.insert(a, 1, 5, axis=1)
array([[1, 5, 1],
[2, 5, 2],
@@ -4351,13 +4491,13 @@ def insert(arr, obj, values, axis=None):
>>> b
array([1, 1, 2, 2, 3, 3])
>>> np.insert(b, [2, 2], [5, 6])
- array([1, 1, 5, 6, 2, 2, 3, 3])
+ array([1, 1, 5, ..., 2, 3, 3])
>>> np.insert(b, slice(2, 4), [5, 6])
- array([1, 1, 5, 2, 6, 2, 3, 3])
+ array([1, 1, 5, ..., 2, 3, 3])
>>> np.insert(b, [2, 2], [7.13, False]) # type casting
- array([1, 1, 7, 0, 2, 2, 3, 3])
+ array([1, 1, 7, ..., 2, 3, 3])
>>> x = np.arange(8).reshape(2, 4)
>>> idx = (1, 3)
@@ -4385,7 +4525,7 @@ def insert(arr, obj, values, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"in the future the special handling of scalars will be removed "
- "from insert and raise an error", DeprecationWarning, stacklevel=2)
+ "from insert and raise an error", DeprecationWarning, stacklevel=3)
arr = arr.copy(order=arrorder)
arr[...] = values
if wrap:
@@ -4409,7 +4549,7 @@ def insert(arr, obj, values, axis=None):
warnings.warn(
"in the future insert will treat boolean arrays and "
"array-likes as a boolean index instead of casting it to "
- "integer", FutureWarning, stacklevel=2)
+ "integer", FutureWarning, stacklevel=3)
indices = indices.astype(intp)
# Code after warning period:
#if obj.ndim != 1:
@@ -4459,7 +4599,7 @@ def insert(arr, obj, values, axis=None):
# 2013-09-24, 1.9
warnings.warn(
"using a non-integer array as obj in insert will result in an "
- "error in the future", DeprecationWarning, stacklevel=2)
+ "error in the future", DeprecationWarning, stacklevel=3)
indices = indices.astype(intp)
indices[indices < 0] += N
@@ -4484,6 +4624,11 @@ def insert(arr, obj, values, axis=None):
return new
+def _append_dispatcher(arr, values, axis=None):
+ return (arr, values)
+
+
+@array_function_dispatch(_append_dispatcher)
def append(arr, values, axis=None):
"""
Append values to the end of an array.
@@ -4516,7 +4661,7 @@ def append(arr, values, axis=None):
Examples
--------
>>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]])
- array([1, 2, 3, 4, 5, 6, 7, 8, 9])
+ array([1, 2, 3, ..., 7, 8, 9])
When `axis` is specified, `values` must have the correct shape.
@@ -4526,8 +4671,8 @@ def append(arr, values, axis=None):
[7, 8, 9]])
>>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0)
Traceback (most recent call last):
- ...
- ValueError: arrays must have same number of dimensions
+ ...
+ ValueError: all the input arrays must have same number of dimensions
"""
arr = asanyarray(arr)
@@ -4539,6 +4684,11 @@ def append(arr, values, axis=None):
return concatenate((arr, values), axis=axis)
+def _digitize_dispatcher(x, bins, right=None):
+ return (x, bins)
+
+
+@array_function_dispatch(_digitize_dispatcher)
def digitize(x, bins, right=False):
"""
Return the indices of the bins to which each value in input array belongs.
diff --git a/numpy/lib/histograms.py b/numpy/lib/histograms.py
index f03f30fb0..8474bd5d3 100644
--- a/numpy/lib/histograms.py
+++ b/numpy/lib/histograms.py
@@ -3,20 +3,26 @@ Histogram-related functions
"""
from __future__ import division, absolute_import, print_function
+import contextlib
+import functools
import operator
import warnings
import numpy as np
from numpy.compat.py3k import basestring
+from numpy.core import overrides
__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
# range is a keyword argument to many functions, so save the builtin so they can
# use it.
_range = range
-def _hist_bin_sqrt(x):
+def _hist_bin_sqrt(x, range):
"""
Square root histogram bin estimator.
@@ -33,10 +39,11 @@ def _hist_bin_sqrt(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return x.ptp() / np.sqrt(x.size)
-def _hist_bin_sturges(x):
+def _hist_bin_sturges(x, range):
"""
Sturges histogram bin estimator.
@@ -55,10 +62,11 @@ def _hist_bin_sturges(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return x.ptp() / (np.log2(x.size) + 1.0)
-def _hist_bin_rice(x):
+def _hist_bin_rice(x, range):
"""
Rice histogram bin estimator.
@@ -78,10 +86,11 @@ def _hist_bin_rice(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return x.ptp() / (2.0 * x.size ** (1.0 / 3))
-def _hist_bin_scott(x):
+def _hist_bin_scott(x, range):
"""
Scott histogram bin estimator.
@@ -99,10 +108,53 @@ def _hist_bin_scott(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)
-def _hist_bin_doane(x):
+def _hist_bin_stone(x, range):
+ """
+ Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).
+
+ The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
+ The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
+ https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule
+
+ This paper by Stone appears to be the origination of this rule.
+ http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf
+
+ Parameters
+ ----------
+ x : array_like
+ Input data that is to be histogrammed, trimmed to range. May not
+ be empty.
+ range : (float, float)
+ The lower and upper range of the bins.
+
+ Returns
+ -------
+ h : An estimate of the optimal bin width for the given data.
+ """
+
+ n = x.size
+ ptp_x = np.ptp(x)
+ if n <= 1 or ptp_x == 0:
+ return 0
+
+ def jhat(nbins):
+ hh = ptp_x / nbins
+ p_k = np.histogram(x, bins=nbins, range=range)[0] / n
+ return (2 - (n + 1) * p_k.dot(p_k)) / hh
+
+ nbins_upper_bound = max(100, int(np.sqrt(n)))
+ nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
+ if nbins == nbins_upper_bound:
+ warnings.warn("The number of bins estimated may be suboptimal.",
+ RuntimeWarning, stacklevel=3)
+ return ptp_x / nbins
+
+
+def _hist_bin_doane(x, range):
"""
Doane's histogram bin estimator.
@@ -120,6 +172,7 @@ def _hist_bin_doane(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
if x.size > 2:
sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
sigma = np.std(x)
@@ -136,7 +189,7 @@ def _hist_bin_doane(x):
return 0.0
-def _hist_bin_fd(x):
+def _hist_bin_fd(x, range):
"""
The Freedman-Diaconis histogram bin estimator.
@@ -161,11 +214,12 @@ def _hist_bin_fd(x):
-------
h : An estimate of the optimal bin width for the given data.
"""
+ del range # unused
iqr = np.subtract(*np.percentile(x, [75, 25]))
return 2.0 * iqr * x.size ** (-1.0 / 3.0)
-def _hist_bin_auto(x):
+def _hist_bin_auto(x, range):
"""
Histogram bin estimator that uses the minimum width of the
Freedman-Diaconis and Sturges estimators if the FD bandwidth is non zero
@@ -199,8 +253,9 @@ def _hist_bin_auto(x):
--------
_hist_bin_fd, _hist_bin_sturges
"""
- fd_bw = _hist_bin_fd(x)
- sturges_bw = _hist_bin_sturges(x)
+ fd_bw = _hist_bin_fd(x, range)
+ sturges_bw = _hist_bin_sturges(x, range)
+ del range # unused
if fd_bw:
return min(fd_bw, sturges_bw)
else:
@@ -208,7 +263,8 @@ def _hist_bin_auto(x):
return sturges_bw
# Private dict initialized at module load time
-_hist_bin_selectors = {'auto': _hist_bin_auto,
+_hist_bin_selectors = {'stone': _hist_bin_stone,
+ 'auto': _hist_bin_auto,
'doane': _hist_bin_doane,
'fd': _hist_bin_fd,
'rice': _hist_bin_rice,
@@ -220,6 +276,14 @@ _hist_bin_selectors = {'auto': _hist_bin_auto,
def _ravel_and_check_weights(a, weights):
""" Check a and weights have matching shapes, and ravel both """
a = np.asarray(a)
+
+ # Ensure that the array is a "subtractable" dtype
+ if a.dtype == np.bool_:
+ warnings.warn("Converting input from {} to {} for compatibility."
+ .format(a.dtype, np.uint8),
+ RuntimeWarning, stacklevel=3)
+ a = a.astype(np.uint8)
+
if weights is not None:
weights = np.asarray(weights)
if weights.shape != a.shape:
@@ -335,7 +399,7 @@ def _get_bin_edges(a, bins, range, weights):
n_equal_bins = 1
else:
# Do not call selectors on empty arrays
- width = _hist_bin_selectors[bin_name](a)
+ width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
if width:
n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
else:
@@ -392,9 +456,15 @@ def _search_sorted_inclusive(a, v):
))
+def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
r"""
- Function to calculate only the edges of the bins used by the `histogram` function.
+ Function to calculate only the edges of the bins used by the `histogram`
+ function.
Parameters
----------
@@ -432,6 +502,11 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
Less robust estimator that that takes into account data
variability and data size.
+ 'stone'
+ Estimator based on leave-one-out cross-validation estimate of
+ the integrated squared error. Can be regarded as a generalization
+ of Scott's rule.
+
'rice'
Estimator does not take variability into account, only data
size. Commonly overestimates number of bins required.
@@ -482,14 +557,14 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
using the `ptp` of the data. The final bin count is obtained from
``np.round(np.ceil(range / h))``.
- 'Auto' (maximum of the 'Sturges' and 'FD' estimators)
+ 'auto' (maximum of the 'sturges' and 'fd' estimators)
A compromise to get a good value. For small datasets the Sturges
value will usually be chosen, while larger datasets will usually
default to FD. Avoids the overly conservative behaviour of FD
and Sturges for small and large datasets respectively.
Switchover point is usually :math:`a.size \approx 1000`.
- 'FD' (Freedman Diaconis Estimator)
+ 'fd' (Freedman Diaconis Estimator)
.. math:: h = 2 \frac{IQR}{n^{1/3}}
The binwidth is proportional to the interquartile range (IQR)
@@ -497,7 +572,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
conservative for small datasets, but is quite good for large
datasets. The IQR is very robust to outliers.
- 'Scott'
+ 'scott'
.. math:: h = \sigma \sqrt[3]{\frac{24 * \sqrt{\pi}}{n}}
The binwidth is proportional to the standard deviation of the
@@ -507,14 +582,14 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
outliers. Values are very similar to the Freedman-Diaconis
estimator in the absence of outliers.
- 'Rice'
+ 'rice'
.. math:: n_h = 2n^{1/3}
The number of bins is only proportional to cube root of
``a.size``. It tends to overestimate the number of bins and it
does not take into account data variability.
- 'Sturges'
+ 'sturges'
.. math:: n_h = \log _{2}n+1
The number of bins is the base 2 log of ``a.size``. This
@@ -522,7 +597,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
larger, non-normal datasets. This is the default method in R's
``hist`` method.
- 'Doane'
+ 'doane'
.. math:: n_h = 1 + \log_{2}(n) +
\log_{2}(1 + \frac{|g_1|}{\sigma_{g_1}})
@@ -534,8 +609,9 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
estimates for non-normal datasets. This estimator attempts to
account for the skew of the data.
- 'Sqrt'
+ 'sqrt'
.. math:: n_h = \sqrt n
+
The simplest and fastest estimator. Only takes into account the
data size.
@@ -573,7 +649,7 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
>>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
>>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
- >>> hist_0; hist1
+ >>> hist_0; hist_1
array([1, 1, 1])
array([2, 1, 1, 2])
>>> bins_0; bins_1
@@ -586,6 +662,12 @@ def histogram_bin_edges(a, bins=10, range=None, weights=None):
return bin_edges
+def _histogram_dispatcher(
+ a, bins=None, range=None, normed=None, weights=None, density=None):
+ return (a, bins, weights)
+
+
+@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, normed=None, weights=None,
density=None):
r"""
@@ -670,14 +752,14 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
>>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
(array([0, 2, 1]), array([0, 1, 2, 3]))
>>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
- (array([ 0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
+ (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
>>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
(array([1, 4, 1]), array([0, 1, 2, 3]))
>>> a = np.arange(5)
>>> hist, bin_edges = np.histogram(a, density=True)
>>> hist
- array([ 0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
+ array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
>>> hist.sum()
2.4999999999999996
>>> np.sum(hist * np.diff(bin_edges))
@@ -692,8 +774,9 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
>>> rng = np.random.RandomState(10) # deterministic random data
>>> a = np.hstack((rng.normal(size=1000),
... rng.normal(loc=5, scale=2, size=1000)))
- >>> plt.hist(a, bins='auto') # arguments are passed to np.histogram
+ >>> _ = plt.hist(a, bins='auto') # arguments are passed to np.histogram
>>> plt.title("Histogram with 'auto' bins")
+ Text(0.5, 1.0, "Histogram with 'auto' bins")
>>> plt.show()
"""
@@ -807,7 +890,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
warnings.warn(
"The normed argument is ignored when density is provided. "
"In future passing both will result in an error.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
normed = None
if density:
@@ -823,7 +906,7 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
"density=True will produce the same result anyway. "
"The argument will be removed in a future version of "
"numpy.",
- np.VisibleDeprecationWarning, stacklevel=2)
+ np.VisibleDeprecationWarning, stacklevel=3)
# this normalization is incorrect, but
db = np.array(np.diff(bin_edges), float)
@@ -834,10 +917,22 @@ def histogram(a, bins=10, range=None, normed=None, weights=None,
warnings.warn(
"Passing normed=False is deprecated, and has no effect. "
"Consider passing the density argument instead.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
return n, bin_edges
+def _histogramdd_dispatcher(sample, bins=None, range=None, normed=None,
+ weights=None, density=None):
+ if hasattr(sample, 'shape'): # same condition as used in histogramdd
+ yield sample
+ else:
+ yield from sample
+ with contextlib.suppress(TypeError):
+ yield from bins
+ yield weights
+
+
+@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, normed=None, weights=None,
density=None):
"""
diff --git a/numpy/lib/index_tricks.py b/numpy/lib/index_tricks.py
index 009e6d229..04384854c 100644
--- a/numpy/lib/index_tricks.py
+++ b/numpy/lib/index_tricks.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+import functools
import sys
import math
@@ -9,13 +10,18 @@ from numpy.core.numeric import (
)
from numpy.core.numerictypes import find_common_type, issubdtype
-from . import function_base
import numpy.matrixlib as matrixlib
from .function_base import diff
from numpy.core.multiarray import ravel_multi_index, unravel_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides, linspace
from numpy.lib.stride_tricks import as_strided
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
__all__ = [
'ravel_multi_index', 'unravel_index', 'mgrid', 'ogrid', 'r_', 'c_',
's_', 'index_exp', 'ix_', 'ndenumerate', 'ndindex', 'fill_diagonal',
@@ -23,6 +29,11 @@ __all__ = [
]
+def _ix__dispatcher(*args):
+ return args
+
+
+@array_function_dispatch(_ix__dispatcher)
def ix_(*args):
"""
Construct an open mesh from multiple sequences.
@@ -83,12 +94,13 @@ def ix_(*args):
out = []
nd = len(args)
for k, new in enumerate(args):
- new = asarray(new)
+ if not isinstance(new, _nx.ndarray):
+ new = asarray(new)
+ if new.size == 0:
+ # Explicitly type empty arrays to avoid float default
+ new = new.astype(_nx.intp)
if new.ndim != 1:
raise ValueError("Cross index must be 1 dimensional")
- if new.size == 0:
- # Explicitly type empty arrays to avoid float default
- new = new.astype(_nx.intp)
if issubdtype(new.dtype, _nx.bool_):
new, = new.nonzero()
new = new.reshape((1,)*k + (new.size,) + (1,)*(nd-k-1))
@@ -194,9 +206,6 @@ class nd_grid(object):
else:
return _nx.arange(start, stop, step)
- def __len__(self):
- return 0
-
class MGridClass(nd_grid):
"""
@@ -261,8 +270,9 @@ class OGridClass(nd_grid):
the stop value **is inclusive**.
Returns
- ----------
- mesh-grid `ndarrays` with only one dimension :math:`\\neq 1`
+ -------
+ mesh-grid
+ `ndarrays` with only one dimension not equal to 1
See Also
--------
@@ -338,7 +348,7 @@ class AxisConcatenator(object):
step = 1
if isinstance(step, complex):
size = int(abs(step))
- newobj = function_base.linspace(start, stop, num=size)
+ newobj = linspace(start, stop, num=size)
else:
newobj = _nx.arange(start, stop, step)
if ndmin > 1:
@@ -470,7 +480,7 @@ class RClass(AxisConcatenator):
Examples
--------
>>> np.r_[np.array([1,2,3]), 0, 0, np.array([4,5,6])]
- array([1, 2, 3, 0, 0, 4, 5, 6])
+ array([1, 2, 3, ..., 4, 5, 6])
>>> np.r_[-1:1:6j, [0]*3, 5, 6]
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. , 0. , 0. , 0. , 5. , 6. ])
@@ -530,15 +540,18 @@ class CClass(AxisConcatenator):
[2, 5],
[3, 6]])
>>> np.c_[np.array([[1,2,3]]), 0, 0, np.array([[4,5,6]])]
- array([[1, 2, 3, 0, 0, 4, 5, 6]])
+ array([[1, 2, 3, ..., 4, 5, 6]])
"""
def __init__(self):
AxisConcatenator.__init__(self, -1, ndmin=2, trans1d=0)
+
c_ = CClass()
+
+@set_module('numpy')
class ndenumerate(object):
"""
Multidimensional index iterator.
@@ -589,6 +602,7 @@ class ndenumerate(object):
next = __next__
+@set_module('numpy')
class ndindex(object):
"""
An N-dimensional iterator object to index arrays.
@@ -729,6 +743,12 @@ s_ = IndexExpression(maketuple=False)
# The following functions complement those in twodim_base, but are
# applicable to N-dimensions.
+
+def _fill_diagonal_dispatcher(a, val, wrap=None):
+ return (a,)
+
+
+@array_function_dispatch(_fill_diagonal_dispatcher)
def fill_diagonal(a, val, wrap=False):
"""Fill the main diagonal of the given array of any dimensionality.
@@ -794,8 +814,8 @@ def fill_diagonal(a, val, wrap=False):
The wrap option affects only tall matrices:
>>> # tall matrices no wrap
- >>> a = np.zeros((5, 3),int)
- >>> fill_diagonal(a, 4)
+ >>> a = np.zeros((5, 3), int)
+ >>> np.fill_diagonal(a, 4)
>>> a
array([[4, 0, 0],
[0, 4, 0],
@@ -804,8 +824,8 @@ def fill_diagonal(a, val, wrap=False):
[0, 0, 0]])
>>> # tall matrices wrap
- >>> a = np.zeros((5, 3),int)
- >>> fill_diagonal(a, 4, wrap=True)
+ >>> a = np.zeros((5, 3), int)
+ >>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0],
[0, 4, 0],
@@ -814,13 +834,30 @@ def fill_diagonal(a, val, wrap=False):
[4, 0, 0]])
>>> # wide matrices
- >>> a = np.zeros((3, 5),int)
- >>> fill_diagonal(a, 4, wrap=True)
+ >>> a = np.zeros((3, 5), int)
+ >>> np.fill_diagonal(a, 4, wrap=True)
>>> a
array([[4, 0, 0, 0, 0],
[0, 4, 0, 0, 0],
[0, 0, 4, 0, 0]])
+ The anti-diagonal can be filled by reversing the order of elements
+ using either `numpy.flipud` or `numpy.fliplr`.
+
+ >>> a = np.zeros((3, 3), int);
+ >>> np.fill_diagonal(np.fliplr(a), [1,2,3]) # Horizontal flip
+ >>> a
+ array([[0, 0, 1],
+ [0, 2, 0],
+ [3, 0, 0]])
+ >>> np.fill_diagonal(np.flipud(a), [1,2,3]) # Vertical flip
+ >>> a
+ array([[0, 0, 3],
+ [0, 2, 0],
+ [1, 0, 0]])
+
+ Note that the order in which the diagonal is filled varies depending
+ on the flip function.
"""
if a.ndim < 2:
raise ValueError("array must be at least 2-d")
@@ -843,6 +880,7 @@ def fill_diagonal(a, val, wrap=False):
a.flat[:end:step] = val
+@set_module('numpy')
def diag_indices(n, ndim=2):
"""
Return the indices to access the main diagonal of an array.
@@ -911,6 +949,11 @@ def diag_indices(n, ndim=2):
return (idx,) * ndim
+def _diag_indices_from(arr):
+ return (arr,)
+
+
+@array_function_dispatch(_diag_indices_from)
def diag_indices_from(arr):
"""
Return the indices to access the main diagonal of an n-dimensional array.
diff --git a/numpy/lib/info.py b/numpy/lib/info.py
deleted file mode 100644
index 8815a52f0..000000000
--- a/numpy/lib/info.py
+++ /dev/null
@@ -1,160 +0,0 @@
-"""
-Basic functions used by several sub-packages and
-useful to have in the main name-space.
-
-Type Handling
--------------
-================ ===================
-iscomplexobj Test for complex object, scalar result
-isrealobj Test for real object, scalar result
-iscomplex Test for complex elements, array result
-isreal Test for real elements, array result
-imag Imaginary part
-real Real part
-real_if_close Turns complex number with tiny imaginary part to real
-isneginf Tests for negative infinity, array result
-isposinf Tests for positive infinity, array result
-isnan Tests for nans, array result
-isinf Tests for infinity, array result
-isfinite Tests for finite numbers, array result
-isscalar True if argument is a scalar
-nan_to_num Replaces NaN's with 0 and infinities with large numbers
-cast Dictionary of functions to force cast to each type
-common_type Determine the minimum common type code for a group
- of arrays
-mintypecode Return minimal allowed common typecode.
-================ ===================
-
-Index Tricks
-------------
-================ ===================
-mgrid Method which allows easy construction of N-d
- 'mesh-grids'
-``r_`` Append and construct arrays: turns slice objects into
- ranges and concatenates them, for 2d arrays appends rows.
-index_exp Konrad Hinsen's index_expression class instance which
- can be useful for building complicated slicing syntax.
-================ ===================
-
-Useful Functions
-----------------
-================ ===================
-select Extension of where to multiple conditions and choices
-extract Extract 1d array from flattened array according to mask
-insert Insert 1d array of values into Nd array according to mask
-linspace Evenly spaced samples in linear space
-logspace Evenly spaced samples in logarithmic space
-fix Round x to nearest integer towards zero
-mod Modulo mod(x,y) = x % y except keeps sign of y
-amax Array maximum along axis
-amin Array minimum along axis
-ptp Array max-min along axis
-cumsum Cumulative sum along axis
-prod Product of elements along axis
-cumprod Cumluative product along axis
-diff Discrete differences along axis
-angle Returns angle of complex argument
-unwrap Unwrap phase along given axis (1-d algorithm)
-sort_complex Sort a complex-array (based on real, then imaginary)
-trim_zeros Trim the leading and trailing zeros from 1D array.
-vectorize A class that wraps a Python function taking scalar
- arguments into a generalized function which can handle
- arrays of arguments using the broadcast rules of
- numerix Python.
-================ ===================
-
-Shape Manipulation
-------------------
-================ ===================
-squeeze Return a with length-one dimensions removed.
-atleast_1d Force arrays to be >= 1D
-atleast_2d Force arrays to be >= 2D
-atleast_3d Force arrays to be >= 3D
-vstack Stack arrays vertically (row on row)
-hstack Stack arrays horizontally (column on column)
-column_stack Stack 1D arrays as columns into 2D array
-dstack Stack arrays depthwise (along third dimension)
-stack Stack arrays along a new axis
-split Divide array into a list of sub-arrays
-hsplit Split into columns
-vsplit Split into rows
-dsplit Split along third dimension
-================ ===================
-
-Matrix (2D Array) Manipulations
--------------------------------
-================ ===================
-fliplr 2D array with columns flipped
-flipud 2D array with rows flipped
-rot90 Rotate a 2D array a multiple of 90 degrees
-eye Return a 2D array with ones down a given diagonal
-diag Construct a 2D array from a vector, or return a given
- diagonal from a 2D array.
-mat Construct a Matrix
-bmat Build a Matrix from blocks
-================ ===================
-
-Polynomials
------------
-================ ===================
-poly1d A one-dimensional polynomial class
-poly Return polynomial coefficients from roots
-roots Find roots of polynomial given coefficients
-polyint Integrate polynomial
-polyder Differentiate polynomial
-polyadd Add polynomials
-polysub Subtract polynomials
-polymul Multiply polynomials
-polydiv Divide polynomials
-polyval Evaluate polynomial at given argument
-================ ===================
-
-Iterators
----------
-================ ===================
-Arrayterator A buffered iterator for big arrays.
-================ ===================
-
-Import Tricks
--------------
-================ ===================
-ppimport Postpone module import until trying to use it
-ppimport_attr Postpone module import until trying to use its attribute
-ppresolve Import postponed module and return it.
-================ ===================
-
-Machine Arithmetics
--------------------
-================ ===================
-machar_single Single precision floating point arithmetic parameters
-machar_double Double precision floating point arithmetic parameters
-================ ===================
-
-Threading Tricks
-----------------
-================ ===================
-ParallelExec Execute commands in parallel thread.
-================ ===================
-
-Array Set Operations
------------------------
-Set operations for numeric arrays based on sort() function.
-
-================ ===================
-unique Unique elements of an array.
-isin Test whether each element of an ND array is present
- anywhere within a second array.
-ediff1d Array difference (auxiliary function).
-intersect1d Intersection of 1D arrays with unique elements.
-setxor1d Set exclusive-or of 1D arrays with unique elements.
-in1d Test whether elements in a 1D array are also present in
- another array.
-union1d Union of 1D arrays with unique elements.
-setdiff1d Set difference of 1D arrays with unique elements.
-================ ===================
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core', 'testing']
-global_symbols = ['*']
diff --git a/numpy/lib/mixins.py b/numpy/lib/mixins.py
index 0379ecb1a..f974a7724 100644
--- a/numpy/lib/mixins.py
+++ b/numpy/lib/mixins.py
@@ -5,8 +5,8 @@ import sys
from numpy.core import umath as um
-# Nothing should be exposed in the top-level NumPy module.
-__all__ = []
+
+__all__ = ['NDArrayOperatorsMixin']
def _disables_array_ufunc(obj):
@@ -69,9 +69,6 @@ class NDArrayOperatorsMixin(object):
deferring to the ``__array_ufunc__`` method, which subclasses must
implement.
- This class does not yet implement the special operators corresponding
- to ``matmul`` (``@``), because ``np.matmul`` is not yet a NumPy ufunc.
-
It is useful for writing classes that do not inherit from `numpy.ndarray`,
but that should support arithmetic and numpy universal functions like
arrays as described in `A Mechanism for Overriding Ufuncs
@@ -155,6 +152,8 @@ class NDArrayOperatorsMixin(object):
__add__, __radd__, __iadd__ = _numeric_methods(um.add, 'add')
__sub__, __rsub__, __isub__ = _numeric_methods(um.subtract, 'sub')
__mul__, __rmul__, __imul__ = _numeric_methods(um.multiply, 'mul')
+ __matmul__, __rmatmul__, __imatmul__ = _numeric_methods(
+ um.matmul, 'matmul')
if sys.version_info.major < 3:
# Python 3 uses only __truediv__ and __floordiv__
__div__, __rdiv__, __idiv__ = _numeric_methods(um.divide, 'div')
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 8d6b0f139..6cffab6ac 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -22,9 +22,15 @@ Functions
"""
from __future__ import division, absolute_import, print_function
+import functools
import warnings
import numpy as np
from numpy.lib import function_base
+from numpy.core import overrides
+
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
__all__ = [
@@ -34,6 +40,33 @@ __all__ = [
]
+def _nan_mask(a, out=None):
+ """
+ Parameters
+ ----------
+ a : array-like
+ Input array with at least 1 dimension.
+ out : ndarray, optional
+ Alternate output array in which to place the result. The default
+ is ``None``; if provided, it must have the same shape as the
+ expected output and will prevent the allocation of a new array.
+
+ Returns
+ -------
+ y : bool ndarray or True
+ A bool array where ``np.nan`` positions are marked with ``False``
+ and other positions are marked with ``True``. If the type of ``a``
+ is such that it can't possibly contain ``np.nan``, returns ``True``.
+ """
+ # we assume that a is an array for this private function
+
+ if a.dtype.kind not in 'fc':
+ return True
+
+ y = np.isnan(a, out=out)
+ y = np.invert(y, out=y)
+ return y
+
def _replace_nan(a, val):
"""
If `a` is of inexact type, make a copy of `a`, replace NaNs with
@@ -132,7 +165,8 @@ def _remove_nan_1d(arr1d, overwrite_input=False):
c = np.isnan(arr1d)
s = np.nonzero(c)[0]
if s.size == arr1d.size:
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=4)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=5)
return arr1d[:0], True
elif s.size == 0:
return arr1d, overwrite_input
@@ -188,6 +222,11 @@ def _divide_by_count(a, b, out=None):
return np.divide(a, b, out=out, casting='unsafe')
+def _nanmin_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmin_dispatcher)
def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return minimum of an array or minimum along an axis, ignoring any NaNs.
@@ -260,9 +299,9 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.nanmin(a)
1.0
>>> np.nanmin(a, axis=0)
- array([ 1., 2.])
+ array([1., 2.])
>>> np.nanmin(a, axis=1)
- array([ 1., 3.])
+ array([1., 3.])
When positive infinity and negative infinity are present:
@@ -280,7 +319,8 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
# which do not implement isnan (gh-9009), or fmin correctly (gh-8975)
res = np.fmin.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=3)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, +np.inf)
@@ -292,10 +332,16 @@ def nanmin(a, axis=None, out=None, keepdims=np._NoValue):
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
- warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=3)
return res
+def _nanmax_dispatcher(a, axis=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmax_dispatcher)
def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
"""
Return the maximum of an array or maximum along an axis, ignoring any
@@ -368,9 +414,9 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
>>> np.nanmax(a)
3.0
>>> np.nanmax(a, axis=0)
- array([ 3., 2.])
+ array([3., 2.])
>>> np.nanmax(a, axis=1)
- array([ 2., 3.])
+ array([2., 3.])
When positive infinity and negative infinity are present:
@@ -388,7 +434,8 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
# which do not implement isnan (gh-9009), or fmax correctly (gh-8975)
res = np.fmax.reduce(a, axis=axis, out=out, **kwargs)
if np.isnan(res).any():
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=3)
else:
# Slow, but safe for subclasses of ndarray
a, mask = _replace_nan(a, -np.inf)
@@ -400,10 +447,16 @@ def nanmax(a, axis=None, out=None, keepdims=np._NoValue):
mask = np.all(mask, axis=axis, **kwargs)
if np.any(mask):
res = _copyto(res, np.nan, mask)
- warnings.warn("All-NaN axis encountered", RuntimeWarning, stacklevel=2)
+ warnings.warn("All-NaN axis encountered", RuntimeWarning,
+ stacklevel=3)
return res
+def _nanargmin_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmin_dispatcher)
def nanargmin(a, axis=None):
"""
Return the indices of the minimum values in the specified axis ignoring
@@ -448,6 +501,11 @@ def nanargmin(a, axis=None):
return res
+def _nanargmax_dispatcher(a, axis=None):
+ return (a,)
+
+
+@array_function_dispatch(_nanargmax_dispatcher)
def nanargmax(a, axis=None):
"""
Return the indices of the maximum values in the specified axis ignoring
@@ -493,6 +551,11 @@ def nanargmax(a, axis=None):
return res
+def _nansum_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nansum_dispatcher)
def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the sum of array elements over a given axis treating Not a
@@ -570,12 +633,15 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.nansum(a)
3.0
>>> np.nansum(a, axis=0)
- array([ 2., 1.])
+ array([2., 1.])
>>> np.nansum([1, np.nan, np.inf])
inf
>>> np.nansum([1, np.nan, np.NINF])
-inf
- >>> np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
+ >>> from numpy.testing import suppress_warnings
+ >>> with suppress_warnings() as sup:
+ ... sup.filter(RuntimeWarning)
+ ... np.nansum([1, np.nan, np.inf, -np.inf]) # both +/- infinity present
nan
"""
@@ -583,6 +649,11 @@ def nansum(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
return np.sum(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+def _nanprod_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanprod_dispatcher)
def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Return the product of array elements over a given axis treating Not a
@@ -641,13 +712,18 @@ def nanprod(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.nanprod(a)
6.0
>>> np.nanprod(a, axis=0)
- array([ 3., 2.])
+ array([3., 2.])
"""
a, mask = _replace_nan(a, 1)
return np.prod(a, axis=axis, dtype=dtype, out=out, keepdims=keepdims)
+def _nancumsum_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumsum_dispatcher)
def nancumsum(a, axis=None, dtype=None, out=None):
"""
Return the cumulative sum of array elements over a given axis treating Not a
@@ -697,22 +773,27 @@ def nancumsum(a, axis=None, dtype=None, out=None):
>>> np.nancumsum([1])
array([1])
>>> np.nancumsum([1, np.nan])
- array([ 1., 1.])
+ array([1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumsum(a)
- array([ 1., 3., 6., 6.])
+ array([1., 3., 6., 6.])
>>> np.nancumsum(a, axis=0)
- array([[ 1., 2.],
- [ 4., 2.]])
+ array([[1., 2.],
+ [4., 2.]])
>>> np.nancumsum(a, axis=1)
- array([[ 1., 3.],
- [ 3., 3.]])
+ array([[1., 3.],
+ [3., 3.]])
"""
a, mask = _replace_nan(a, 0)
return np.cumsum(a, axis=axis, dtype=dtype, out=out)
+def _nancumprod_dispatcher(a, axis=None, dtype=None, out=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nancumprod_dispatcher)
def nancumprod(a, axis=None, dtype=None, out=None):
"""
Return the cumulative product of array elements over a given axis treating Not a
@@ -759,22 +840,27 @@ def nancumprod(a, axis=None, dtype=None, out=None):
>>> np.nancumprod([1])
array([1])
>>> np.nancumprod([1, np.nan])
- array([ 1., 1.])
+ array([1., 1.])
>>> a = np.array([[1, 2], [3, np.nan]])
>>> np.nancumprod(a)
- array([ 1., 2., 6., 6.])
+ array([1., 2., 6., 6.])
>>> np.nancumprod(a, axis=0)
- array([[ 1., 2.],
- [ 3., 2.]])
+ array([[1., 2.],
+ [3., 2.]])
>>> np.nancumprod(a, axis=1)
- array([[ 1., 2.],
- [ 3., 3.]])
+ array([[1., 2.],
+ [3., 3.]])
"""
a, mask = _replace_nan(a, 1)
return np.cumprod(a, axis=axis, dtype=dtype, out=out)
+def _nanmean_dispatcher(a, axis=None, dtype=None, out=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmean_dispatcher)
def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
"""
Compute the arithmetic mean along the specified axis, ignoring NaNs.
@@ -844,9 +930,9 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
>>> np.nanmean(a)
2.6666666666666665
>>> np.nanmean(a, axis=0)
- array([ 2., 4.])
+ array([2., 4.])
>>> np.nanmean(a, axis=1)
- array([ 1., 3.5])
+ array([1., 3.5]) # may vary
"""
arr, mask = _replace_nan(a, 0)
@@ -866,7 +952,7 @@ def nanmean(a, axis=None, dtype=None, out=None, keepdims=np._NoValue):
isbad = (cnt == 0)
if isbad.any():
- warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=2)
+ warnings.warn("Mean of empty slice", RuntimeWarning, stacklevel=3)
# NaN is the only possible bad value, so no further
# action is needed to handle bad results.
return avg
@@ -878,7 +964,7 @@ def _nanmedian1d(arr1d, overwrite_input=False):
See nanmedian for parameter usage
"""
arr1d, overwrite_input = _remove_nan_1d(arr1d,
- overwrite_input=overwrite_input)
+ overwrite_input=overwrite_input)
if arr1d.size == 0:
return np.nan
@@ -921,13 +1007,20 @@ def _nanmedian_small(a, axis=None, out=None, overwrite_input=False):
a = np.ma.masked_array(a, np.isnan(a))
m = np.ma.median(a, axis=axis, overwrite_input=overwrite_input)
for i in range(np.count_nonzero(m.mask.ravel())):
- warnings.warn("All-NaN slice encountered", RuntimeWarning, stacklevel=3)
+ warnings.warn("All-NaN slice encountered", RuntimeWarning,
+ stacklevel=4)
if out is not None:
out[...] = m.filled(np.nan)
return out
return m.filled(np.nan)
+def _nanmedian_dispatcher(
+ a, axis=None, out=None, overwrite_input=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanmedian_dispatcher)
def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValue):
"""
Compute the median along the specified axis, while ignoring NaNs.
@@ -992,19 +1085,19 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu
>>> a = np.array([[10.0, 7, 4], [3, 2, 1]])
>>> a[0, 1] = np.nan
>>> a
- array([[ 10., nan, 4.],
- [ 3., 2., 1.]])
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
>>> np.median(a)
nan
>>> np.nanmedian(a)
3.0
>>> np.nanmedian(a, axis=0)
- array([ 6.5, 2., 2.5])
+ array([6.5, 2. , 2.5])
>>> np.median(a, axis=1)
- array([ 7., 2.])
+ array([nan, 2.])
>>> b = a.copy()
>>> np.nanmedian(b, axis=1, overwrite_input=True)
- array([ 7., 2.])
+ array([7., 2.])
>>> assert not np.all(a==b)
>>> b = a.copy()
>>> np.nanmedian(b, axis=None, overwrite_input=True)
@@ -1026,6 +1119,12 @@ def nanmedian(a, axis=None, out=None, overwrite_input=False, keepdims=np._NoValu
return r
+def _nanpercentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_nanpercentile_dispatcher)
def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""
@@ -1114,27 +1213,27 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
>>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
>>> a[0][1] = np.nan
>>> a
- array([[ 10., nan, 4.],
- [ 3., 2., 1.]])
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
>>> np.percentile(a, 50)
nan
>>> np.nanpercentile(a, 50)
- 3.5
+ 3.0
>>> np.nanpercentile(a, 50, axis=0)
- array([ 6.5, 2., 2.5])
+ array([6.5, 2. , 2.5])
>>> np.nanpercentile(a, 50, axis=1, keepdims=True)
- array([[ 7.],
- [ 2.]])
+ array([[7.],
+ [2.]])
>>> m = np.nanpercentile(a, 50, axis=0)
>>> out = np.zeros_like(m)
>>> np.nanpercentile(a, 50, axis=0, out=out)
- array([ 6.5, 2., 2.5])
+ array([6.5, 2. , 2.5])
>>> m
- array([ 6.5, 2. , 2.5])
+ array([6.5, 2. , 2.5])
>>> b = a.copy()
>>> np.nanpercentile(b, 50, axis=1, overwrite_input=True)
- array([ 7., 2.])
+ array([7., 2.])
>>> assert not np.all(a==b)
"""
@@ -1146,12 +1245,19 @@ def nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
a, q, axis, out, overwrite_input, interpolation, keepdims)
+def _nanquantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None,
+ interpolation=None, keepdims=None):
+ return (a, q, out)
+
+
+@array_function_dispatch(_nanquantile_dispatcher)
def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
interpolation='linear', keepdims=np._NoValue):
"""
Compute the qth quantile of the data along the specified axis,
while ignoring nan values.
Returns the qth quantile(s) of the array elements.
+
.. versionadded:: 1.15.0
Parameters
@@ -1222,26 +1328,26 @@ def nanquantile(a, q, axis=None, out=None, overwrite_input=False,
>>> a = np.array([[10., 7., 4.], [3., 2., 1.]])
>>> a[0][1] = np.nan
>>> a
- array([[ 10., nan, 4.],
- [ 3., 2., 1.]])
+ array([[10., nan, 4.],
+ [ 3., 2., 1.]])
>>> np.quantile(a, 0.5)
nan
>>> np.nanquantile(a, 0.5)
- 3.5
+ 3.0
>>> np.nanquantile(a, 0.5, axis=0)
- array([ 6.5, 2., 2.5])
+ array([6.5, 2. , 2.5])
>>> np.nanquantile(a, 0.5, axis=1, keepdims=True)
- array([[ 7.],
- [ 2.]])
+ array([[7.],
+ [2.]])
>>> m = np.nanquantile(a, 0.5, axis=0)
>>> out = np.zeros_like(m)
>>> np.nanquantile(a, 0.5, axis=0, out=out)
- array([ 6.5, 2., 2.5])
+ array([6.5, 2. , 2.5])
>>> m
- array([ 6.5, 2. , 2.5])
+ array([6.5, 2. , 2.5])
>>> b = a.copy()
>>> np.nanquantile(b, 0.5, axis=1, overwrite_input=True)
- array([ 7., 2.])
+ array([7., 2.])
>>> assert not np.all(a==b)
"""
a = np.asanyarray(a)
@@ -1308,6 +1414,12 @@ def _nanquantile_1d(arr1d, q, overwrite_input=False, interpolation='linear'):
arr1d, q, overwrite_input=overwrite_input, interpolation=interpolation)
+def _nanvar_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanvar_dispatcher)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the variance along the specified axis, while ignoring NaNs.
@@ -1331,7 +1443,7 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
the variance of the flattened array.
dtype : data-type, optional
Type to use in computing the variance. For arrays of integer type
- the default is `float32`; for arrays of float types it is the same as
+ the default is `float64`; for arrays of float types it is the same as
the array type.
out : ndarray, optional
Alternate output array in which to place the result. It must have
@@ -1390,12 +1502,12 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
Examples
--------
>>> a = np.array([[1, np.nan], [3, 4]])
- >>> np.var(a)
+ >>> np.nanvar(a)
1.5555555555555554
>>> np.nanvar(a, axis=0)
- array([ 1., 0.])
+ array([1., 0.])
>>> np.nanvar(a, axis=1)
- array([ 0., 0.25])
+ array([0., 0.25]) # may vary
"""
arr, mask = _replace_nan(a, 0)
@@ -1442,13 +1554,20 @@ def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
isbad = (dof <= 0)
if np.any(isbad):
- warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning, stacklevel=2)
+ warnings.warn("Degrees of freedom <= 0 for slice.", RuntimeWarning,
+ stacklevel=3)
# NaN, inf, or negative numbers are all possible bad
# values, so explicitly replace them with NaN.
var = _copyto(var, np.nan, isbad)
return var
+def _nanstd_dispatcher(
+ a, axis=None, dtype=None, out=None, ddof=None, keepdims=None):
+ return (a, out)
+
+
+@array_function_dispatch(_nanstd_dispatcher)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
"""
Compute the standard deviation along the specified axis, while
@@ -1538,9 +1657,9 @@ def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=np._NoValue):
>>> np.nanstd(a)
1.247219128924647
>>> np.nanstd(a, axis=0)
- array([ 1., 0.])
+ array([1., 0.])
>>> np.nanstd(a, axis=1)
- array([ 0., 0.5])
+ array([0., 0.5]) # may vary
"""
var = nanvar(a, axis=axis, dtype=dtype, out=out, ddof=ddof,
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 7eb203868..e57a6dd47 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -3,15 +3,19 @@ from __future__ import division, absolute_import, print_function
import sys
import os
import re
+import functools
import itertools
import warnings
import weakref
+import contextlib
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
+from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
+from numpy.core.overrides import set_module
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
@@ -20,19 +24,18 @@ from ._iotools import (
)
from numpy.compat import (
- asbytes, asstr, asunicode, asbytes_nested, bytes, basestring, unicode,
- is_pathlib_path
+ asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
+ pickle, contextlib_nullcontext
)
if sys.version_info[0] >= 3:
- import pickle
from collections.abc import Mapping
else:
- import cPickle as pickle
from future_builtins import map
from collections import Mapping
+@set_module('numpy')
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
@@ -48,6 +51,10 @@ __all__ = [
]
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
class BagObj(object):
"""
BagObj(obj)
@@ -105,8 +112,8 @@ def zipfile_factory(file, *args, **kwargs):
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
- if is_pathlib_path(file):
- file = str(file)
+ if not hasattr(file, 'read'):
+ file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
@@ -139,7 +146,11 @@ class NpzFile(Mapping):
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
- Allow loading pickled data. Default: True
+ Allow loading pickled data. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
@@ -161,13 +172,13 @@ class NpzFile(Mapping):
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
- >>> outfile.seek(0)
+ >>> _ = outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
- >>> npz.files
- ['y', 'x']
+ >>> sorted(npz.files)
+ ['x', 'y']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
@@ -175,7 +186,7 @@ class NpzFile(Mapping):
"""
- def __init__(self, fid, own_fid=False, allow_pickle=True,
+ def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
@@ -277,11 +288,18 @@ class NpzFile(Mapping):
return self.keys()
-def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
+@set_module('numpy')
+def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
+ .. warning:: Loading files that contain object arrays uses the ``pickle``
+ module, which is not secure against erroneous or maliciously
+ constructed data. Consider passing ``allow_pickle=False`` to
+ load data that is known not to contain object arrays for the
+ safer handling of untrusted sources.
+
Parameters
----------
file : file-like object, string, or pathlib.Path
@@ -299,8 +317,11 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
- arrays will fail.
- Default: True
+ arrays will fail. Default: False
+
+ .. versionchanged:: 1.16.3
+ Made default False in response to CVE-2019-6446.
+
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
@@ -400,15 +421,12 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
pickle_kwargs = {}
# TODO: Use contextlib.ExitStack once we drop Python 2
- if isinstance(file, basestring):
- fid = open(file, "rb")
- own_fid = True
- elif is_pathlib_path(file):
- fid = file.open("rb")
- own_fid = True
- else:
+ if hasattr(file, 'read'):
fid = file
own_fid = False
+ else:
+ fid = open(os_fspath(file), "rb")
+ own_fid = True
try:
# Code to distinguish from NumPy binary files and pickles.
@@ -436,8 +454,8 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
else:
# Try a pickle
if not allow_pickle:
- raise ValueError("allow_pickle=False, but file does not contain "
- "non-pickled data")
+ raise ValueError("Cannot load file containing pickled data "
+ "when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
@@ -448,6 +466,11 @@ def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
fid.close()
+def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
+ return (arr,)
+
+
+@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
@@ -483,7 +506,9 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
-
+
+ Any data saved to the file is appended to the end of the file.
+
Examples
--------
>>> from tempfile import TemporaryFile
@@ -492,24 +517,29 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
>>> x = np.arange(10)
>>> np.save(outfile, x)
- >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
+ >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
+
+ >>> with open('test.npy', 'wb') as f:
+ ... np.save(f, np.array([1, 2]))
+ ... np.save(f, np.array([1, 3]))
+ >>> with open('test.npy', 'rb') as f:
+ ... a = np.load(f)
+ ... b = np.load(f)
+ >>> print(a, b)
+ # [1 2] [1 3]
"""
own_fid = False
- if isinstance(file, basestring):
+ if hasattr(file, 'write'):
+ fid = file
+ else:
+ file = os_fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
- elif is_pathlib_path(file):
- if not file.name.endswith('.npy'):
- file = file.parent / (file.name + '.npy')
- fid = file.open("wb")
- own_fid = True
- else:
- fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
@@ -526,6 +556,14 @@ def save(file, arr, allow_pickle=True, fix_imports=True):
fid.close()
+def _savez_dispatcher(file, *args, **kwds):
+ for a in args:
+ yield a
+ for v in kwds.values():
+ yield v
+
+
+@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
@@ -583,10 +621,10 @@ def savez(file, *args, **kwds):
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
- >>> outfile.seek(0) # Only needed here to simulate closing & reopening file
+ >>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
- ['arr_1', 'arr_0']
+ ['arr_0', 'arr_1']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
@@ -594,10 +632,10 @@ def savez(file, *args, **kwds):
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
- >>> outfile.seek(0)
+ >>> _ = outfile.seek(0)
>>> npzfile = np.load(outfile)
- >>> npzfile.files
- ['y', 'x']
+ >>> sorted(npzfile.files)
+ ['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
@@ -605,6 +643,14 @@ def savez(file, *args, **kwds):
_savez(file, args, kwds, False)
+def _savez_compressed_dispatcher(file, *args, **kwds):
+ for a in args:
+ yield a
+ for v in kwds.values():
+ yield v
+
+
+@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
@@ -674,12 +720,10 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# component of the so-called standard library.
import zipfile
- if isinstance(file, basestring):
+ if not hasattr(file, 'write'):
+ file = os_fspath(file)
if not file.endswith('.npz'):
file = file + '.npz'
- elif is_pathlib_path(file):
- if not file.name.endswith('.npz'):
- file = file.parent / (file.name + '.npz')
namedict = kwds
for i, val in enumerate(args):
@@ -701,8 +745,8 @@ def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
for key, val in namedict.items():
fname = key + '.npy'
val = np.asanyarray(val)
- force_zip64 = val.nbytes >= 2**30
- with zipf.open(fname, 'w', force_zip64=force_zip64) as fid:
+ # always force zip64, gh-10776
+ with zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, val,
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
@@ -772,6 +816,8 @@ def _getconv(dtype):
# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
+
+@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes', max_rows=None):
@@ -807,7 +853,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
`genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
- Skip the first `skiprows` lines; default: 0.
+ Skip the first `skiprows` lines, including comments; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
@@ -869,21 +915,21 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
- array([[ 0., 1.],
- [ 2., 3.]])
+ array([[0., 1.],
+ [2., 3.]])
>>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
- array([('M', 21, 72.0), ('F', 35, 58.0)],
- dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
+ array([(b'M', 21, 72.), (b'F', 35, 58.)],
+ dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
- array([ 1., 3.])
+ array([1., 3.])
>>> y
- array([ 2., 4.])
+ array([2., 4.])
"""
# Type conversions for Py3 convenience
@@ -927,8 +973,8 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fown = False
try:
- if is_pathlib_path(fname):
- fname = str(fname)
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
@@ -1096,7 +1142,6 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
- import functools
converters[i] = functools.partial(tobytes_first, conv=conv)
else:
converters[i] = conv
@@ -1155,6 +1200,13 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
return X
+def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
+ header=None, footer=None, comments=None,
+ encoding=None):
+ return (X,)
+
+
+@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
@@ -1316,8 +1368,8 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
self.write = self.write_bytes
own_fh = False
- if is_pathlib_path(fname):
- fname = str(fname)
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
if _is_string_like(fname):
# datasource doesn't support creating a new file ...
open(fname, 'wt').close()
@@ -1347,7 +1399,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
# Complex dtype -- each field indicates a separate column
else:
- ncol = len(X.dtype.descr)
+ ncol = len(X.dtype.names)
else:
ncol = X.shape[1]
@@ -1358,7 +1410,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
- elif isinstance(fmt, str):
+ elif isinstance(fmt, basestring):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
@@ -1405,6 +1457,7 @@ def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
fh.close()
+@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
"""
Construct an array from a text file, using regular expression parsing.
@@ -1451,17 +1504,17 @@ def fromregex(file, regexp, dtype, encoding=None):
Examples
--------
>>> f = open('test.dat', 'w')
- >>> f.write("1312 foo\\n1534 bar\\n444 qux")
+ >>> _ = f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
- array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
- dtype=[('num', '<i8'), ('key', '|S3')])
+ array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
+ dtype=[('num', '<i8'), ('key', 'S3')])
>>> output['num']
- array([1312, 1534, 444], dtype=int64)
+ array([1312, 1534, 444])
"""
own_fh = False
@@ -1503,10 +1556,12 @@ def fromregex(file, regexp, dtype, encoding=None):
#####--------------------------------------------------------------------------
+@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
- names=None, excludelist=None, deletechars=None,
+ names=None, excludelist=None,
+ deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
@@ -1643,26 +1698,26 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
- array((1, 1.3, 'abcde'),
- dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Using dtype = None
- >>> s.seek(0) # needed for StringIO example only
+ >>> _ = s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
- array((1, 1.3, 'abcde'),
- dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Specifying dtype and names
- >>> s.seek(0)
+ >>> _ = s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
- array((1, 1.3, 'abcde'),
- dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
+ array((1, 1.3, b'abcde'),
+ dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
An example with fixed-width columns
@@ -1670,8 +1725,18 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
- array((1, 1.3, 'abcde'),
- dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
+ array((1, 1.3, b'abcde'),
+ dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
+
+ An example to show comments
+
+ >>> f = StringIO('''
+ ... text,# of chars
+ ... hello world,11
+ ... numpy,5''')
+ >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
+ array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
+ dtype=[('f0', 'S12'), ('f1', 'S12')])
"""
if max_rows is not None:
@@ -1698,301 +1763,300 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
- own_fhd = False
try:
- if is_pathlib_path(fname):
- fname = str(fname)
+ if isinstance(fname, os_PathLike):
+ fname = os_fspath(fname)
if isinstance(fname, basestring):
- fhd = iter(np.lib._datasource.open(fname, 'rt', encoding=encoding))
- own_fhd = True
+ fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
+ fid_ctx = contextlib.closing(fid)
else:
- fhd = iter(fname)
+ fid = fname
+ fid_ctx = contextlib_nullcontext(fid)
+ fhd = iter(fid)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
- split_line = LineSplitter(delimiter=delimiter, comments=comments,
- autostrip=autostrip, encoding=encoding)
- validate_names = NameValidator(excludelist=excludelist,
- deletechars=deletechars,
- case_sensitive=case_sensitive,
- replace_space=replace_space)
-
- # Skip the first `skip_header` rows
- for i in range(skip_header):
- next(fhd)
+ with fid_ctx:
+ split_line = LineSplitter(delimiter=delimiter, comments=comments,
+ autostrip=autostrip, encoding=encoding)
+ validate_names = NameValidator(excludelist=excludelist,
+ deletechars=deletechars,
+ case_sensitive=case_sensitive,
+ replace_space=replace_space)
- # Keep on until we find the first valid values
- first_values = None
- try:
- while not first_values:
- first_line = _decode_line(next(fhd), encoding)
- if (names is True) and (comments is not None):
- if comments in first_line:
- first_line = (
- ''.join(first_line.split(comments)[1:]))
- first_values = split_line(first_line)
- except StopIteration:
- # return an empty array if the datafile is empty
- first_line = ''
- first_values = []
- warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
-
- # Should we take the first values as names ?
- if names is True:
- fval = first_values[0].strip()
- if comments is not None:
- if fval in comments:
- del first_values[0]
-
- # Check the columns to use: make sure `usecols` is a list
- if usecols is not None:
+ # Skip the first `skip_header` rows
try:
- usecols = [_.strip() for _ in usecols.split(",")]
- except AttributeError:
+ for i in range(skip_header):
+ next(fhd)
+
+ # Keep on until we find the first valid values
+ first_values = None
+
+ while not first_values:
+ first_line = _decode_line(next(fhd), encoding)
+ if (names is True) and (comments is not None):
+ if comments in first_line:
+ first_line = (
+ ''.join(first_line.split(comments)[1:]))
+ first_values = split_line(first_line)
+ except StopIteration:
+ # return an empty array if the datafile is empty
+ first_line = ''
+ first_values = []
+ warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
+
+ # Should we take the first values as names ?
+ if names is True:
+ fval = first_values[0].strip()
+ if comments is not None:
+ if fval in comments:
+ del first_values[0]
+
+ # Check the columns to use: make sure `usecols` is a list
+ if usecols is not None:
try:
- usecols = list(usecols)
- except TypeError:
- usecols = [usecols, ]
- nbcols = len(usecols or first_values)
-
- # Check the names and overwrite the dtype.names if needed
- if names is True:
- names = validate_names([str(_.strip()) for _ in first_values])
- first_line = ''
- elif _is_string_like(names):
- names = validate_names([_.strip() for _ in names.split(',')])
- elif names:
- names = validate_names(names)
- # Get the dtype
- if dtype is not None:
- dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
- excludelist=excludelist,
- deletechars=deletechars,
- case_sensitive=case_sensitive,
- replace_space=replace_space)
- # Make sure the names is a list (for 2.5)
- if names is not None:
- names = list(names)
-
- if usecols:
- for (i, current) in enumerate(usecols):
- # if usecols is a list of names, convert to a list of indices
- if _is_string_like(current):
- usecols[i] = names.index(current)
- elif current < 0:
- usecols[i] = current + len(first_values)
- # If the dtype is not None, make sure we update it
- if (dtype is not None) and (len(dtype) > nbcols):
- descr = dtype.descr
- dtype = np.dtype([descr[_] for _ in usecols])
- names = list(dtype.names)
- # If `names` is not None, update the names
- elif (names is not None) and (len(names) > nbcols):
- names = [names[_] for _ in usecols]
- elif (names is not None) and (dtype is not None):
- names = list(dtype.names)
-
- # Process the missing values ...............................
- # Rename missing_values for convenience
- user_missing_values = missing_values or ()
- if isinstance(user_missing_values, bytes):
- user_missing_values = user_missing_values.decode('latin1')
-
- # Define the list of missing_values (one column: one list)
- missing_values = [list(['']) for _ in range(nbcols)]
-
- # We have a dictionary: process it field by field
- if isinstance(user_missing_values, dict):
- # Loop on the items
- for (key, val) in user_missing_values.items():
- # Is the key a string ?
- if _is_string_like(key):
+ usecols = [_.strip() for _ in usecols.split(",")]
+ except AttributeError:
try:
- # Transform it into an integer
- key = names.index(key)
- except ValueError:
- # We couldn't find it: the name must have been dropped
- continue
- # Redefine the key as needed if it's a column number
- if usecols:
- try:
- key = usecols.index(key)
- except ValueError:
- pass
- # Transform the value as a list of string
- if isinstance(val, (list, tuple)):
- val = [str(_) for _ in val]
+ usecols = list(usecols)
+ except TypeError:
+ usecols = [usecols, ]
+ nbcols = len(usecols or first_values)
+
+ # Check the names and overwrite the dtype.names if needed
+ if names is True:
+ names = validate_names([str(_.strip()) for _ in first_values])
+ first_line = ''
+ elif _is_string_like(names):
+ names = validate_names([_.strip() for _ in names.split(',')])
+ elif names:
+ names = validate_names(names)
+ # Get the dtype
+ if dtype is not None:
+ dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
+ excludelist=excludelist,
+ deletechars=deletechars,
+ case_sensitive=case_sensitive,
+ replace_space=replace_space)
+ # Make sure the names is a list (for 2.5)
+ if names is not None:
+ names = list(names)
+
+ if usecols:
+ for (i, current) in enumerate(usecols):
+ # if usecols is a list of names, convert to a list of indices
+ if _is_string_like(current):
+ usecols[i] = names.index(current)
+ elif current < 0:
+ usecols[i] = current + len(first_values)
+ # If the dtype is not None, make sure we update it
+ if (dtype is not None) and (len(dtype) > nbcols):
+ descr = dtype.descr
+ dtype = np.dtype([descr[_] for _ in usecols])
+ names = list(dtype.names)
+ # If `names` is not None, update the names
+ elif (names is not None) and (len(names) > nbcols):
+ names = [names[_] for _ in usecols]
+ elif (names is not None) and (dtype is not None):
+ names = list(dtype.names)
+
+ # Process the missing values ...............................
+ # Rename missing_values for convenience
+ user_missing_values = missing_values or ()
+ if isinstance(user_missing_values, bytes):
+ user_missing_values = user_missing_values.decode('latin1')
+
+ # Define the list of missing_values (one column: one list)
+ missing_values = [list(['']) for _ in range(nbcols)]
+
+ # We have a dictionary: process it field by field
+ if isinstance(user_missing_values, dict):
+ # Loop on the items
+ for (key, val) in user_missing_values.items():
+ # Is the key a string ?
+ if _is_string_like(key):
+ try:
+ # Transform it into an integer
+ key = names.index(key)
+ except ValueError:
+ # We couldn't find it: the name must have been dropped
+ continue
+ # Redefine the key as needed if it's a column number
+ if usecols:
+ try:
+ key = usecols.index(key)
+ except ValueError:
+ pass
+ # Transform the value as a list of string
+ if isinstance(val, (list, tuple)):
+ val = [str(_) for _ in val]
+ else:
+ val = [str(val), ]
+ # Add the value(s) to the current list of missing
+ if key is None:
+ # None acts as default
+ for miss in missing_values:
+ miss.extend(val)
+ else:
+ missing_values[key].extend(val)
+ # We have a sequence : each item matches a column
+ elif isinstance(user_missing_values, (list, tuple)):
+ for (value, entry) in zip(user_missing_values, missing_values):
+ value = str(value)
+ if value not in entry:
+ entry.append(value)
+ # We have a string : apply it to all entries
+ elif isinstance(user_missing_values, basestring):
+ user_value = user_missing_values.split(",")
+ for entry in missing_values:
+ entry.extend(user_value)
+ # We have something else: apply it to all entries
+ else:
+ for entry in missing_values:
+ entry.extend([str(user_missing_values)])
+
+ # Process the filling_values ...............................
+ # Rename the input for convenience
+ user_filling_values = filling_values
+ if user_filling_values is None:
+ user_filling_values = []
+ # Define the default
+ filling_values = [None] * nbcols
+ # We have a dictionary : update each entry individually
+ if isinstance(user_filling_values, dict):
+ for (key, val) in user_filling_values.items():
+ if _is_string_like(key):
+ try:
+ # Transform it into an integer
+ key = names.index(key)
+ except ValueError:
+ # We couldn't find it: the name must have been dropped,
+ continue
+ # Redefine the key if it's a column number and usecols is defined
+ if usecols:
+ try:
+ key = usecols.index(key)
+ except ValueError:
+ pass
+ # Add the value to the list
+ filling_values[key] = val
+ # We have a sequence : update on a one-to-one basis
+ elif isinstance(user_filling_values, (list, tuple)):
+ n = len(user_filling_values)
+ if (n <= nbcols):
+ filling_values[:n] = user_filling_values
else:
- val = [str(val), ]
- # Add the value(s) to the current list of missing
- if key is None:
- # None acts as default
- for miss in missing_values:
- miss.extend(val)
+ filling_values = user_filling_values[:nbcols]
+ # We have something else : use it for all entries
+ else:
+ filling_values = [user_filling_values] * nbcols
+
+ # Initialize the converters ................................
+ if dtype is None:
+ # Note: we can't use a [...]*nbcols, as we would have 3 times the same
+ # ... converter, instead of 3 different converters.
+ converters = [StringConverter(None, missing_values=miss, default=fill)
+ for (miss, fill) in zip(missing_values, filling_values)]
+ else:
+ dtype_flat = flatten_dtype(dtype, flatten_base=True)
+ # Initialize the converters
+ if len(dtype_flat) > 1:
+ # Flexible type : get a converter from each dtype
+ zipit = zip(dtype_flat, missing_values, filling_values)
+ converters = [StringConverter(dt, locked=True,
+ missing_values=miss, default=fill)
+ for (dt, miss, fill) in zipit]
else:
- missing_values[key].extend(val)
- # We have a sequence : each item matches a column
- elif isinstance(user_missing_values, (list, tuple)):
- for (value, entry) in zip(user_missing_values, missing_values):
- value = str(value)
- if value not in entry:
- entry.append(value)
- # We have a string : apply it to all entries
- elif isinstance(user_missing_values, basestring):
- user_value = user_missing_values.split(",")
- for entry in missing_values:
- entry.extend(user_value)
- # We have something else: apply it to all entries
- else:
- for entry in missing_values:
- entry.extend([str(user_missing_values)])
-
- # Process the filling_values ...............................
- # Rename the input for convenience
- user_filling_values = filling_values
- if user_filling_values is None:
- user_filling_values = []
- # Define the default
- filling_values = [None] * nbcols
- # We have a dictionary : update each entry individually
- if isinstance(user_filling_values, dict):
- for (key, val) in user_filling_values.items():
- if _is_string_like(key):
+ # Set to a default converter (but w/ different missing values)
+ zipit = zip(missing_values, filling_values)
+ converters = [StringConverter(dtype, locked=True,
+ missing_values=miss, default=fill)
+ for (miss, fill) in zipit]
+ # Update the converters to use the user-defined ones
+ uc_update = []
+ for (j, conv) in user_converters.items():
+ # If the converter is specified by column names, use the index instead
+ if _is_string_like(j):
try:
- # Transform it into an integer
- key = names.index(key)
+ j = names.index(j)
+ i = j
except ValueError:
- # We couldn't find it: the name must have been dropped,
continue
- # Redefine the key if it's a column number and usecols is defined
- if usecols:
+ elif usecols:
try:
- key = usecols.index(key)
+ i = usecols.index(j)
except ValueError:
- pass
- # Add the value to the list
- filling_values[key] = val
- # We have a sequence : update on a one-to-one basis
- elif isinstance(user_filling_values, (list, tuple)):
- n = len(user_filling_values)
- if (n <= nbcols):
- filling_values[:n] = user_filling_values
- else:
- filling_values = user_filling_values[:nbcols]
- # We have something else : use it for all entries
- else:
- filling_values = [user_filling_values] * nbcols
-
- # Initialize the converters ................................
- if dtype is None:
- # Note: we can't use a [...]*nbcols, as we would have 3 times the same
- # ... converter, instead of 3 different converters.
- converters = [StringConverter(None, missing_values=miss, default=fill)
- for (miss, fill) in zip(missing_values, filling_values)]
- else:
- dtype_flat = flatten_dtype(dtype, flatten_base=True)
- # Initialize the converters
- if len(dtype_flat) > 1:
- # Flexible type : get a converter from each dtype
- zipit = zip(dtype_flat, missing_values, filling_values)
- converters = [StringConverter(dt, locked=True,
- missing_values=miss, default=fill)
- for (dt, miss, fill) in zipit]
- else:
- # Set to a default converter (but w/ different missing values)
- zipit = zip(missing_values, filling_values)
- converters = [StringConverter(dtype, locked=True,
- missing_values=miss, default=fill)
- for (miss, fill) in zipit]
- # Update the converters to use the user-defined ones
- uc_update = []
- for (j, conv) in user_converters.items():
- # If the converter is specified by column names, use the index instead
- if _is_string_like(j):
- try:
- j = names.index(j)
+ # Unused converter specified
+ continue
+ else:
i = j
- except ValueError:
- continue
- elif usecols:
- try:
- i = usecols.index(j)
- except ValueError:
- # Unused converter specified
+ # Find the value to test - first_line is not filtered by usecols:
+ if len(first_line):
+ testing_value = first_values[j]
+ else:
+ testing_value = None
+ if conv is bytes:
+ user_conv = asbytes
+ elif byte_converters:
+ # converters may use decode to workaround numpy's old behaviour,
+ # so encode the string again before passing to the user converter
+ def tobytes_first(x, conv):
+ if type(x) is bytes:
+ return conv(x)
+ return conv(x.encode("latin1"))
+ user_conv = functools.partial(tobytes_first, conv=conv)
+ else:
+ user_conv = conv
+ converters[i].update(user_conv, locked=True,
+ testing_value=testing_value,
+ default=filling_values[i],
+ missing_values=missing_values[i],)
+ uc_update.append((i, user_conv))
+ # Make sure we have the corrected keys in user_converters...
+ user_converters.update(uc_update)
+
+ # Fixme: possible error as following variable never used.
+ # miss_chars = [_.missing_values for _ in converters]
+
+ # Initialize the output lists ...
+ # ... rows
+ rows = []
+ append_to_rows = rows.append
+ # ... masks
+ if usemask:
+ masks = []
+ append_to_masks = masks.append
+ # ... invalid
+ invalid = []
+ append_to_invalid = invalid.append
+
+ # Parse each line
+ for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
+ values = split_line(line)
+ nbvalues = len(values)
+ # Skip an empty line
+ if nbvalues == 0:
continue
- else:
- i = j
- # Find the value to test - first_line is not filtered by usecols:
- if len(first_line):
- testing_value = first_values[j]
- else:
- testing_value = None
- if conv is bytes:
- user_conv = asbytes
- elif byte_converters:
- # converters may use decode to workaround numpy's old behaviour,
- # so encode the string again before passing to the user converter
- def tobytes_first(x, conv):
- if type(x) is bytes:
- return conv(x)
- return conv(x.encode("latin1"))
- import functools
- user_conv = functools.partial(tobytes_first, conv=conv)
- else:
- user_conv = conv
- converters[i].update(user_conv, locked=True,
- testing_value=testing_value,
- default=filling_values[i],
- missing_values=missing_values[i],)
- uc_update.append((i, user_conv))
- # Make sure we have the corrected keys in user_converters...
- user_converters.update(uc_update)
-
- # Fixme: possible error as following variable never used.
- # miss_chars = [_.missing_values for _ in converters]
-
- # Initialize the output lists ...
- # ... rows
- rows = []
- append_to_rows = rows.append
- # ... masks
- if usemask:
- masks = []
- append_to_masks = masks.append
- # ... invalid
- invalid = []
- append_to_invalid = invalid.append
-
- # Parse each line
- for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
- values = split_line(line)
- nbvalues = len(values)
- # Skip an empty line
- if nbvalues == 0:
- continue
- if usecols:
- # Select only the columns we need
- try:
- values = [values[_] for _ in usecols]
- except IndexError:
+ if usecols:
+ # Select only the columns we need
+ try:
+ values = [values[_] for _ in usecols]
+ except IndexError:
+ append_to_invalid((i + skip_header + 1, nbvalues))
+ continue
+ elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
- elif nbvalues != nbcols:
- append_to_invalid((i + skip_header + 1, nbvalues))
- continue
- # Store the values
- append_to_rows(tuple(values))
- if usemask:
- append_to_masks(tuple([v.strip() in m
- for (v, m) in zip(values,
- missing_values)]))
- if len(rows) == max_rows:
- break
-
- if own_fhd:
- fhd.close()
+ # Store the values
+ append_to_rows(tuple(values))
+ if usemask:
+ append_to_masks(tuple([v.strip() in m
+ for (v, m) in zip(values,
+ missing_values)]))
+ if len(rows) == max_rows:
+ break
# Upgrade the converters (if needed)
if dtype is None:
@@ -2095,10 +2159,10 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
if names is None:
# If the dtype is uniform (before sizing strings)
- base = set([
+ base = {
c_type
for c, c_type in zip(converters, column_types)
- if c._checked])
+ if c._checked}
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
@@ -2116,7 +2180,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
- if names and dtype.names:
+ if names and dtype.names is not None:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
@@ -2166,7 +2230,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
#
output = np.array(data, dtype)
if usemask:
- if dtype.names:
+ if dtype.names is not None:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
@@ -2192,6 +2256,12 @@ def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
+ .. deprecated:: 1.17
+        `ndfromtxt` is a deprecated alias of `genfromtxt` which
+ overwrites the ``usemask`` argument with `False` even when
+ explicitly called as ``ndfromtxt(..., usemask=True)``.
+ Use `genfromtxt` instead.
+
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
@@ -2202,6 +2272,11 @@ def ndfromtxt(fname, **kwargs):
"""
kwargs['usemask'] = False
+ # Numpy 1.17
+ warnings.warn(
+ "np.ndfromtxt is a deprecated alias of np.genfromtxt, "
+ "prefer the latter.",
+ DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
@@ -2209,6 +2284,12 @@ def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
+ .. deprecated:: 1.17
+ np.mafromtxt is a deprecated alias of `genfromtxt` which
+ overwrites the ``usemask`` argument with `True` even when
+ explicitly called as ``mafromtxt(..., usemask=False)``.
+ Use `genfromtxt` instead.
+
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
@@ -2219,6 +2300,11 @@ def mafromtxt(fname, **kwargs):
"""
kwargs['usemask'] = True
+ # Numpy 1.17
+ warnings.warn(
+ "np.mafromtxt is a deprecated alias of np.genfromtxt, "
+ "prefer the latter.",
+ DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
diff --git a/numpy/lib/polynomial.py b/numpy/lib/polynomial.py
index 9f3b84732..2c72f623c 100644
--- a/numpy/lib/polynomial.py
+++ b/numpy/lib/polynomial.py
@@ -8,17 +8,26 @@ __all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
+import functools
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
+from numpy.core import overrides
+from numpy.core.overrides import set_module
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
+@set_module('numpy')
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
@@ -29,6 +38,12 @@ class RankWarning(UserWarning):
"""
pass
+
+def _poly_dispatcher(seq_of_zeros):
+ return seq_of_zeros
+
+
+@array_function_dispatch(_poly_dispatcher)
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
@@ -95,7 +110,7 @@ def poly(seq_of_zeros):
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
- array([1, 0, 0, 0])
+ array([1., 0., 0., 0.])
The line above represents z**3 + 0*z**2 + 0*z + 0.
@@ -104,14 +119,14 @@ def poly(seq_of_zeros):
The line above represents z**3 - z/4
- >>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
- array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
+ >>> np.poly((np.random.random(1)[0], 0, np.random.random(1)[0]))
+ array([ 1. , -0.77086955, 0.08618131, 0. ]) # random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
- array([ 1. , 0. , 0.16666667])
+ array([1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
@@ -145,6 +160,12 @@ def poly(seq_of_zeros):
return a
+
+def _roots_dispatcher(p):
+ return p
+
+
+@array_function_dispatch(_roots_dispatcher)
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
@@ -229,6 +250,12 @@ def roots(p):
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
+
+def _polyint_dispatcher(p, m=None, k=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyint_dispatcher)
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
@@ -245,7 +272,7 @@ def polyint(p, m=1, k=None):
Parameters
----------
p : array_like or poly1d
- Polynomial to differentiate.
+ Polynomial to integrate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
@@ -268,7 +295,7 @@ def polyint(p, m=1, k=None):
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
- poly1d([ 0.33333333, 0.5 , 1. , 0. ])
+ poly1d([ 0.33333333, 0.5 , 1. , 0. ]) # may vary
>>> np.polyder(P) == p
True
@@ -283,7 +310,7 @@ def polyint(p, m=1, k=None):
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
- poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
+ poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ]) # may vary
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
@@ -322,6 +349,12 @@ def polyint(p, m=1, k=None):
return poly1d(val)
return val
+
+def _polyder_dispatcher(p, m=None):
+ return (p,)
+
+
+@array_function_dispatch(_polyder_dispatcher)
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
@@ -371,7 +404,7 @@ def polyder(p, m=1):
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
- poly1d([ 0.])
+ poly1d([0.])
"""
m = int(m)
@@ -390,6 +423,12 @@ def polyder(p, m=1):
val = poly1d(val)
return val
+
+def _polyfit_dispatcher(x, y, deg, rcond=None, full=None, w=None, cov=None):
+ return (x, y, w)
+
+
+@array_function_dispatch(_polyfit_dispatcher)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
@@ -424,9 +463,14 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
- cov : bool, optional
- Return the estimate and the covariance matrix of the estimate
- If full is True, then cov is not returned.
+ cov : bool or str, optional
+ If given and not `False`, return not just the estimate but also its
+ covariance matrix. By default, the covariance are scaled by
+ chi2/sqrt(N-dof), i.e., the weights are presumed to be unreliable
+ except in a relative sense and everything is scaled such that the
+ reduced chi2 is unity. This scaling is omitted if ``cov='unscaled'``,
+ as is relevant for the case that the weights are 1/sigma**2, with
+ sigma known to be a reliable estimate of the uncertainty.
Returns
-------
@@ -504,32 +548,35 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
Examples
--------
+ >>> import warnings
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
- array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
+ array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254]) # may vary
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
- 0.6143849206349179
+ 0.6143849206349179 # may vary
>>> p(3.5)
- -0.34732142857143039
+ -0.34732142857143039 # may vary
>>> p(10)
- 22.579365079365115
+ 22.579365079365115 # may vary
High-order polynomials may oscillate wildly:
- >>> p30 = np.poly1d(np.polyfit(x, y, 30))
- /... RankWarning: Polyfit may be poorly conditioned...
+ >>> with warnings.catch_warnings():
+ ... warnings.simplefilter('ignore', np.RankWarning)
+ ... p30 = np.poly1d(np.polyfit(x, y, 30))
+ ...
>>> p30(4)
- -0.80000000000000204
+ -0.80000000000000204 # may vary
>>> p30(5)
- -0.99999999999999445
+ -0.99999999999999445 # may vary
>>> p30(4.5)
- -0.10547061179440398
+ -0.10547061179440398 # may vary
Illustration:
@@ -587,21 +634,24 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
- warnings.warn(msg, RankWarning, stacklevel=2)
+ warnings.warn(msg, RankWarning, stacklevel=4)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
- # Some literature ignores the extra -2.0 factor in the denominator, but
- # it is included here because the covariance of Multivariate Student-T
- # (which is implied by a Bayesian uncertainty analysis) includes it.
- # Plus, it gives a slightly more conservative estimate of uncertainty.
- if len(x) <= order + 2:
- raise ValueError("the number of data points must exceed order + 2 "
- "for Bayesian estimate the covariance matrix")
- fac = resids / (len(x) - order - 2.0)
+ if cov == "unscaled":
+ fac = 1
+ else:
+ if len(x) <= order:
+ raise ValueError("the number of data points must exceed order "
+ "to scale the covariance matrix")
+ # note, this used to be: fac = resids / (len(x) - order - 2.0)
+            # it was decided that the "- 2" (originally justified by "Bayesian
+            # uncertainty analysis") is not what the user expects
+ # (see gh-11196 and gh-11197)
+ fac = resids / (len(x) - order)
if y.ndim == 1:
return c, Vbase * fac
else:
@@ -610,6 +660,11 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
return c
+def _polyval_dispatcher(p, x):
+ return (p, x)
+
+
+@array_function_dispatch(_polyval_dispatcher)
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
@@ -651,6 +706,8 @@ def polyval(p, x):
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
+ If `x` is a subtype of `ndarray` the return value will be of the same type.
+
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
@@ -662,23 +719,29 @@ def polyval(p, x):
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
- poly1d([ 76.])
+ poly1d([76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
- poly1d([ 76.])
+ poly1d([76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
- x = NX.asarray(x)
+ x = NX.asanyarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
+
+def _binary_op_dispatcher(a1, a2):
+ return (a1, a2)
+
+
+@array_function_dispatch(_binary_op_dispatcher)
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
@@ -739,6 +802,8 @@ def polyadd(a1, a2):
val = poly1d(val)
return val
+
+@array_function_dispatch(_binary_op_dispatcher)
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
@@ -786,6 +851,7 @@ def polysub(a1, a2):
return val
+@array_function_dispatch(_binary_op_dispatcher)
def polymul(a1, a2):
"""
Find the product of two polynomials.
@@ -810,8 +876,7 @@ def polymul(a1, a2):
See Also
--------
poly1d : A one-dimensional polynomial class.
- poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
- polyval
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
@@ -842,6 +907,12 @@ def polymul(a1, a2):
val = poly1d(val)
return val
+
+def _polydiv_dispatcher(u, v):
+ return (u, v)
+
+
+@array_function_dispatch(_polydiv_dispatcher)
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
@@ -867,7 +938,7 @@ def polydiv(u, v):
See Also
--------
- poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
+ poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub
polyval
Notes
@@ -884,7 +955,7 @@ def polydiv(u, v):
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
- (array([ 1.5 , 1.75]), array([ 0.25]))
+ (array([1.5 , 1.75]), array([0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d))
@@ -935,6 +1006,7 @@ def _raise_power(astr, wrap=70):
return output + astr[n:]
+@set_module('numpy')
class poly1d(object):
"""
A one-dimensional polynomial class.
@@ -978,7 +1050,7 @@ class poly1d(object):
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
- array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
+ array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j]) # may vary
These numbers in the previous line represent (0, 0) to machine precision
@@ -1005,7 +1077,7 @@ class poly1d(object):
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
- (poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
+ (poly1d([ 1., 4., 10., 12., 9.]), poly1d([4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
@@ -1027,7 +1099,7 @@ class poly1d(object):
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
- poly1d([ 1, -3, 2])
+ poly1d([ 1., -3., 2.])
This is the same polynomial as obtained by:
@@ -1039,8 +1111,14 @@ class poly1d(object):
@property
def coeffs(self):
- """ A copy of the polynomial coefficients """
- return self._coeffs.copy()
+ """ The polynomial coefficients """
+ return self._coeffs
+
+ @coeffs.setter
+ def coeffs(self, value):
+ # allowing this makes p.coeffs *= 2 legal
+ if value is not self._coeffs:
+ raise AttributeError("Cannot set attribute")
@property
def variable(self):
diff --git a/numpy/lib/recfunctions.py b/numpy/lib/recfunctions.py
index b6453d5a2..927161ddb 100644
--- a/numpy/lib/recfunctions.py
+++ b/numpy/lib/recfunctions.py
@@ -14,8 +14,10 @@ import numpy.ma as ma
from numpy import ndarray, recarray
from numpy.ma import MaskedArray
from numpy.ma.mrecords import MaskedRecords
+from numpy.core.overrides import array_function_dispatch
from numpy.lib._iotools import _is_string_like
from numpy.compat import basestring
+from numpy.testing import suppress_warnings
if sys.version_info[0] < 3:
from future_builtins import zip
@@ -24,13 +26,21 @@ _check_fill_value = np.ma.core._check_fill_value
__all__ = [
- 'append_fields', 'drop_fields', 'find_duplicates',
- 'get_fieldstructure', 'join_by', 'merge_arrays',
- 'rec_append_fields', 'rec_drop_fields', 'rec_join',
- 'recursive_fill_fields', 'rename_fields', 'stack_arrays',
+ 'append_fields', 'apply_along_fields', 'assign_fields_by_name',
+ 'drop_fields', 'find_duplicates', 'flatten_descr',
+ 'get_fieldstructure', 'get_names', 'get_names_flat',
+ 'join_by', 'merge_arrays', 'rec_append_fields',
+ 'rec_drop_fields', 'rec_join', 'recursive_fill_fields',
+ 'rename_fields', 'repack_fields', 'require_fields',
+ 'stack_arrays', 'structured_to_unstructured', 'unstructured_to_structured',
]
+def _recursive_fill_fields_dispatcher(input, output):
+ return (input, output)
+
+
+@array_function_dispatch(_recursive_fill_fields_dispatcher)
def recursive_fill_fields(input, output):
"""
Fills fields from output with fields from input,
@@ -50,11 +60,10 @@ def recursive_fill_fields(input, output):
Examples
--------
>>> from numpy.lib import recfunctions as rfn
- >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', int), ('B', float)])
+ >>> a = np.array([(1, 10.), (2, 20.)], dtype=[('A', np.int64), ('B', np.float64)])
>>> b = np.zeros((3,), dtype=a.dtype)
>>> rfn.recursive_fill_fields(a, b)
- array([(1, 10.0), (2, 20.0), (0, 0.0)],
- dtype=[('A', '<i4'), ('B', '<f8')])
+ array([(1, 10.), (2, 20.), (0, 0.)], dtype=[('A', '<i8'), ('B', '<f8')])
"""
newdtype = output.dtype
@@ -63,14 +72,14 @@ def recursive_fill_fields(input, output):
current = input[field]
except ValueError:
continue
- if current.dtype.names:
+ if current.dtype.names is not None:
recursive_fill_fields(current, output[field])
else:
output[field][:len(current)] = current
return output
-def get_fieldspec(dtype):
+def _get_fieldspec(dtype):
"""
Produce a list of name/dtype pairs corresponding to the dtype fields
@@ -82,11 +91,11 @@ def get_fieldspec(dtype):
Examples
--------
- >>> dt = np.dtype([(('a', 'A'), int), ('b', float, 3)])
+ >>> dt = np.dtype([(('a', 'A'), np.int64), ('b', np.double, 3)])
>>> dt.descr
- [(('a', 'A'), '<i4'), ('b', '<f8', (3,))]
- >>> get_fieldspec(dt)
- [(('a', 'A'), dtype('int32')), ('b', dtype(('<f8', (3,))))]
+ [(('a', 'A'), '<i8'), ('b', '<f8', (3,))]
+ >>> _get_fieldspec(dt)
+ [(('a', 'A'), dtype('int64')), ('b', dtype(('<f8', (3,))))]
"""
if dtype.names is None:
@@ -96,7 +105,7 @@ def get_fieldspec(dtype):
fields = ((name, dtype.fields[name]) for name in dtype.names)
# keep any titles, if present
return [
- (name if len(f) == 2 else (f[2], name), f[0])
+ (name if len(f) == 2 else (f[2], name), f[0])
for name, f in fields
]
@@ -113,10 +122,15 @@ def get_names(adtype):
Examples
--------
>>> from numpy.lib import recfunctions as rfn
- >>> rfn.get_names(np.empty((1,), dtype=int)) is None
- True
+ >>> rfn.get_names(np.empty((1,), dtype=int))
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
+
>>> rfn.get_names(np.empty((1,), dtype=[('A',int), ('B', float)]))
- ('A', 'B')
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names(adtype)
('a', ('b', ('ba', 'bb')))
@@ -125,17 +139,17 @@ def get_names(adtype):
names = adtype.names
for name in names:
current = adtype[name]
- if current.names:
+ if current.names is not None:
listnames.append((name, tuple(get_names(current))))
else:
listnames.append(name)
- return tuple(listnames) or None
+ return tuple(listnames)
def get_names_flat(adtype):
"""
Returns the field names of the input datatype as a tuple. Nested structure
- are flattend beforehand.
+ are flattened beforehand.
Parameters
----------
@@ -146,9 +160,13 @@ def get_names_flat(adtype):
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.get_names_flat(np.empty((1,), dtype=int)) is None
- True
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> rfn.get_names_flat(np.empty((1,), dtype=[('A',int), ('B', float)]))
- ('A', 'B')
+ Traceback (most recent call last):
+ ...
+ AttributeError: 'numpy.ndarray' object has no attribute 'names'
>>> adtype = np.dtype([('a', int), ('b', [('ba', int), ('bb', int)])])
>>> rfn.get_names_flat(adtype)
('a', 'b', 'ba', 'bb')
@@ -158,9 +176,9 @@ def get_names_flat(adtype):
for name in names:
listnames.append(name)
current = adtype[name]
- if current.names:
+ if current.names is not None:
listnames.extend(get_names_flat(current))
- return tuple(listnames) or None
+ return tuple(listnames)
def flatten_descr(ndtype):
@@ -182,14 +200,14 @@ def flatten_descr(ndtype):
descr = []
for field in names:
(typ, _) = ndtype.fields[field]
- if typ.names:
+ if typ.names is not None:
descr.extend(flatten_descr(typ))
else:
descr.append((field, typ))
return tuple(descr)
-def zip_dtype(seqarrays, flatten=False):
+def _zip_dtype(seqarrays, flatten=False):
newdtype = []
if flatten:
for a in seqarrays:
@@ -197,15 +215,15 @@ def zip_dtype(seqarrays, flatten=False):
else:
for a in seqarrays:
current = a.dtype
- if current.names and len(current.names) <= 1:
- # special case - dtypes of 0 or 1 field are flattened
- newdtype.extend(get_fieldspec(current))
+ if current.names is not None and len(current.names) == 1:
+ # special case - dtypes of 1 field are flattened
+ newdtype.extend(_get_fieldspec(current))
else:
newdtype.append(('', current))
return np.dtype(newdtype)
-def zip_descr(seqarrays, flatten=False):
+def _zip_descr(seqarrays, flatten=False):
"""
Combine the dtype description of a series of arrays.
@@ -216,7 +234,7 @@ def zip_descr(seqarrays, flatten=False):
flatten : {boolean}, optional
Whether to collapse nested descriptions.
"""
- return zip_dtype(seqarrays, flatten=flatten).descr
+ return _zip_dtype(seqarrays, flatten=flatten).descr
def get_fieldstructure(adtype, lastname=None, parents=None,):
@@ -250,7 +268,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
names = adtype.names
for name in names:
current = adtype[name]
- if current.names:
+ if current.names is not None:
if lastname:
parents[name] = [lastname, ]
else:
@@ -263,7 +281,7 @@ def get_fieldstructure(adtype, lastname=None, parents=None,):
elif lastname:
lastparent = [lastname, ]
parents[name] = lastparent or []
- return parents or None
+ return parents
def _izip_fields_flat(iterable):
@@ -297,7 +315,7 @@ def _izip_fields(iterable):
yield element
-def izip_records(seqarrays, fill_value=None, flatten=True):
+def _izip_records(seqarrays, fill_value=None, flatten=True):
"""
Returns an iterator of concatenated items from a sequence of arrays.
@@ -357,6 +375,12 @@ def _fix_defaults(output, defaults=None):
return output
+def _merge_arrays_dispatcher(seqarrays, fill_value=None, flatten=None,
+ usemask=None, asrecarray=None):
+ return seqarrays
+
+
+@array_function_dispatch(_merge_arrays_dispatcher)
def merge_arrays(seqarrays, fill_value=-1, flatten=False,
usemask=False, asrecarray=False):
"""
@@ -379,20 +403,18 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
--------
>>> from numpy.lib import recfunctions as rfn
>>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])))
- masked_array(data = [(1, 10.0) (2, 20.0) (--, 30.0)],
- mask = [(False, False) (False, False) (True, False)],
- fill_value = (999999, 1e+20),
- dtype = [('f0', '<i4'), ('f1', '<f8')])
-
- >>> rfn.merge_arrays((np.array([1, 2]), np.array([10., 20., 30.])),
- ... usemask=False)
- array([(1, 10.0), (2, 20.0), (-1, 30.0)],
- dtype=[('f0', '<i4'), ('f1', '<f8')])
- >>> rfn.merge_arrays((np.array([1, 2]).view([('a', int)]),
+ array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+
+ >>> rfn.merge_arrays((np.array([1, 2], dtype=np.int64),
+ ... np.array([10., 20., 30.])), usemask=False)
+ array([(1, 10.0), (2, 20.0), (-1, 30.0)],
+ dtype=[('f0', '<i8'), ('f1', '<f8')])
+ >>> rfn.merge_arrays((np.array([1, 2]).view([('a', np.int64)]),
... np.array([10., 20., 30.])),
... usemask=False, asrecarray=True)
- rec.array([(1, 10.0), (2, 20.0), (-1, 30.0)],
- dtype=[('a', '<i4'), ('f1', '<f8')])
+ rec.array([( 1, 10.), ( 2, 20.), (-1, 30.)],
+ dtype=[('a', '<i8'), ('f1', '<f8')])
Notes
-----
@@ -413,9 +435,9 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
if isinstance(seqarrays, (ndarray, np.void)):
seqdtype = seqarrays.dtype
# Make sure we have named fields
- if not seqdtype.names:
+ if seqdtype.names is None:
seqdtype = np.dtype([('', seqdtype)])
- if not flatten or zip_dtype((seqarrays,), flatten=True) == seqdtype:
+ if not flatten or _zip_dtype((seqarrays,), flatten=True) == seqdtype:
# Minimal processing needed: just make sure everythng's a-ok
seqarrays = seqarrays.ravel()
# Find what type of array we must return
@@ -438,7 +460,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
sizes = tuple(a.size for a in seqarrays)
maxlength = max(sizes)
# Get the dtype of the output (flattening if needed)
- newdtype = zip_dtype(seqarrays, flatten=flatten)
+ newdtype = _zip_dtype(seqarrays, flatten=flatten)
# Initialize the sequences for data and mask
seqdata = []
seqmask = []
@@ -466,9 +488,9 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
seqdata.append(itertools.chain(data, [fval] * nbmissing))
seqmask.append(itertools.chain(mask, [fmsk] * nbmissing))
# Create an iterator for the data
- data = tuple(izip_records(seqdata, flatten=flatten))
+ data = tuple(_izip_records(seqdata, flatten=flatten))
output = ma.array(np.fromiter(data, dtype=newdtype, count=maxlength),
- mask=list(izip_records(seqmask, flatten=flatten)))
+ mask=list(_izip_records(seqmask, flatten=flatten)))
if asrecarray:
output = output.view(MaskedRecords)
else:
@@ -486,7 +508,7 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
else:
fval = None
seqdata.append(itertools.chain(data, [fval] * nbmissing))
- output = np.fromiter(tuple(izip_records(seqdata, flatten=flatten)),
+ output = np.fromiter(tuple(_izip_records(seqdata, flatten=flatten)),
dtype=newdtype, count=maxlength)
if asrecarray:
output = output.view(recarray)
@@ -494,12 +516,21 @@ def merge_arrays(seqarrays, fill_value=-1, flatten=False,
return output
+def _drop_fields_dispatcher(base, drop_names, usemask=None, asrecarray=None):
+ return (base,)
+
+
+@array_function_dispatch(_drop_fields_dispatcher)
def drop_fields(base, drop_names, usemask=True, asrecarray=False):
"""
Return a new array with fields in `drop_names` dropped.
Nested fields are supported.
+ ..versionchanged: 1.18.0
+ `drop_fields` returns an array with 0 fields if all fields are dropped,
+ rather than returning ``None`` as it did previously.
+
Parameters
----------
base : array
@@ -518,16 +549,14 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
--------
>>> from numpy.lib import recfunctions as rfn
>>> a = np.array([(1, (2, 3.0)), (4, (5, 6.0))],
- ... dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ ... dtype=[('a', np.int64), ('b', [('ba', np.double), ('bb', np.int64)])])
>>> rfn.drop_fields(a, 'a')
- array([((2.0, 3),), ((5.0, 6),)],
- dtype=[('b', [('ba', '<f8'), ('bb', '<i4')])])
+ array([((2., 3),), ((5., 6),)],
+ dtype=[('b', [('ba', '<f8'), ('bb', '<i8')])])
>>> rfn.drop_fields(a, 'ba')
- array([(1, (3,)), (4, (6,))],
- dtype=[('a', '<i4'), ('b', [('bb', '<i4')])])
+ array([(1, (3,)), (4, (6,))], dtype=[('a', '<i8'), ('b', [('bb', '<i8')])])
>>> rfn.drop_fields(a, ['ba', 'bb'])
- array([(1,), (4,)],
- dtype=[('a', '<i4')])
+ array([(1,), (4,)], dtype=[('a', '<i8')])
"""
if _is_string_like(drop_names):
drop_names = [drop_names]
@@ -541,7 +570,7 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
current = ndtype[name]
if name in drop_names:
continue
- if current.names:
+ if current.names is not None:
descr = _drop_descr(current, drop_names)
if descr:
newdtype.append((name, descr))
@@ -550,8 +579,6 @@ def drop_fields(base, drop_names, usemask=True, asrecarray=False):
return newdtype
newdtype = _drop_descr(base.dtype, drop_names)
- if not newdtype:
- return None
output = np.empty(base.shape, dtype=newdtype)
output = recursive_fill_fields(base, output)
@@ -583,6 +610,11 @@ def _keep_fields(base, keep_names, usemask=True, asrecarray=False):
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+def _rec_drop_fields_dispatcher(base, drop_names):
+ return (base,)
+
+
+@array_function_dispatch(_rec_drop_fields_dispatcher)
def rec_drop_fields(base, drop_names):
"""
Returns a new numpy.recarray with fields in `drop_names` dropped.
@@ -590,6 +622,11 @@ def rec_drop_fields(base, drop_names):
return drop_fields(base, drop_names, usemask=False, asrecarray=True)
+def _rename_fields_dispatcher(base, namemapper):
+ return (base,)
+
+
+@array_function_dispatch(_rename_fields_dispatcher)
def rename_fields(base, namemapper):
"""
Rename the fields from a flexible-datatype ndarray or recarray.
@@ -609,8 +646,8 @@ def rename_fields(base, namemapper):
>>> a = np.array([(1, (2, [3.0, 30.])), (4, (5, [6.0, 60.]))],
... dtype=[('a', int),('b', [('ba', float), ('bb', (float, 2))])])
>>> rfn.rename_fields(a, {'a':'A', 'bb':'BB'})
- array([(1, (2.0, [3.0, 30.0])), (4, (5.0, [6.0, 60.0]))],
- dtype=[('A', '<i4'), ('b', [('ba', '<f8'), ('BB', '<f8', 2)])])
+ array([(1, (2., [ 3., 30.])), (4, (5., [ 6., 60.]))],
+ dtype=[('A', '<i8'), ('b', [('ba', '<f8'), ('BB', '<f8', (2,))])])
"""
def _recursive_rename_fields(ndtype, namemapper):
@@ -618,7 +655,7 @@ def rename_fields(base, namemapper):
for name in ndtype.names:
newname = namemapper.get(name, name)
current = ndtype[name]
- if current.names:
+ if current.names is not None:
newdtype.append(
(newname, _recursive_rename_fields(current, namemapper))
)
@@ -629,6 +666,14 @@ def rename_fields(base, namemapper):
return base.view(newdtype)
+def _append_fields_dispatcher(base, names, data, dtypes=None,
+ fill_value=None, usemask=None, asrecarray=None):
+ yield base
+ for d in data:
+ yield d
+
+
+@array_function_dispatch(_append_fields_dispatcher)
def append_fields(base, names, data, dtypes=None,
fill_value=-1, usemask=True, asrecarray=False):
"""
@@ -692,13 +737,20 @@ def append_fields(base, names, data, dtypes=None,
#
output = ma.masked_all(
max(len(base), len(data)),
- dtype=get_fieldspec(base.dtype) + get_fieldspec(data.dtype))
+ dtype=_get_fieldspec(base.dtype) + _get_fieldspec(data.dtype))
output = recursive_fill_fields(base, output)
output = recursive_fill_fields(data, output)
#
return _fix_output(output, usemask=usemask, asrecarray=asrecarray)
+def _rec_append_fields_dispatcher(base, names, data, dtypes=None):
+ yield base
+ for d in data:
+ yield d
+
+
+@array_function_dispatch(_rec_append_fields_dispatcher)
def rec_append_fields(base, names, data, dtypes=None):
"""
Add new fields to an existing array.
@@ -732,6 +784,12 @@ def rec_append_fields(base, names, data, dtypes=None):
return append_fields(base, names, data=data, dtypes=dtypes,
asrecarray=True, usemask=False)
+
+def _repack_fields_dispatcher(a, align=None, recurse=None):
+ return (a,)
+
+
+@array_function_dispatch(_repack_fields_dispatcher)
def repack_fields(a, align=False, recurse=False):
"""
Re-pack the fields of a structured array or dtype in memory.
@@ -770,22 +828,23 @@ def repack_fields(a, align=False, recurse=False):
Examples
--------
+ >>> from numpy.lib import recfunctions as rfn
>>> def print_offsets(d):
... print("offsets:", [d.fields[name][1] for name in d.names])
... print("itemsize:", d.itemsize)
...
- >>> dt = np.dtype('u1,i4,f4', align=True)
+ >>> dt = np.dtype('u1, <i8, <f8', align=True)
>>> dt
- dtype({'names':['f0','f1','f2'], 'formats':['u1','<i4','<f8'], 'offsets':[0,4,8], 'itemsize':16}, align=True)
+ dtype({'names':['f0','f1','f2'], 'formats':['u1','<i8','<f8'], 'offsets':[0,8,16], 'itemsize':24}, align=True)
>>> print_offsets(dt)
- offsets: [0, 4, 8]
- itemsize: 16
- >>> packed_dt = repack_fields(dt)
+ offsets: [0, 8, 16]
+ itemsize: 24
+ >>> packed_dt = rfn.repack_fields(dt)
>>> packed_dt
- dtype([('f0', 'u1'), ('f1', '<i4'), ('f2', '<f8')])
+ dtype([('f0', 'u1'), ('f1', '<i8'), ('f2', '<f8')])
>>> print_offsets(packed_dt)
- offsets: [0, 1, 5]
- itemsize: 13
+ offsets: [0, 1, 9]
+ itemsize: 17
"""
if not isinstance(a, np.dtype):
@@ -811,6 +870,388 @@ def repack_fields(a, align=False, recurse=False):
dt = np.dtype(fieldinfo, align=align)
return np.dtype((a.type, dt))
+def _get_fields_and_offsets(dt, offset=0):
+ """
+ Returns a flat list of (dtype, count, offset) tuples of all the
+ scalar fields in the dtype "dt", including nested fields, in left
+ to right order.
+ """
+
+ # counts up elements in subarrays, including nested subarrays, and returns
+ # base dtype and count
+ def count_elem(dt):
+ count = 1
+ while dt.shape != ():
+ for size in dt.shape:
+ count *= size
+ dt = dt.base
+ return dt, count
+
+ fields = []
+ for name in dt.names:
+ field = dt.fields[name]
+ f_dt, f_offset = field[0], field[1]
+ f_dt, n = count_elem(f_dt)
+
+ if f_dt.names is None:
+ fields.append((np.dtype((f_dt, (n,))), n, f_offset + offset))
+ else:
+ subfields = _get_fields_and_offsets(f_dt, f_offset + offset)
+ size = f_dt.itemsize
+
+ for i in range(n):
+ if i == 0:
+ # optimization: avoid list comprehension if no subarray
+ fields.extend(subfields)
+ else:
+ fields.extend([(d, c, o + i*size) for d, c, o in subfields])
+ return fields
+
+
+def _structured_to_unstructured_dispatcher(arr, dtype=None, copy=None,
+ casting=None):
+ return (arr,)
+
+@array_function_dispatch(_structured_to_unstructured_dispatcher)
+def structured_to_unstructured(arr, dtype=None, copy=False, casting='unsafe'):
+ """
+ Converts and n-D structured array into an (n+1)-D unstructured array.
+
+ The new array will have a new last dimension equal in size to the
+ number of field-elements of the input array. If not supplied, the output
+ datatype is determined from the numpy type promotion rules applied to all
+ the field datatypes.
+
+ Nested fields, as well as each element of any subarray fields, all count
+ as a single field-elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Structured array or dtype to convert. Cannot contain object datatype.
+ dtype : dtype, optional
+ The dtype of the output unstructured array.
+ copy : bool, optional
+ See copy argument to `ndarray.astype`. If true, always return a copy.
+ If false, and `dtype` requirements are satisfied, a view is returned.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `ndarray.astype`. Controls what kind of data
+ casting may occur.
+
+ Returns
+ -------
+ unstructured : ndarray
+ Unstructured array with one more dimension.
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a
+ array([(0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.]),
+ (0, (0., 0), [0., 0.]), (0, (0., 0), [0., 0.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+ >>> rfn.structured_to_unstructured(a)
+ array([[0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.],
+ [0., 0., 0., 0., 0.]])
+
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> np.mean(rfn.structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ array([ 3. , 5.5, 9. , 11. ])
+
+ """
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ fields = _get_fields_and_offsets(arr.dtype)
+ n_fields = len(fields)
+ if n_fields == 0 and dtype is None:
+ raise ValueError("arr has no fields. Unable to guess dtype")
+ elif n_fields == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("arr with no fields is not supported")
+
+ dts, counts, offsets = zip(*fields)
+ names = ['f{}'.format(n) for n in range(n_fields)]
+
+ if dtype is None:
+ out_dtype = np.result_type(*[dt.base for dt in dts])
+ else:
+ out_dtype = dtype
+
+ # Use a series of views and casts to convert to an unstructured array:
+
+ # first view using flattened fields (doesn't work for object arrays)
+ # Note: dts may include a shape for subarrays
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': arr.dtype.itemsize})
+ with suppress_warnings() as sup: # until 1.16 (gh-12447)
+ sup.filter(FutureWarning, "Numpy has detected")
+ arr = arr.view(flattened_fields)
+
+ # next cast to a packed format with all fields converted to new dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(out_dtype, dt.shape) for dt in dts]})
+ arr = arr.astype(packed_fields, copy=copy, casting=casting)
+
+ # finally is it safe to view the packed fields as the unstructured type
+ return arr.view((out_dtype, (sum(counts),)))
+
+
+def _unstructured_to_structured_dispatcher(arr, dtype=None, names=None,
+ align=None, copy=None, casting=None):
+ return (arr,)
+
+@array_function_dispatch(_unstructured_to_structured_dispatcher)
+def unstructured_to_structured(arr, dtype=None, names=None, align=False,
+ copy=False, casting='unsafe'):
+ """
+ Converts and n-D unstructured array into an (n-1)-D structured array.
+
+ The last dimension of the input array is converted into a structure, with
+ number of field-elements equal to the size of the last dimension of the
+ input array. By default all output fields have the input array's dtype, but
+ an output structured dtype with an equal number of fields-elements can be
+ supplied instead.
+
+ Nested fields, as well as each element of any subarray fields, all count
+ towards the number of field-elements.
+
+ Parameters
+ ----------
+ arr : ndarray
+ Unstructured array or dtype to convert.
+ dtype : dtype, optional
+ The structured dtype of the output array
+ names : list of strings, optional
+ If dtype is not supplied, this specifies the field names for the output
+ dtype, in order. The field dtypes will be the same as the input array.
+ align : boolean, optional
+ Whether to create an aligned memory layout.
+ copy : bool, optional
+ See copy argument to `ndarray.astype`. If true, always return a copy.
+ If false, and `dtype` requirements are satisfied, a view is returned.
+ casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
+ See casting argument of `ndarray.astype`. Controls what kind of data
+ casting may occur.
+
+ Returns
+ -------
+ structured : ndarray
+ Structured array with fewer dimensions.
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> dt = np.dtype([('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ >>> a = np.arange(20).reshape((4,5))
+ >>> a
+ array([[ 0, 1, 2, 3, 4],
+ [ 5, 6, 7, 8, 9],
+ [10, 11, 12, 13, 14],
+ [15, 16, 17, 18, 19]])
+ >>> rfn.unstructured_to_structured(a, dt)
+ array([( 0, ( 1., 2), [ 3., 4.]), ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]), (15, (16., 17), [18., 19.])],
+ dtype=[('a', '<i4'), ('b', [('f0', '<f4'), ('f1', '<u2')]), ('c', '<f4', (2,))])
+
+ """
+ if arr.shape == ():
+ raise ValueError('arr must have at least one dimension')
+ n_elem = arr.shape[-1]
+ if n_elem == 0:
+ # too many bugs elsewhere for this to work now
+ raise NotImplementedError("last axis with size 0 is not supported")
+
+ if dtype is None:
+ if names is None:
+ names = ['f{}'.format(n) for n in range(n_elem)]
+ out_dtype = np.dtype([(n, arr.dtype) for n in names], align=align)
+ fields = _get_fields_and_offsets(out_dtype)
+ dts, counts, offsets = zip(*fields)
+ else:
+ if names is not None:
+ raise ValueError("don't supply both dtype and names")
+ # sanity check of the input dtype
+ fields = _get_fields_and_offsets(dtype)
+ if len(fields) == 0:
+ dts, counts, offsets = [], [], []
+ else:
+ dts, counts, offsets = zip(*fields)
+
+ if n_elem != sum(counts):
+ raise ValueError('The length of the last dimension of arr must '
+ 'be equal to the number of fields in dtype')
+ out_dtype = dtype
+ if align and not out_dtype.isalignedstruct:
+ raise ValueError("align was True but dtype is not aligned")
+
+ names = ['f{}'.format(n) for n in range(len(fields))]
+
+ # Use a series of views and casts to convert to a structured array:
+
+ # first view as a packed structured array of one dtype
+ packed_fields = np.dtype({'names': names,
+ 'formats': [(arr.dtype, dt.shape) for dt in dts]})
+ arr = np.ascontiguousarray(arr).view(packed_fields)
+
+ # next cast to an unpacked but flattened format with varied dtypes
+ flattened_fields = np.dtype({'names': names,
+ 'formats': dts,
+ 'offsets': offsets,
+ 'itemsize': out_dtype.itemsize})
+ arr = arr.astype(flattened_fields, copy=copy, casting=casting)
+
+ # finally view as the final nested dtype and remove the last axis
+ return arr.view(out_dtype)[..., 0]
+
+def _apply_along_fields_dispatcher(func, arr):
+ return (arr,)
+
+@array_function_dispatch(_apply_along_fields_dispatcher)
+def apply_along_fields(func, arr):
+ """
+ Apply function 'func' as a reduction across fields of a structured array.
+
+ This is similar to `apply_along_axis`, but treats the fields of a
+ structured array as an extra axis. The fields are all first cast to a
+ common type following the type-promotion rules from `numpy.result_type`
+ applied to the field's dtypes.
+
+ Parameters
+ ----------
+ func : function
+ Function to apply on the "field" dimension. This function must
+ support an `axis` argument, like np.mean, np.sum, etc.
+ arr : ndarray
+ Structured array for which to apply func.
+
+ Returns
+ -------
+ out : ndarray
+ Result of the recution operation
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ ... dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ >>> rfn.apply_along_fields(np.mean, b)
+ array([ 2.66666667, 5.33333333, 8.66666667, 11. ])
+ >>> rfn.apply_along_fields(np.mean, b[['x', 'z']])
+ array([ 3. , 5.5, 9. , 11. ])
+
+ """
+ if arr.dtype.names is None:
+ raise ValueError('arr must be a structured array')
+
+ uarr = structured_to_unstructured(arr)
+ return func(uarr, axis=-1)
+ # works and avoids axis requirement, but very, very slow:
+ #return np.apply_along_axis(func, -1, uarr)
+
+def _assign_fields_by_name_dispatcher(dst, src, zero_unassigned=None):
+ return dst, src
+
+@array_function_dispatch(_assign_fields_by_name_dispatcher)
+def assign_fields_by_name(dst, src, zero_unassigned=True):
+ """
+ Assigns values from one structured array to another by field name.
+
+ Normally in numpy >= 1.14, assignment of one structured array to another
+ copies fields "by position", meaning that the first field from the src is
+ copied to the first field of the dst, and so on, regardless of field name.
+
+ This function instead copies "by field name", such that fields in the dst
+ are assigned from the identically named field in the src. This applies
+ recursively for nested structures. This is how structure assignment worked
+ in numpy >= 1.6 to <= 1.13.
+
+ Parameters
+ ----------
+ dst : ndarray
+ src : ndarray
+ The source and destination arrays during assignment.
+ zero_unassigned : bool, optional
+ If True, fields in the dst for which there was no matching
+ field in the src are filled with the value 0 (zero). This
+ was the behavior of numpy <= 1.13. If False, those fields
+ are not modified.
+ """
+
+ if dst.dtype.names is None:
+ dst[...] = src
+ return
+
+ for name in dst.dtype.names:
+ if name not in src.dtype.names:
+ if zero_unassigned:
+ dst[name] = 0
+ else:
+ assign_fields_by_name(dst[name], src[name],
+ zero_unassigned)
+
+def _require_fields_dispatcher(array, required_dtype):
+ return (array,)
+
+@array_function_dispatch(_require_fields_dispatcher)
+def require_fields(array, required_dtype):
+ """
+ Casts a structured array to a new dtype using assignment by field-name.
+
+ This function assigns from the old to the new array by name, so the
+ value of a field in the output array is the value of the field with the
+ same name in the source array. This has the effect of creating a new
+ ndarray containing only the fields "required" by the required_dtype.
+
+ If a field name in the required_dtype does not exist in the
+ input array, that field is created and set to 0 in the output array.
+
+ Parameters
+ ----------
+ a : ndarray
+ array to cast
+ required_dtype : dtype
+ datatype for output array
+
+ Returns
+ -------
+ out : ndarray
+ array with the new dtype, with field values copied from the fields in
+ the input array with the same name
+
+ Examples
+ --------
+
+ >>> from numpy.lib import recfunctions as rfn
+ >>> a = np.ones(4, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('c', 'u1')])
+ array([(1., 1), (1., 1), (1., 1), (1., 1)],
+ dtype=[('b', '<f4'), ('c', 'u1')])
+ >>> rfn.require_fields(a, [('b', 'f4'), ('newf', 'u1')])
+ array([(1., 0), (1., 0), (1., 0), (1., 0)],
+ dtype=[('b', '<f4'), ('newf', 'u1')])
+
+ """
+ out = np.empty(array.shape, dtype=required_dtype)
+ assign_fields_by_name(out, array)
+ return out
+
+
+def _stack_arrays_dispatcher(arrays, defaults=None, usemask=None,
+ asrecarray=None, autoconvert=None):
+ return arrays
+
+
+@array_function_dispatch(_stack_arrays_dispatcher)
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
autoconvert=False):
"""
@@ -839,15 +1280,16 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
True
>>> z = np.array([('A', 1), ('B', 2)], dtype=[('A', '|S3'), ('B', float)])
>>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
- ... dtype=[('A', '|S3'), ('B', float), ('C', float)])
+ ... dtype=[('A', '|S3'), ('B', np.double), ('C', np.double)])
>>> test = rfn.stack_arrays((z,zz))
>>> test
- masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0) ('b', 20.0, 200.0)
- ('c', 30.0, 300.0)],
- mask = [(False, False, True) (False, False, True) (False, False, False)
- (False, False, False) (False, False, False)],
- fill_value = ('N/A', 1e+20, 1e+20),
- dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
+ masked_array(data=[(b'A', 1.0, --), (b'B', 2.0, --), (b'a', 10.0, 100.0),
+ (b'b', 20.0, 200.0), (b'c', 30.0, 300.0)],
+ mask=[(False, False, True), (False, False, True),
+ (False, False, False), (False, False, False),
+ (False, False, False)],
+ fill_value=(b'N/A', 1.e+20, 1.e+20),
+ dtype=[('A', 'S3'), ('B', '<f8'), ('C', '<f8')])
"""
if isinstance(arrays, ndarray):
@@ -860,10 +1302,10 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
fldnames = [d.names for d in ndtype]
#
dtype_l = ndtype[0]
- newdescr = get_fieldspec(dtype_l)
+ newdescr = _get_fieldspec(dtype_l)
names = [n for n, d in newdescr]
for dtype_n in ndtype[1:]:
- for fname, fdtype in get_fieldspec(dtype_n):
+ for fname, fdtype in _get_fieldspec(dtype_n):
if fname not in names:
newdescr.append((fname, fdtype))
names.append(fname)
@@ -897,6 +1339,12 @@ def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False,
usemask=usemask, asrecarray=asrecarray)
+def _find_duplicates_dispatcher(
+ a, key=None, ignoremask=None, return_index=None):
+ return (a,)
+
+
+@array_function_dispatch(_find_duplicates_dispatcher)
def find_duplicates(a, key=None, ignoremask=True, return_index=False):
"""
Find the duplicates in a structured array along a given key
@@ -920,7 +1368,10 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
>>> a = np.ma.array([1, 1, 1, 2, 2, 3, 3],
... mask=[0, 0, 1, 0, 0, 0, 1]).view(ndtype)
>>> rfn.find_duplicates(a, ignoremask=True, return_index=True)
- ... # XXX: judging by the output, the ignoremask flag has no effect
+ (masked_array(data=[(1,), (1,), (2,), (2,)],
+ mask=[(False,), (False,), (False,), (False,)],
+ fill_value=(999999,),
+ dtype=[('a', '<i8')]), array([0, 1, 3, 4]))
"""
a = np.asanyarray(a).ravel()
# Get a dictionary of fields
@@ -951,8 +1402,15 @@ def find_duplicates(a, key=None, ignoremask=True, return_index=False):
return duplicates
+def _join_by_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None, usemask=None, asrecarray=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_join_by_dispatcher)
def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
- defaults=None, usemask=True, asrecarray=False):
+ defaults=None, usemask=True, asrecarray=False):
"""
Join arrays `r1` and `r2` on key `key`.
@@ -1070,15 +1528,15 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
#
# Build the new description of the output array .......
# Start with the key fields
- ndtype = get_fieldspec(r1k.dtype)
+ ndtype = _get_fieldspec(r1k.dtype)
# Add the fields from r1
- for fname, fdtype in get_fieldspec(r1.dtype):
+ for fname, fdtype in _get_fieldspec(r1.dtype):
if fname not in key:
ndtype.append((fname, fdtype))
# Add the fields from r2
- for fname, fdtype in get_fieldspec(r2.dtype):
+ for fname, fdtype in _get_fieldspec(r2.dtype):
# Have we seen the current name already ?
# we need to rebuild this list every time
names = list(name for name, dtype in ndtype)
@@ -1130,6 +1588,13 @@ def join_by(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
return _fix_output(_fix_defaults(output, defaults), **kwargs)
+def _rec_join_dispatcher(
+ key, r1, r2, jointype=None, r1postfix=None, r2postfix=None,
+ defaults=None):
+ return (r1, r2)
+
+
+@array_function_dispatch(_rec_join_dispatcher)
def rec_join(key, r1, r2, jointype='inner', r1postfix='1', r2postfix='2',
defaults=None):
"""
diff --git a/numpy/lib/scimath.py b/numpy/lib/scimath.py
index f1838fee6..5ac790ce9 100644
--- a/numpy/lib/scimath.py
+++ b/numpy/lib/scimath.py
@@ -20,6 +20,7 @@ from __future__ import division, absolute_import, print_function
import numpy.core.numeric as nx
import numpy.core.numerictypes as nt
from numpy.core.numeric import asarray, any
+from numpy.core.overrides import array_function_dispatch
from numpy.lib.type_check import isreal
@@ -58,7 +59,7 @@ def _tocomplex(arr):
>>> a = np.array([1,2,3],np.short)
>>> ac = np.lib.scimath._tocomplex(a); ac
- array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> ac.dtype
dtype('complex64')
@@ -69,7 +70,7 @@ def _tocomplex(arr):
>>> b = np.array([1,2,3],np.double)
>>> bc = np.lib.scimath._tocomplex(b); bc
- array([ 1.+0.j, 2.+0.j, 3.+0.j])
+ array([1.+0.j, 2.+0.j, 3.+0.j])
>>> bc.dtype
dtype('complex128')
@@ -80,13 +81,13 @@ def _tocomplex(arr):
>>> c = np.array([1,2,3],np.csingle)
>>> cc = np.lib.scimath._tocomplex(c); cc
- array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
>>> c *= 2; c
- array([ 2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
+ array([2.+0.j, 4.+0.j, 6.+0.j], dtype=complex64)
>>> cc
- array([ 1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
+ array([1.+0.j, 2.+0.j, 3.+0.j], dtype=complex64)
"""
if issubclass(arr.dtype.type, (nt.single, nt.byte, nt.short, nt.ubyte,
nt.ushort, nt.csingle)):
@@ -94,6 +95,7 @@ def _tocomplex(arr):
else:
return arr.astype(nt.cdouble)
+
def _fix_real_lt_zero(x):
"""Convert `x` to complex if it has real, negative components.
@@ -121,6 +123,7 @@ def _fix_real_lt_zero(x):
x = _tocomplex(x)
return x
+
def _fix_int_lt_zero(x):
"""Convert `x` to double if it has real, negative components.
@@ -147,6 +150,7 @@ def _fix_int_lt_zero(x):
x = x * 1.0
return x
+
def _fix_real_abs_gt_1(x):
"""Convert `x` to complex if it has real components x_i with abs(x_i)>1.
@@ -166,13 +170,19 @@ def _fix_real_abs_gt_1(x):
array([0, 1])
>>> np.lib.scimath._fix_real_abs_gt_1([0,2])
- array([ 0.+0.j, 2.+0.j])
+ array([0.+0.j, 2.+0.j])
"""
x = asarray(x)
if any(isreal(x) & (abs(x) > 1)):
x = _tocomplex(x)
return x
+
+def _unary_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_unary_dispatcher)
def sqrt(x):
"""
Compute the square root of x.
@@ -202,19 +212,21 @@ def sqrt(x):
>>> np.lib.scimath.sqrt(1)
1.0
>>> np.lib.scimath.sqrt([1, 4])
- array([ 1., 2.])
+ array([1., 2.])
But it automatically handles negative inputs:
>>> np.lib.scimath.sqrt(-1)
- (0.0+1.0j)
+ 1j
>>> np.lib.scimath.sqrt([-1,4])
- array([ 0.+1.j, 2.+0.j])
+ array([0.+1.j, 2.+0.j])
"""
x = _fix_real_lt_zero(x)
return nx.sqrt(x)
+
+@array_function_dispatch(_unary_dispatcher)
def log(x):
"""
Compute the natural logarithm of `x`.
@@ -261,6 +273,8 @@ def log(x):
x = _fix_real_lt_zero(x)
return nx.log(x)
+
+@array_function_dispatch(_unary_dispatcher)
def log10(x):
"""
Compute the logarithm base 10 of `x`.
@@ -303,12 +317,18 @@ def log10(x):
1.0
>>> np.emath.log10([-10**1, -10**2, 10**2])
- array([ 1.+1.3644j, 2.+1.3644j, 2.+0.j ])
+ array([1.+1.3644j, 2.+1.3644j, 2.+0.j ])
"""
x = _fix_real_lt_zero(x)
return nx.log10(x)
+
+def _logn_dispatcher(n, x):
+ return (n, x,)
+
+
+@array_function_dispatch(_logn_dispatcher)
def logn(n, x):
"""
Take log base n of x.
@@ -318,8 +338,8 @@ def logn(n, x):
Parameters
----------
- n : int
- The base in which the log is taken.
+ n : array_like
+ The integer base(s) in which the log is taken.
x : array_like
The value(s) whose log base `n` is (are) required.
@@ -334,15 +354,17 @@ def logn(n, x):
>>> np.set_printoptions(precision=4)
>>> np.lib.scimath.logn(2, [4, 8])
- array([ 2., 3.])
+ array([2., 3.])
>>> np.lib.scimath.logn(2, [-4, -8, 8])
- array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
"""
x = _fix_real_lt_zero(x)
n = _fix_real_lt_zero(n)
return nx.log(x)/nx.log(n)
+
+@array_function_dispatch(_unary_dispatcher)
def log2(x):
"""
Compute the logarithm base 2 of `x`.
@@ -383,12 +405,18 @@ def log2(x):
>>> np.emath.log2(8)
3.0
>>> np.emath.log2([-4, -8, 8])
- array([ 2.+4.5324j, 3.+4.5324j, 3.+0.j ])
+ array([2.+4.5324j, 3.+4.5324j, 3.+0.j ])
"""
x = _fix_real_lt_zero(x)
return nx.log2(x)
+
+def _power_dispatcher(x, p):
+ return (x, p)
+
+
+@array_function_dispatch(_power_dispatcher)
def power(x, p):
"""
Return x to the power p, (x**p).
@@ -423,15 +451,17 @@ def power(x, p):
>>> np.lib.scimath.power([2, 4], 2)
array([ 4, 16])
>>> np.lib.scimath.power([2, 4], -2)
- array([ 0.25 , 0.0625])
+ array([0.25 , 0.0625])
>>> np.lib.scimath.power([-2, 4], 2)
- array([ 4.+0.j, 16.+0.j])
+ array([ 4.-0.j, 16.+0.j])
"""
x = _fix_real_lt_zero(x)
p = _fix_int_lt_zero(p)
return nx.power(x, p)
+
+@array_function_dispatch(_unary_dispatcher)
def arccos(x):
"""
Compute the inverse cosine of x.
@@ -469,12 +499,14 @@ def arccos(x):
0.0
>>> np.emath.arccos([1,2])
- array([ 0.-0.j , 0.+1.317j])
+ array([0.-0.j , 0.-1.317j])
"""
x = _fix_real_abs_gt_1(x)
return nx.arccos(x)
+
+@array_function_dispatch(_unary_dispatcher)
def arcsin(x):
"""
Compute the inverse sine of x.
@@ -513,12 +545,14 @@ def arcsin(x):
0.0
>>> np.emath.arcsin([0,1])
- array([ 0. , 1.5708])
+ array([0. , 1.5708])
"""
x = _fix_real_abs_gt_1(x)
return nx.arcsin(x)
+
+@array_function_dispatch(_unary_dispatcher)
def arctanh(x):
"""
Compute the inverse hyperbolic tangent of `x`.
@@ -555,11 +589,14 @@ def arctanh(x):
--------
>>> np.set_printoptions(precision=4)
- >>> np.emath.arctanh(np.eye(2))
- array([[ Inf, 0.],
- [ 0., Inf]])
+ >>> from numpy.testing import suppress_warnings
+ >>> with suppress_warnings() as sup:
+ ... sup.filter(RuntimeWarning)
+ ... np.emath.arctanh(np.eye(2))
+ array([[inf, 0.],
+ [ 0., inf]])
>>> np.emath.arctanh([1j])
- array([ 0.+0.7854j])
+ array([0.+0.7854j])
"""
x = _fix_real_abs_gt_1(x)
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 66f534734..92d52109e 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -1,14 +1,17 @@
from __future__ import division, absolute_import, print_function
+import functools
import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
asarray, zeros, outer, concatenate, array, asanyarray
)
-from numpy.core.fromnumeric import product, reshape, transpose
+from numpy.core.fromnumeric import reshape, transpose
from numpy.core.multiarray import normalize_axis_index
+from numpy.core import overrides
from numpy.core import vstack, atleast_3d
+from numpy.core.shape_base import _arrays_for_stack_dispatcher
from numpy.lib.index_tricks import ndindex
from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
@@ -21,6 +24,10 @@ __all__ = [
]
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
def _make_along_axis_idx(arr_shape, indices, axis):
# compute dimensions to iterate over
if not _nx.issubdtype(indices.dtype, _nx.integer):
@@ -44,6 +51,11 @@ def _make_along_axis_idx(arr_shape, indices, axis):
return tuple(fancy_index)
+def _take_along_axis_dispatcher(arr, indices, axis):
+ return (arr, indices)
+
+
+@array_function_dispatch(_take_along_axis_dispatcher)
def take_along_axis(arr, indices, axis):
"""
Take values from the input array by matching 1d index and data slices.
@@ -82,7 +94,7 @@ def take_along_axis(arr, indices, axis):
Ni, M, Nk = a.shape[:axis], a.shape[axis], a.shape[axis+1:]
J = indices.shape[axis] # Need not equal M
- out = np.empty(Nk + (J,) + Nk)
+ out = np.empty(Ni + (J,) + Nk)
for ii in ndindex(Ni):
for kk in ndindex(Nk):
@@ -116,7 +128,7 @@ def take_along_axis(arr, indices, axis):
[40, 50, 60]])
>>> ai = np.argsort(a, axis=1); ai
array([[0, 2, 1],
- [1, 2, 0]], dtype=int64)
+ [1, 2, 0]])
>>> np.take_along_axis(a, ai, axis=1)
array([[10, 20, 30],
[40, 50, 60]])
@@ -129,7 +141,7 @@ def take_along_axis(arr, indices, axis):
>>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
>>> ai
array([[1],
- [0], dtype=int64)
+ [0]])
>>> np.take_along_axis(a, ai, axis=1)
array([[30],
[60]])
@@ -139,10 +151,10 @@ def take_along_axis(arr, indices, axis):
>>> ai_min = np.expand_dims(np.argmin(a, axis=1), axis=1)
>>> ai_max = np.expand_dims(np.argmax(a, axis=1), axis=1)
- >>> ai = np.concatenate([ai_min, ai_max], axis=axis)
- >> ai
+ >>> ai = np.concatenate([ai_min, ai_max], axis=1)
+ >>> ai
array([[0, 1],
- [1, 0]], dtype=int64)
+ [1, 0]])
>>> np.take_along_axis(a, ai, axis=1)
array([[10, 30],
[40, 60]])
@@ -160,6 +172,11 @@ def take_along_axis(arr, indices, axis):
return arr[_make_along_axis_idx(arr_shape, indices, axis)]
+def _put_along_axis_dispatcher(arr, indices, values, axis):
+ return (arr, indices, values)
+
+
+@array_function_dispatch(_put_along_axis_dispatcher)
def put_along_axis(arr, indices, values, axis):
"""
Put values into the destination array by matching 1d index and data slices.
@@ -225,7 +242,7 @@ def put_along_axis(arr, indices, values, axis):
>>> ai = np.expand_dims(np.argmax(a, axis=1), axis=1)
>>> ai
array([[1],
- [0]], dtype=int64)
+ [0]])
>>> np.put_along_axis(a, ai, 99, axis=1)
>>> a
array([[10, 99, 20],
@@ -245,6 +262,11 @@ def put_along_axis(arr, indices, values, axis):
arr[_make_along_axis_idx(arr_shape, indices, axis)] = values
+def _apply_along_axis_dispatcher(func1d, axis, arr, *args, **kwargs):
+ return (arr,)
+
+
+@array_function_dispatch(_apply_along_axis_dispatcher)
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
Apply a function to 1-D slices along the given axis.
@@ -307,9 +329,9 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
... return (a[0] + a[-1]) * 0.5
>>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
>>> np.apply_along_axis(my_func, 0, b)
- array([ 4., 5., 6.])
+ array([4., 5., 6.])
>>> np.apply_along_axis(my_func, 1, b)
- array([ 2., 5., 8.])
+ array([2., 5., 8.])
For a function that returns a 1D array, the number of dimensions in
`outarr` is the same as `arr`.
@@ -392,6 +414,11 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
return res.__array_wrap__(out_arr)
+def _apply_over_axes_dispatcher(func, a, axes):
+ return (a,)
+
+
+@array_function_dispatch(_apply_over_axes_dispatcher)
def apply_over_axes(func, a, axes):
"""
Apply a function repeatedly over multiple axes.
@@ -474,9 +501,15 @@ def apply_over_axes(func, a, axes):
val = res
else:
raise ValueError("function is not returning "
- "an array of the correct shape")
+ "an array of the correct shape")
return val
+
+def _expand_dims_dispatcher(a, axis):
+ return (a,)
+
+
+@array_function_dispatch(_expand_dims_dispatcher)
def expand_dims(a, axis):
"""
Expand the shape of an array.
@@ -499,8 +532,7 @@ def expand_dims(a, axis):
Returns
-------
res : ndarray
- Output array. The number of dimensions is one greater than that of
- the input array.
+ View of `a` with the number of dimensions increased by one.
See Also
--------
@@ -546,7 +578,7 @@ def expand_dims(a, axis):
# 2017-05-17, 1.13.0
warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
"deprecated and will raise an AxisError in the future.",
- DeprecationWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=3)
# When the deprecation period expires, delete this if block,
if axis < 0:
axis = axis + a.ndim + 1
@@ -554,8 +586,15 @@ def expand_dims(a, axis):
# axis = normalize_axis_index(axis, a.ndim + 1)
return a.reshape(shape[:axis] + (1,) + shape[axis:])
+
row_stack = vstack
+
+def _column_stack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_column_stack_dispatcher)
def column_stack(tup):
"""
Stack 1-D arrays as columns into a 2-D array.
@@ -589,6 +628,10 @@ def column_stack(tup):
[3, 4]])
"""
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+
arrays = []
for v in tup:
arr = array(v, copy=False, subok=True)
@@ -597,6 +640,12 @@ def column_stack(tup):
arrays.append(arr)
return _nx.concatenate(arrays, 1)
+
+def _dstack_dispatcher(tup):
+ return _arrays_for_stack_dispatcher(tup)
+
+
+@array_function_dispatch(_dstack_dispatcher)
def dstack(tup):
"""
Stack arrays in sequence depth wise (along third axis).
@@ -647,7 +696,15 @@ def dstack(tup):
[[3, 4]]])
"""
- return _nx.concatenate([atleast_3d(_m) for _m in tup], 2)
+ if not overrides.ARRAY_FUNCTION_ENABLED:
+ # raise warning if necessary
+ _arrays_for_stack_dispatcher(tup, stacklevel=2)
+
+ arrs = atleast_3d(*tup)
+ if not isinstance(arrs, list):
+ arrs = [arrs]
+ return _nx.concatenate(arrs, 2)
+
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
@@ -657,6 +714,12 @@ def _replace_zero_by_x_arrays(sub_arys):
sub_arys[i] = _nx.empty(0, dtype=sub_arys[i].dtype)
return sub_arys
+
+def _array_split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_array_split_dispatcher)
def array_split(ary, indices_or_sections, axis=0):
"""
Split an array into multiple sub-arrays.
@@ -676,11 +739,11 @@ def array_split(ary, indices_or_sections, axis=0):
--------
>>> x = np.arange(8.0)
>>> np.array_split(x, 3)
- [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7.])]
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7.])]
>>> x = np.arange(7.0)
>>> np.array_split(x, 3)
- [array([ 0., 1., 2.]), array([ 3., 4.]), array([ 5., 6.])]
+ [array([0., 1., 2.]), array([3., 4.]), array([5., 6.])]
"""
try:
@@ -712,9 +775,14 @@ def array_split(ary, indices_or_sections, axis=0):
return sub_arys
-def split(ary,indices_or_sections,axis=0):
+def _split_dispatcher(ary, indices_or_sections, axis=None):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_split_dispatcher)
+def split(ary, indices_or_sections, axis=0):
"""
- Split an array into multiple sub-arrays.
+ Split an array into multiple sub-arrays as views into `ary`.
Parameters
----------
@@ -741,7 +809,7 @@ def split(ary,indices_or_sections,axis=0):
Returns
-------
sub-arrays : list of ndarrays
- A list of sub-arrays.
+ A list of sub-arrays as views into `ary`.
Raises
------
@@ -767,14 +835,14 @@ def split(ary,indices_or_sections,axis=0):
--------
>>> x = np.arange(9.0)
>>> np.split(x, 3)
- [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]
+ [array([0., 1., 2.]), array([3., 4., 5.]), array([6., 7., 8.])]
>>> x = np.arange(8.0)
>>> np.split(x, [3, 5, 6, 10])
- [array([ 0., 1., 2.]),
- array([ 3., 4.]),
- array([ 5.]),
- array([ 6., 7.]),
+ [array([0., 1., 2.]),
+ array([3., 4.]),
+ array([5.]),
+ array([6., 7.]),
array([], dtype=float64)]
"""
@@ -786,9 +854,14 @@ def split(ary,indices_or_sections,axis=0):
if N % sections:
raise ValueError(
'array split does not result in an equal division')
- res = array_split(ary, indices_or_sections, axis)
- return res
+ return array_split(ary, indices_or_sections, axis)
+
+def _hvdsplit_dispatcher(ary, indices_or_sections):
+ return (ary, indices_or_sections)
+
+
+@array_function_dispatch(_hvdsplit_dispatcher)
def hsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays horizontally (column-wise).
@@ -805,43 +878,43 @@ def hsplit(ary, indices_or_sections):
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
- array([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [ 12., 13., 14., 15.]])
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
>>> np.hsplit(x, 2)
[array([[ 0., 1.],
[ 4., 5.],
[ 8., 9.],
- [ 12., 13.]]),
+ [12., 13.]]),
array([[ 2., 3.],
[ 6., 7.],
- [ 10., 11.],
- [ 14., 15.]])]
+ [10., 11.],
+ [14., 15.]])]
>>> np.hsplit(x, np.array([3, 6]))
- [array([[ 0., 1., 2.],
- [ 4., 5., 6.],
- [ 8., 9., 10.],
- [ 12., 13., 14.]]),
- array([[ 3.],
- [ 7.],
- [ 11.],
- [ 15.]]),
- array([], dtype=float64)]
+ [array([[ 0., 1., 2.],
+ [ 4., 5., 6.],
+ [ 8., 9., 10.],
+ [12., 13., 14.]]),
+ array([[ 3.],
+ [ 7.],
+ [11.],
+ [15.]]),
+ array([], shape=(4, 0), dtype=float64)]
With a higher dimensional array the split is still along the second axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
- array([[[ 0., 1.],
- [ 2., 3.]],
- [[ 4., 5.],
- [ 6., 7.]]])
+ array([[[0., 1.],
+ [2., 3.]],
+ [[4., 5.],
+ [6., 7.]]])
>>> np.hsplit(x, 2)
- [array([[[ 0., 1.]],
- [[ 4., 5.]]]),
- array([[[ 2., 3.]],
- [[ 6., 7.]]])]
+ [array([[[0., 1.]],
+ [[4., 5.]]]),
+ array([[[2., 3.]],
+ [[6., 7.]]])]
"""
if _nx.ndim(ary) == 0:
@@ -851,6 +924,8 @@ def hsplit(ary, indices_or_sections):
else:
return split(ary, indices_or_sections, 0)
+
+@array_function_dispatch(_hvdsplit_dispatcher)
def vsplit(ary, indices_or_sections):
"""
Split an array into multiple sub-arrays vertically (row-wise).
@@ -867,41 +942,39 @@ def vsplit(ary, indices_or_sections):
--------
>>> x = np.arange(16.0).reshape(4, 4)
>>> x
- array([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.],
- [ 12., 13., 14., 15.]])
+ array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])
>>> np.vsplit(x, 2)
- [array([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.]]),
- array([[ 8., 9., 10., 11.],
- [ 12., 13., 14., 15.]])]
+ [array([[0., 1., 2., 3.],
+ [4., 5., 6., 7.]]), array([[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]])]
>>> np.vsplit(x, np.array([3, 6]))
- [array([[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.],
- [ 8., 9., 10., 11.]]),
- array([[ 12., 13., 14., 15.]]),
- array([], dtype=float64)]
+ [array([[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.],
+ [ 8., 9., 10., 11.]]), array([[12., 13., 14., 15.]]), array([], shape=(0, 4), dtype=float64)]
With a higher dimensional array the split is still along the first axis.
>>> x = np.arange(8.0).reshape(2, 2, 2)
>>> x
- array([[[ 0., 1.],
- [ 2., 3.]],
- [[ 4., 5.],
- [ 6., 7.]]])
+ array([[[0., 1.],
+ [2., 3.]],
+ [[4., 5.],
+ [6., 7.]]])
>>> np.vsplit(x, 2)
- [array([[[ 0., 1.],
- [ 2., 3.]]]),
- array([[[ 4., 5.],
- [ 6., 7.]]])]
+ [array([[[0., 1.],
+ [2., 3.]]]), array([[[4., 5.],
+ [6., 7.]]])]
"""
if _nx.ndim(ary) < 2:
raise ValueError('vsplit only works on arrays of 2 or more dimensions')
return split(ary, indices_or_sections, 0)
+
+@array_function_dispatch(_hvdsplit_dispatcher)
def dsplit(ary, indices_or_sections):
"""
Split array into multiple sub-arrays along the 3rd axis (depth).
@@ -918,30 +991,28 @@ def dsplit(ary, indices_or_sections):
--------
>>> x = np.arange(16.0).reshape(2, 2, 4)
>>> x
- array([[[ 0., 1., 2., 3.],
- [ 4., 5., 6., 7.]],
- [[ 8., 9., 10., 11.],
- [ 12., 13., 14., 15.]]])
+ array([[[ 0., 1., 2., 3.],
+ [ 4., 5., 6., 7.]],
+ [[ 8., 9., 10., 11.],
+ [12., 13., 14., 15.]]])
>>> np.dsplit(x, 2)
- [array([[[ 0., 1.],
- [ 4., 5.]],
- [[ 8., 9.],
- [ 12., 13.]]]),
- array([[[ 2., 3.],
- [ 6., 7.]],
- [[ 10., 11.],
- [ 14., 15.]]])]
+ [array([[[ 0., 1.],
+ [ 4., 5.]],
+ [[ 8., 9.],
+ [12., 13.]]]), array([[[ 2., 3.],
+ [ 6., 7.]],
+ [[10., 11.],
+ [14., 15.]]])]
>>> np.dsplit(x, np.array([3, 6]))
- [array([[[ 0., 1., 2.],
- [ 4., 5., 6.]],
- [[ 8., 9., 10.],
- [ 12., 13., 14.]]]),
- array([[[ 3.],
- [ 7.]],
- [[ 11.],
- [ 15.]]]),
- array([], dtype=float64)]
-
+ [array([[[ 0., 1., 2.],
+ [ 4., 5., 6.]],
+ [[ 8., 9., 10.],
+ [12., 13., 14.]]]),
+ array([[[ 3.],
+ [ 7.]],
+ [[11.],
+ [15.]]]),
+ array([], shape=(2, 2, 0), dtype=float64)]
"""
if _nx.ndim(ary) < 3:
raise ValueError('dsplit only works on arrays of 3 or more dimensions')
@@ -971,6 +1042,12 @@ def get_array_wrap(*args):
return wrappers[-1][-1]
return None
+
+def _kron_dispatcher(a, b):
+ return (a, b)
+
+
+@array_function_dispatch(_kron_dispatcher)
def kron(a, b):
"""
Kronecker product of two arrays.
@@ -1015,15 +1092,15 @@ def kron(a, b):
Examples
--------
>>> np.kron([1,10,100], [5,6,7])
- array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
+ array([ 5, 6, 7, ..., 500, 600, 700])
>>> np.kron([5,6,7], [1,10,100])
- array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
+ array([ 5, 50, 500, ..., 7, 70, 700])
>>> np.kron(np.eye(2), np.ones((2,2)))
- array([[ 1., 1., 0., 0.],
- [ 1., 1., 0., 0.],
- [ 0., 0., 1., 1.],
- [ 0., 0., 1., 1.]])
+ array([[1., 1., 0., 0.],
+ [1., 1., 0., 0.],
+ [0., 0., 1., 1.],
+ [0., 0., 1., 1.]])
>>> a = np.arange(100).reshape((2,5,2,5))
>>> b = np.arange(24).reshape((2,3,4))
@@ -1070,6 +1147,11 @@ def kron(a, b):
return result
+def _tile_dispatcher(A, reps):
+ return (A, reps)
+
+
+@array_function_dispatch(_tile_dispatcher)
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
diff --git a/numpy/lib/stride_tricks.py b/numpy/lib/stride_tricks.py
index ca13738c1..8aafd094b 100644
--- a/numpy/lib/stride_tricks.py
+++ b/numpy/lib/stride_tricks.py
@@ -8,6 +8,7 @@ NumPy reference guide.
from __future__ import division, absolute_import, print_function
import numpy as np
+from numpy.core.overrides import array_function_dispatch
__all__ = ['broadcast_to', 'broadcast_arrays']
@@ -120,21 +121,26 @@ def _broadcast_to(array, shape, subok, readonly):
if any(size < 0 for size in shape):
raise ValueError('all elements of broadcast shape must be non-'
'negative')
- needs_writeable = not readonly and array.flags.writeable
- extras = ['reduce_ok'] if needs_writeable else []
- op_flag = 'readwrite' if needs_writeable else 'readonly'
+ extras = []
it = np.nditer(
(array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'] + extras,
- op_flags=[op_flag], itershape=shape, order='C')
+ op_flags=['readonly'], itershape=shape, order='C')
with it:
# never really has writebackifcopy semantics
broadcast = it.itviews[0]
result = _maybe_view_as_subclass(array, broadcast)
- if needs_writeable and not result.flags.writeable:
+ # In a future version this will go away
+ if not readonly and array.flags._writeable_no_warn:
result.flags.writeable = True
+ result.flags._warn_on_write = True
return result
+def _broadcast_to_dispatcher(array, shape, subok=None):
+ return (array,)
+
+
+@array_function_dispatch(_broadcast_to_dispatcher, module='numpy')
def broadcast_to(array, shape, subok=False):
"""Broadcast an array to a new shape.
@@ -180,8 +186,6 @@ def _broadcast_shape(*args):
"""Returns the shape of the arrays that would result from broadcasting the
supplied arrays against each other.
"""
- if not args:
- return ()
# use the old-iterator because np.nditer does not handle size 0 arrays
# consistently
b = np.broadcast(*args[:32])
@@ -195,6 +199,11 @@ def _broadcast_shape(*args):
return b.shape
+def _broadcast_arrays_dispatcher(*args, **kwargs):
+ return args
+
+
+@array_function_dispatch(_broadcast_arrays_dispatcher, module='numpy')
def broadcast_arrays(*args, **kwargs):
"""
Broadcast any number of arrays against each other.
@@ -213,8 +222,15 @@ def broadcast_arrays(*args, **kwargs):
broadcasted : list of arrays
These arrays are views on the original arrays. They are typically
not contiguous. Furthermore, more than one element of a
- broadcasted array may refer to a single memory location. If you
- need to write to the arrays, make copies first.
+ broadcasted array may refer to a single memory location. If you need
+ to write to the arrays, make copies first. While you can set the
+ ``writable`` flag True, writing to a single output value may end up
+ changing more than one location in the output array.
+
+ .. deprecated:: 1.17
+ The output is currently marked so that if written to, a deprecation
+ warning will be emitted. A future version will set the
+ ``writable`` flag False so writing to it will raise an error.
Examples
--------
@@ -251,7 +267,5 @@ def broadcast_arrays(*args, **kwargs):
# Common case where nothing needs to be broadcasted.
return args
- # TODO: consider making the results of broadcast_arrays readonly to match
- # broadcast_to. This will require a deprecation cycle.
return [_broadcast_to(array, shape, subok=subok, readonly=False)
for array in args]
diff --git a/numpy/lib/tests/test__datasource.py b/numpy/lib/tests/test__datasource.py
index 1df8bebf6..8eac16b58 100644
--- a/numpy/lib/tests/test__datasource.py
+++ b/numpy/lib/tests/test__datasource.py
@@ -361,3 +361,18 @@ class TestOpenFunc(object):
fp = datasource.open(local_file)
assert_(fp)
fp.close()
+
+def test_del_attr_handling():
+ # DataSource __del__ can be called
+ # even if __init__ fails when the
+ # Exception object is caught by the
+ # caller as happens in refguide_check
+ # is_deprecated() function
+
+ ds = datasource.DataSource()
+ # simulate failed __init__ by removing key attribute
+ # produced within __init__ and expected by __del__
+ del ds._istmpdest
+ # should not raise an AttributeError if __del__
+ # gracefully handles failed __init__:
+ ds.__del__()
diff --git a/numpy/lib/tests/test__iotools.py b/numpy/lib/tests/test__iotools.py
index b4888f1bd..15cd3ad9d 100644
--- a/numpy/lib/tests/test__iotools.py
+++ b/numpy/lib/tests/test__iotools.py
@@ -1,6 +1,5 @@
from __future__ import division, absolute_import, print_function
-import sys
import time
from datetime import date
@@ -205,14 +204,18 @@ class TestStringConverter(object):
def test_upgrademapper(self):
"Tests updatemapper"
dateparser = _bytes_to_date
- StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
- convert = StringConverter(dateparser, date(2000, 1, 1))
- test = convert('2001-01-01')
- assert_equal(test, date(2001, 1, 1))
- test = convert('2009-01-01')
- assert_equal(test, date(2009, 1, 1))
- test = convert('')
- assert_equal(test, date(2000, 1, 1))
+ _original_mapper = StringConverter._mapper[:]
+ try:
+ StringConverter.upgrade_mapper(dateparser, date(2000, 1, 1))
+ convert = StringConverter(dateparser, date(2000, 1, 1))
+ test = convert('2001-01-01')
+ assert_equal(test, date(2001, 1, 1))
+ test = convert('2009-01-01')
+ assert_equal(test, date(2009, 1, 1))
+ test = convert('')
+ assert_equal(test, date(2000, 1, 1))
+ finally:
+ StringConverter._mapper = _original_mapper
def test_string_to_object(self):
"Make sure that string-to-object functions are properly recognized"
@@ -246,7 +249,7 @@ class TestStringConverter(object):
converter = StringConverter(int, default=0,
missing_values="N/A")
assert_equal(
- converter.missing_values, set(['', 'N/A']))
+ converter.missing_values, {'', 'N/A'})
def test_int64_dtype(self):
"Check that int64 integer types can be specified"
diff --git a/numpy/lib/tests/test_arraypad.py b/numpy/lib/tests/test_arraypad.py
index e62fccaa0..65593dd29 100644
--- a/numpy/lib/tests/test_arraypad.py
+++ b/numpy/lib/tests/test_arraypad.py
@@ -6,58 +6,140 @@ from __future__ import division, absolute_import, print_function
import pytest
import numpy as np
-from numpy.testing import (assert_array_equal, assert_raises, assert_allclose,
- assert_equal)
-from numpy.lib import pad
+from numpy.testing import assert_array_equal, assert_allclose, assert_equal
+from numpy.lib.arraypad import _as_pairs
+
+
+_numeric_dtypes = (
+ np.sctypes["uint"]
+ + np.sctypes["int"]
+ + np.sctypes["float"]
+ + np.sctypes["complex"]
+)
+_all_modes = {
+ 'constant': {'constant_values': 0},
+ 'edge': {},
+ 'linear_ramp': {'end_values': 0},
+ 'maximum': {'stat_length': None},
+ 'mean': {'stat_length': None},
+ 'median': {'stat_length': None},
+ 'minimum': {'stat_length': None},
+ 'reflect': {'reflect_type': 'even'},
+ 'symmetric': {'reflect_type': 'even'},
+ 'wrap': {},
+ 'empty': {}
+}
+
+
+class TestAsPairs(object):
+ def test_single_value(self):
+ """Test casting for a single value."""
+ expected = np.array([[3, 3]] * 10)
+ for x in (3, [3], [[3]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # Test with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(obj, 10),
+ np.array([[obj, obj]] * 10)
+ )
+
+ def test_two_values(self):
+ """Test proper casting for two different values."""
+ # Broadcasting in the first dimension with numbers
+ expected = np.array([[3, 4]] * 10)
+ for x in ([3, 4], [[3, 4]]):
+ result = _as_pairs(x, 10)
+ assert_equal(result, expected)
+ # and with dtype=object
+ obj = object()
+ assert_equal(
+ _as_pairs(["a", obj], 10),
+ np.array([["a", obj]] * 10)
+ )
+
+ # Broadcasting in the second / last dimension with numbers
+ assert_equal(
+ _as_pairs([[3], [4]], 2),
+ np.array([[3, 3], [4, 4]])
+ )
+ # and with dtype=object
+ assert_equal(
+ _as_pairs([["a"], [obj]], 2),
+ np.array([["a", "a"], [obj, obj]])
+ )
+
+ def test_with_none(self):
+ expected = ((None, None), (None, None), (None, None))
+ assert_equal(
+ _as_pairs(None, 3, as_index=False),
+ expected
+ )
+ assert_equal(
+ _as_pairs(None, 3, as_index=True),
+ expected
+ )
+
+ def test_pass_through(self):
+ """Test if `x` already matching desired output are passed through."""
+ expected = np.arange(12).reshape((6, 2))
+ assert_equal(
+ _as_pairs(expected, 6),
+ expected
+ )
+
+ def test_as_index(self):
+ """Test results if `as_index=True`."""
+ assert_equal(
+ _as_pairs([2.6, 3.3], 10, as_index=True),
+ np.array([[3, 3]] * 10, dtype=np.intp)
+ )
+ assert_equal(
+ _as_pairs([2.6, 4.49], 10, as_index=True),
+ np.array([[3, 4]] * 10, dtype=np.intp)
+ )
+ for x in (-3, [-3], [[-3]], [-3, 4], [3, -4], [[-3, 4]], [[4, -3]],
+ [[1, 2]] * 9 + [[1, -2]]):
+ with pytest.raises(ValueError, match="negative values"):
+ _as_pairs(x, 10, as_index=True)
+
+ def test_exceptions(self):
+ """Ensure faulty usage is discovered."""
+ with pytest.raises(ValueError, match="more dimensions than allowed"):
+ _as_pairs([[[3]]], 10)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs([[1, 2], [3, 4]], 3)
+ with pytest.raises(ValueError, match="could not be broadcast"):
+ _as_pairs(np.ones((2, 3)), 3)
class TestConditionalShortcuts(object):
- def test_zero_padding_shortcuts(self):
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_zero_padding_shortcuts(self, mode):
test = np.arange(120).reshape(4, 5, 6)
- pad_amt = [(0, 0) for axis in test.shape]
- modes = ['constant',
- 'edge',
- 'linear_ramp',
- 'maximum',
- 'mean',
- 'median',
- 'minimum',
- 'reflect',
- 'symmetric',
- 'wrap',
- ]
- for mode in modes:
- assert_array_equal(test, pad(test, pad_amt, mode=mode))
-
- def test_shallow_statistic_range(self):
+ pad_amt = [(0, 0) for _ in test.shape]
+ assert_array_equal(test, np.pad(test, pad_amt, mode=mode))
+
+ @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
+ def test_shallow_statistic_range(self, mode):
test = np.arange(120).reshape(4, 5, 6)
- pad_amt = [(1, 1) for axis in test.shape]
- modes = ['maximum',
- 'mean',
- 'median',
- 'minimum',
- ]
- for mode in modes:
- assert_array_equal(pad(test, pad_amt, mode='edge'),
- pad(test, pad_amt, mode=mode, stat_length=1))
-
- def test_clip_statistic_range(self):
+ pad_amt = [(1, 1) for _ in test.shape]
+ assert_array_equal(np.pad(test, pad_amt, mode='edge'),
+ np.pad(test, pad_amt, mode=mode, stat_length=1))
+
+ @pytest.mark.parametrize("mode", ['maximum', 'mean', 'median', 'minimum',])
+ def test_clip_statistic_range(self, mode):
test = np.arange(30).reshape(5, 6)
- pad_amt = [(3, 3) for axis in test.shape]
- modes = ['maximum',
- 'mean',
- 'median',
- 'minimum',
- ]
- for mode in modes:
- assert_array_equal(pad(test, pad_amt, mode=mode),
- pad(test, pad_amt, mode=mode, stat_length=30))
+ pad_amt = [(3, 3) for _ in test.shape]
+ assert_array_equal(np.pad(test, pad_amt, mode=mode),
+ np.pad(test, pad_amt, mode=mode, stat_length=30))
class TestStatistic(object):
def test_check_mean_stat_length(self):
a = np.arange(100).astype('f')
- a = pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
+ a = np.pad(a, ((25, 20), ), 'mean', stat_length=((2, 3), ))
b = np.array(
[0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5, 0.5,
@@ -81,7 +163,7 @@ class TestStatistic(object):
def test_check_maximum_1(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'maximum')
+ a = np.pad(a, (25, 20), 'maximum')
b = np.array(
[99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
99, 99, 99, 99, 99, 99, 99, 99, 99, 99,
@@ -105,7 +187,7 @@ class TestStatistic(object):
def test_check_maximum_2(self):
a = np.arange(100) + 1
- a = pad(a, (25, 20), 'maximum')
+ a = np.pad(a, (25, 20), 'maximum')
b = np.array(
[100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
@@ -129,7 +211,7 @@ class TestStatistic(object):
def test_check_maximum_stat_length(self):
a = np.arange(100) + 1
- a = pad(a, (25, 20), 'maximum', stat_length=10)
+ a = np.pad(a, (25, 20), 'maximum', stat_length=10)
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
@@ -153,7 +235,7 @@ class TestStatistic(object):
def test_check_minimum_1(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'minimum')
+ a = np.pad(a, (25, 20), 'minimum')
b = np.array(
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -177,7 +259,7 @@ class TestStatistic(object):
def test_check_minimum_2(self):
a = np.arange(100) + 2
- a = pad(a, (25, 20), 'minimum')
+ a = np.pad(a, (25, 20), 'minimum')
b = np.array(
[2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
@@ -201,7 +283,7 @@ class TestStatistic(object):
def test_check_minimum_stat_length(self):
a = np.arange(100) + 1
- a = pad(a, (25, 20), 'minimum', stat_length=10)
+ a = np.pad(a, (25, 20), 'minimum', stat_length=10)
b = np.array(
[ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
@@ -225,7 +307,7 @@ class TestStatistic(object):
def test_check_median(self):
a = np.arange(100).astype('f')
- a = pad(a, (25, 20), 'median')
+ a = np.pad(a, (25, 20), 'median')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
@@ -249,7 +331,7 @@ class TestStatistic(object):
def test_check_median_01(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
- a = pad(a, 1, 'median')
+ a = np.pad(a, 1, 'median')
b = np.array(
[[4, 4, 5, 4, 4],
@@ -263,7 +345,7 @@ class TestStatistic(object):
def test_check_median_02(self):
a = np.array([[3, 1, 4], [4, 5, 9], [9, 8, 2]])
- a = pad(a.T, 1, 'median').T
+ a = np.pad(a.T, 1, 'median').T
b = np.array(
[[5, 4, 5, 4, 5],
@@ -279,7 +361,7 @@ class TestStatistic(object):
a = np.arange(100).astype('f')
a[1] = 2.
a[97] = 96.
- a = pad(a, (25, 20), 'median', stat_length=(3, 5))
+ a = np.pad(a, (25, 20), 'median', stat_length=(3, 5))
b = np.array(
[ 2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
2., 2., 2., 2., 2., 2., 2., 2., 2., 2.,
@@ -303,7 +385,7 @@ class TestStatistic(object):
def test_check_mean_shape_one(self):
a = [[4, 5, 6]]
- a = pad(a, (5, 7), 'mean', stat_length=2)
+ a = np.pad(a, (5, 7), 'mean', stat_length=2)
b = np.array(
[[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
[4, 4, 4, 4, 4, 4, 5, 6, 6, 6, 6, 6, 6, 6, 6],
@@ -325,7 +407,7 @@ class TestStatistic(object):
def test_check_mean_2(self):
a = np.arange(100).astype('f')
- a = pad(a, (25, 20), 'mean')
+ a = np.pad(a, (25, 20), 'mean')
b = np.array(
[49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5, 49.5,
@@ -348,7 +430,7 @@ class TestStatistic(object):
assert_array_equal(a, b)
@pytest.mark.parametrize("mode", [
- pytest.param("mean", marks=pytest.mark.xfail(reason="gh-11216")),
+ "mean",
"median",
"minimum",
"maximum"
@@ -361,11 +443,65 @@ class TestStatistic(object):
a = np.pad(a, (1, 1), mode)
assert_equal(a[0], a[-1])
+ @pytest.mark.parametrize("mode", ["mean", "median", "minimum", "maximum"])
+ @pytest.mark.parametrize(
+ "stat_length", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))]
+ )
+ def test_check_negative_stat_length(self, mode, stat_length):
+ arr = np.arange(30).reshape((6, 5))
+ match = "index can't contain negative values"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, 2, mode, stat_length=stat_length)
+
+ def test_simple_stat_length(self):
+ a = np.arange(30)
+ a = np.reshape(a, (6, 5))
+ a = np.pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
+ b = np.array(
+ [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+
+ [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
+ [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
+ [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
+ [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
+
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
+ [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
+ )
+ assert_array_equal(a, b)
+
+ @pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning")
+ @pytest.mark.filterwarnings(
+ "ignore:invalid value encountered in (true_divide|double_scalars):"
+ "RuntimeWarning"
+ )
+ @pytest.mark.parametrize("mode", ["mean", "median"])
+ def test_zero_stat_length_valid(self, mode):
+ arr = np.pad([1., 2.], (1, 2), mode, stat_length=0)
+ expected = np.array([np.nan, 1., 2., np.nan, np.nan])
+ assert_equal(arr, expected)
+
+ @pytest.mark.parametrize("mode", ["minimum", "maximum"])
+ def test_zero_stat_length_invalid(self, mode):
+ match = "stat_length of 0 yields no value for padding"
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 0, mode, stat_length=(1, 0))
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=0)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1., 2.], 1, mode, stat_length=(1, 0))
+
class TestConstant(object):
def test_check_constant(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'constant', constant_values=(10, 20))
+ a = np.pad(a, (25, 20), 'constant', constant_values=(10, 20))
b = np.array(
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
@@ -389,7 +525,7 @@ class TestConstant(object):
def test_check_constant_zeros(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'constant')
+ a = np.pad(a, (25, 20), 'constant')
b = np.array(
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
@@ -415,7 +551,7 @@ class TestConstant(object):
# If input array is int, but constant_values are float, the dtype of
# the array to be padded is kept
arr = np.arange(30).reshape(5, 6)
- test = pad(arr, (1, 2), mode='constant',
+ test = np.pad(arr, (1, 2), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1, 1, 1, 1, 1, 1, 1, 1, 1],
@@ -436,7 +572,7 @@ class TestConstant(object):
# the array to be padded is kept - here retaining the float constants
arr = np.arange(30).reshape(5, 6)
arr_float = arr.astype(np.float64)
- test = pad(arr_float, ((1, 2), (1, 2)), mode='constant',
+ test = np.pad(arr_float, ((1, 2), (1, 2)), mode='constant',
constant_values=1.1)
expected = np.array(
[[ 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1],
@@ -454,7 +590,7 @@ class TestConstant(object):
def test_check_constant_float3(self):
a = np.arange(100, dtype=float)
- a = pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
+ a = np.pad(a, (25, 20), 'constant', constant_values=(-1.1, -1.2))
b = np.array(
[-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
-1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1, -1.1,
@@ -478,7 +614,7 @@ class TestConstant(object):
def test_check_constant_odd_pad_amount(self):
arr = np.arange(30).reshape(5, 6)
- test = pad(arr, ((1,), (2,)), mode='constant',
+ test = np.pad(arr, ((1,), (2,)), mode='constant',
constant_values=3)
expected = np.array(
[[ 3, 3, 3, 3, 3, 3, 3, 3, 3, 3],
@@ -535,10 +671,16 @@ class TestConstant(object):
assert_array_equal(arr, expected)
+ def test_pad_empty_dimension(self):
+ arr = np.zeros((3, 0, 2))
+ result = np.pad(arr, [(0,), (2,), (1,)], mode="constant")
+ assert result.shape == (3, 4, 4)
+
+
class TestLinearRamp(object):
def test_check_simple(self):
a = np.arange(100).astype('f')
- a = pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
+ a = np.pad(a, (25, 20), 'linear_ramp', end_values=(4, 5))
b = np.array(
[4.00, 3.84, 3.68, 3.52, 3.36, 3.20, 3.04, 2.88, 2.72, 2.56,
2.40, 2.24, 2.08, 1.92, 1.76, 1.60, 1.44, 1.28, 1.12, 0.96,
@@ -562,7 +704,7 @@ class TestLinearRamp(object):
def test_check_2d(self):
arr = np.arange(20).reshape(4, 5).astype(np.float64)
- test = pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
+ test = np.pad(arr, (2, 2), mode='linear_ramp', end_values=(0, 0))
expected = np.array(
[[0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0.5, 1., 1.5, 2., 1., 0.],
@@ -593,11 +735,37 @@ class TestLinearRamp(object):
])
assert_equal(actual, expected)
+ def test_end_values(self):
+ """Ensure that end values are exact."""
+ a = np.pad(np.ones(10).reshape(2, 5), (223, 123), mode="linear_ramp")
+ assert_equal(a[:, 0], 0.)
+ assert_equal(a[:, -1], 0.)
+ assert_equal(a[0, :], 0.)
+ assert_equal(a[-1, :], 0.)
+
+ @pytest.mark.parametrize("dtype", _numeric_dtypes)
+ def test_negative_difference(self, dtype):
+ """
+ Check correct behavior of unsigned dtypes if there is a negative
+ difference between the edge to pad and `end_values`. Check both cases
+ to be independent of implementation. Test behavior for all other dtypes
+ in case dtype casting interferes with complex dtypes. See gh-14191.
+ """
+ x = np.array([3], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=0)
+ expected = np.array([0, 1, 2, 3, 2, 1, 0], dtype=dtype)
+ assert_equal(result, expected)
+
+ x = np.array([0], dtype=dtype)
+ result = np.pad(x, 3, mode="linear_ramp", end_values=3)
+ expected = np.array([3, 2, 1, 0, 1, 2, 3], dtype=dtype)
+ assert_equal(result, expected)
+
class TestReflect(object):
def test_check_simple(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'reflect')
+ a = np.pad(a, (25, 20), 'reflect')
b = np.array(
[25, 24, 23, 22, 21, 20, 19, 18, 17, 16,
15, 14, 13, 12, 11, 10, 9, 8, 7, 6,
@@ -621,7 +789,7 @@ class TestReflect(object):
def test_check_odd_method(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'reflect', reflect_type='odd')
+ a = np.pad(a, (25, 20), 'reflect', reflect_type='odd')
b = np.array(
[-25, -24, -23, -22, -21, -20, -19, -18, -17, -16,
-15, -14, -13, -12, -11, -10, -9, -8, -7, -6,
@@ -645,7 +813,7 @@ class TestReflect(object):
def test_check_large_pad(self):
a = [[4, 5, 6], [6, 7, 8]]
- a = pad(a, (5, 7), 'reflect')
+ a = np.pad(a, (5, 7), 'reflect')
b = np.array(
[[7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7, 8, 7, 6, 7],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
@@ -668,7 +836,7 @@ class TestReflect(object):
def test_check_shape(self):
a = [[4, 5, 6]]
- a = pad(a, (5, 7), 'reflect')
+ a = np.pad(a, (5, 7), 'reflect')
b = np.array(
[[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
[5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5, 6, 5, 4, 5],
@@ -689,30 +857,49 @@ class TestReflect(object):
assert_array_equal(a, b)
def test_check_01(self):
- a = pad([1, 2, 3], 2, 'reflect')
+ a = np.pad([1, 2, 3], 2, 'reflect')
b = np.array([3, 2, 1, 2, 3, 2, 1])
assert_array_equal(a, b)
def test_check_02(self):
- a = pad([1, 2, 3], 3, 'reflect')
+ a = np.pad([1, 2, 3], 3, 'reflect')
b = np.array([2, 3, 2, 1, 2, 3, 2, 1, 2])
assert_array_equal(a, b)
def test_check_03(self):
- a = pad([1, 2, 3], 4, 'reflect')
+ a = np.pad([1, 2, 3], 4, 'reflect')
b = np.array([1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3])
assert_array_equal(a, b)
- def test_check_padding_an_empty_array(self):
- a = pad(np.zeros((0, 3)), ((0,), (1,)), mode='reflect')
- b = np.zeros((0, 5))
- assert_array_equal(a, b)
+
+class TestEmptyArray(object):
+ """Check how padding behaves on arrays with an empty dimension."""
+
+ @pytest.mark.parametrize(
+ # Keep parametrization ordered, otherwise pytest-xdist might believe
+ # that different tests were collected during parallelization
+ "mode", sorted(_all_modes.keys() - {"constant", "empty"})
+ )
+ def test_pad_empty_dimension(self, mode):
+ match = ("can't extend empty axis 0 using modes other than 'constant' "
+ "or 'empty'")
+ with pytest.raises(ValueError, match=match):
+ np.pad([], 4, mode=mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad(np.ndarray(0), 4, mode=mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad(np.zeros((0, 3)), ((1,), (0,)), mode=mode)
+
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_pad_non_empty_dimension(self, mode):
+ result = np.pad(np.ones((2, 0, 2)), ((3,), (0,), (1,)), mode=mode)
+ assert result.shape == (8, 0, 4)
class TestSymmetric(object):
def test_check_simple(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'symmetric')
+ a = np.pad(a, (25, 20), 'symmetric')
b = np.array(
[24, 23, 22, 21, 20, 19, 18, 17, 16, 15,
14, 13, 12, 11, 10, 9, 8, 7, 6, 5,
@@ -736,7 +923,7 @@ class TestSymmetric(object):
def test_check_odd_method(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'symmetric', reflect_type='odd')
+ a = np.pad(a, (25, 20), 'symmetric', reflect_type='odd')
b = np.array(
[-24, -23, -22, -21, -20, -19, -18, -17, -16, -15,
-14, -13, -12, -11, -10, -9, -8, -7, -6, -5,
@@ -760,7 +947,7 @@ class TestSymmetric(object):
def test_check_large_pad(self):
a = [[4, 5, 6], [6, 7, 8]]
- a = pad(a, (5, 7), 'symmetric')
+ a = np.pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
@@ -784,7 +971,7 @@ class TestSymmetric(object):
def test_check_large_pad_odd(self):
a = [[4, 5, 6], [6, 7, 8]]
- a = pad(a, (5, 7), 'symmetric', reflect_type='odd')
+ a = np.pad(a, (5, 7), 'symmetric', reflect_type='odd')
b = np.array(
[[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
[-3, -2, -2, -1, 0, 0, 1, 2, 2, 3, 4, 4, 5, 6, 6],
@@ -807,7 +994,7 @@ class TestSymmetric(object):
def test_check_shape(self):
a = [[4, 5, 6]]
- a = pad(a, (5, 7), 'symmetric')
+ a = np.pad(a, (5, 7), 'symmetric')
b = np.array(
[[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
[5, 6, 6, 5, 4, 4, 5, 6, 6, 5, 4, 4, 5, 6, 6],
@@ -828,17 +1015,17 @@ class TestSymmetric(object):
assert_array_equal(a, b)
def test_check_01(self):
- a = pad([1, 2, 3], 2, 'symmetric')
+ a = np.pad([1, 2, 3], 2, 'symmetric')
b = np.array([2, 1, 1, 2, 3, 3, 2])
assert_array_equal(a, b)
def test_check_02(self):
- a = pad([1, 2, 3], 3, 'symmetric')
+ a = np.pad([1, 2, 3], 3, 'symmetric')
b = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1])
assert_array_equal(a, b)
def test_check_03(self):
- a = pad([1, 2, 3], 6, 'symmetric')
+ a = np.pad([1, 2, 3], 6, 'symmetric')
b = np.array([1, 2, 3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
assert_array_equal(a, b)
@@ -846,7 +1033,7 @@ class TestSymmetric(object):
class TestWrap(object):
def test_check_simple(self):
a = np.arange(100)
- a = pad(a, (25, 20), 'wrap')
+ a = np.pad(a, (25, 20), 'wrap')
b = np.array(
[75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
@@ -871,7 +1058,7 @@ class TestWrap(object):
def test_check_large_pad(self):
a = np.arange(12)
a = np.reshape(a, (3, 4))
- a = pad(a, (10, 12), 'wrap')
+ a = np.pad(a, (10, 12), 'wrap')
b = np.array(
[[10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10, 11, 8, 9, 10,
11, 8, 9, 10, 11, 8, 9, 10, 11],
@@ -929,12 +1116,12 @@ class TestWrap(object):
assert_array_equal(a, b)
def test_check_01(self):
- a = pad([1, 2, 3], 3, 'wrap')
+ a = np.pad([1, 2, 3], 3, 'wrap')
b = np.array([1, 2, 3, 1, 2, 3, 1, 2, 3])
assert_array_equal(a, b)
def test_check_02(self):
- a = pad([1, 2, 3], 4, 'wrap')
+ a = np.pad([1, 2, 3], 4, 'wrap')
b = np.array([3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1])
assert_array_equal(a, b)
@@ -943,35 +1130,25 @@ class TestWrap(object):
b = np.pad(a, (0, 5), mode="wrap")
assert_array_equal(a, b[:-5, :-5])
+ def test_repeated_wrapping(self):
+ """
+ Check wrapping on each side individually if the wrapped area is longer
+ than the original array.
+ """
+ a = np.arange(5)
+ b = np.pad(a, (12, 0), mode="wrap")
+ assert_array_equal(np.r_[a, a, a, a][3:], b)
-class TestStatLen(object):
- def test_check_simple(self):
- a = np.arange(30)
- a = np.reshape(a, (6, 5))
- a = pad(a, ((2, 3), (3, 2)), mode='mean', stat_length=(3,))
- b = np.array(
- [[6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
- [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
-
- [1, 1, 1, 0, 1, 2, 3, 4, 3, 3],
- [6, 6, 6, 5, 6, 7, 8, 9, 8, 8],
- [11, 11, 11, 10, 11, 12, 13, 14, 13, 13],
- [16, 16, 16, 15, 16, 17, 18, 19, 18, 18],
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
- [26, 26, 26, 25, 26, 27, 28, 29, 28, 28],
-
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23],
- [21, 21, 21, 20, 21, 22, 23, 24, 23, 23]]
- )
- assert_array_equal(a, b)
+ a = np.arange(5)
+ b = np.pad(a, (0, 12), mode="wrap")
+ assert_array_equal(np.r_[a, a, a, a][:-3], b)
class TestEdge(object):
def test_check_simple(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
- a = pad(a, ((2, 3), (3, 2)), 'edge')
+ a = np.pad(a, ((2, 3), (3, 2)), 'edge')
b = np.array(
[[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
@@ -991,56 +1168,123 @@ class TestEdge(object):
# Check a pad_width of the form ((1, 2),).
# Regression test for issue gh-7808.
a = np.array([1, 2, 3])
- padded = pad(a, ((1, 2),), 'edge')
+ padded = np.pad(a, ((1, 2),), 'edge')
expected = np.array([1, 1, 2, 3, 3, 3])
assert_array_equal(padded, expected)
a = np.array([[1, 2, 3], [4, 5, 6]])
- padded = pad(a, ((1, 2),), 'edge')
- expected = pad(a, ((1, 2), (1, 2)), 'edge')
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.pad(a, ((1, 2), (1, 2)), 'edge')
assert_array_equal(padded, expected)
a = np.arange(24).reshape(2, 3, 4)
- padded = pad(a, ((1, 2),), 'edge')
- expected = pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
+ padded = np.pad(a, ((1, 2),), 'edge')
+ expected = np.pad(a, ((1, 2), (1, 2), (1, 2)), 'edge')
assert_array_equal(padded, expected)
-class TestZeroPadWidth(object):
- def test_zero_pad_width(self):
- arr = np.arange(30)
- arr = np.reshape(arr, (6, 5))
- for pad_width in (0, (0, 0), ((0, 0), (0, 0))):
- assert_array_equal(arr, pad(arr, pad_width, mode='constant'))
+class TestEmpty(object):
+ def test_simple(self):
+ arr = np.arange(24).reshape(4, 6)
+ result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
+ assert result.shape == (9, 10)
+ assert_equal(arr, result[2:-3, 3:-1])
+ def test_pad_empty_dimension(self):
+ arr = np.zeros((3, 0, 2))
+ result = np.pad(arr, [(0,), (2,), (1,)], mode="empty")
+ assert result.shape == (3, 4, 4)
-class TestLegacyVectorFunction(object):
- def test_legacy_vector_functionality(self):
- def _padwithtens(vector, pad_width, iaxis, kwargs):
- vector[:pad_width[0]] = 10
- vector[-pad_width[1]:] = 10
- return vector
- a = np.arange(6).reshape(2, 3)
- a = pad(a, 2, _padwithtens)
- b = np.array(
- [[10, 10, 10, 10, 10, 10, 10],
- [10, 10, 10, 10, 10, 10, 10],
+def test_legacy_vector_functionality():
+ def _padwithtens(vector, pad_width, iaxis, kwargs):
+ vector[:pad_width[0]] = 10
+ vector[-pad_width[1]:] = 10
- [10, 10, 0, 1, 2, 10, 10],
- [10, 10, 3, 4, 5, 10, 10],
+ a = np.arange(6).reshape(2, 3)
+ a = np.pad(a, 2, _padwithtens)
+ b = np.array(
+ [[10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
- [10, 10, 10, 10, 10, 10, 10],
- [10, 10, 10, 10, 10, 10, 10]]
- )
- assert_array_equal(a, b)
+ [10, 10, 0, 1, 2, 10, 10],
+ [10, 10, 3, 4, 5, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10],
+ [10, 10, 10, 10, 10, 10, 10]]
+ )
+ assert_array_equal(a, b)
-class TestNdarrayPadWidth(object):
- def test_check_simple(self):
+
+def test_unicode_mode():
+ a = np.pad([1], 2, mode=u'constant')
+ b = np.array([0, 0, 1, 0, 0])
+ assert_array_equal(a, b)
+
+
+@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])
+def test_object_input(mode):
+ # Regression test for issue gh-11395.
+ a = np.full((4, 3), fill_value=None)
+ pad_amt = ((2, 3), (3, 2))
+ b = np.full((9, 8), fill_value=None)
+ assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
+
+
+class TestPadWidth(object):
+ @pytest.mark.parametrize("pad_width", [
+ (4, 5, 6, 7),
+ ((1,), (2,), (3,)),
+ ((1, 2), (3, 4), (5, 6)),
+ ((3, 4, 5), (0, 1, 2)),
+ ])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_misshaped_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "operands could not be broadcast together"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, pad_width, mode)
+
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_misshaped_pad_width_2(self, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = ("input operand has more dimensions than allowed by the axis "
+ "remapping")
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, (((3,), (4,), (5,)), ((0,), (1,), (2,))), mode)
+
+ @pytest.mark.parametrize(
+ "pad_width", [-2, (-2,), (3, -1), ((5, 2), (-2, 3)), ((-4,), (2,))])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_negative_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "index can't contain negative values"
+ with pytest.raises(ValueError, match=match):
+ np.pad(arr, pad_width, mode)
+
+ @pytest.mark.parametrize("pad_width", [
+ "3",
+ "word",
+ None,
+ object(),
+ 3.4,
+ ((2, 3, 4), (3, 2)), # dtype=object (tuple)
+ complex(1, -1),
+ ((-2.1, 3), (3, 2)),
+ ])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_bad_type(self, pad_width, mode):
+ arr = np.arange(30).reshape((6, 5))
+ match = "`pad_width` must be of integral type."
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, pad_width, mode)
+ with pytest.raises(TypeError, match=match):
+ np.pad(arr, np.array(pad_width), mode)
+
+ def test_pad_width_as_ndarray(self):
a = np.arange(12)
a = np.reshape(a, (4, 3))
- a = pad(a, np.array(((2, 3), (3, 2))), 'edge')
+ a = np.pad(a, np.array(((2, 3), (3, 2))), 'edge')
b = np.array(
[[0, 0, 0, 0, 1, 2, 2, 2],
[0, 0, 0, 0, 1, 2, 2, 2],
@@ -1056,121 +1300,62 @@ class TestNdarrayPadWidth(object):
)
assert_array_equal(a, b)
-
-class TestUnicodeInput(object):
- def test_unicode_mode(self):
- constant_mode = u'constant'
- a = np.pad([1], 2, mode=constant_mode)
- b = np.array([0, 0, 1, 0, 0])
- assert_array_equal(a, b)
-
-
-class TestObjectInput(object):
- def test_object_input(self):
- # Regression test for issue gh-11395.
- a = np.full((4, 3), None)
- pad_amt = ((2, 3), (3, 2))
- b = np.full((9, 8), None)
- modes = ['edge',
- 'symmetric',
- 'reflect',
- 'wrap',
- ]
- for mode in modes:
- assert_array_equal(pad(a, pad_amt, mode=mode), b)
-
-
-class TestValueError1(object):
- def test_check_simple(self):
- arr = np.arange(30)
- arr = np.reshape(arr, (6, 5))
- kwargs = dict(mode='mean', stat_length=(3, ))
- assert_raises(ValueError, pad, arr, ((2, 3), (3, 2), (4, 5)),
- **kwargs)
-
- def test_check_negative_stat_length(self):
- arr = np.arange(30)
- arr = np.reshape(arr, (6, 5))
- kwargs = dict(mode='mean', stat_length=(-3, ))
- assert_raises(ValueError, pad, arr, ((2, 3), (3, 2)),
- **kwargs)
-
- def test_check_negative_pad_width(self):
- arr = np.arange(30)
- arr = np.reshape(arr, (6, 5))
- kwargs = dict(mode='mean', stat_length=(3, ))
- assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
- **kwargs)
-
- def test_check_empty_array(self):
- assert_raises(ValueError, pad, [], 4, mode='reflect')
- assert_raises(ValueError, pad, np.ndarray(0), 4, mode='reflect')
- assert_raises(ValueError, pad, np.zeros((0, 3)), ((1,), (0,)),
- mode='reflect')
-
-
-class TestValueError2(object):
- def test_check_negative_pad_amount(self):
- arr = np.arange(30)
- arr = np.reshape(arr, (6, 5))
- kwargs = dict(mode='mean', stat_length=(3, ))
- assert_raises(ValueError, pad, arr, ((-2, 3), (3, 2)),
- **kwargs)
-
-
-class TestValueError3(object):
- def test_check_kwarg_not_allowed(self):
- arr = np.arange(30).reshape(5, 6)
- assert_raises(ValueError, pad, arr, 4, mode='mean',
- reflect_type='odd')
-
- def test_mode_not_set(self):
- arr = np.arange(30).reshape(5, 6)
- assert_raises(TypeError, pad, arr, 4)
-
- def test_malformed_pad_amount(self):
- arr = np.arange(30).reshape(5, 6)
- assert_raises(ValueError, pad, arr, (4, 5, 6, 7), mode='constant')
-
- def test_malformed_pad_amount2(self):
- arr = np.arange(30).reshape(5, 6)
- assert_raises(ValueError, pad, arr, ((3, 4, 5), (0, 1, 2)),
- mode='constant')
-
- def test_pad_too_many_axes(self):
- arr = np.arange(30).reshape(5, 6)
-
- # Attempt to pad using a 3D array equivalent
- bad_shape = (((3,), (4,), (5,)), ((0,), (1,), (2,)))
- assert_raises(ValueError, pad, arr, bad_shape,
- mode='constant')
-
-
-class TestTypeError1(object):
- def test_float(self):
- arr = np.arange(30)
- assert_raises(TypeError, pad, arr, ((-2.1, 3), (3, 2)))
- assert_raises(TypeError, pad, arr, np.array(((-2.1, 3), (3, 2))))
-
- def test_str(self):
- arr = np.arange(30)
- assert_raises(TypeError, pad, arr, 'foo')
- assert_raises(TypeError, pad, arr, np.array('foo'))
-
- def test_object(self):
- class FooBar(object):
- pass
- arr = np.arange(30)
- assert_raises(TypeError, pad, arr, FooBar())
-
- def test_complex(self):
- arr = np.arange(30)
- assert_raises(TypeError, pad, arr, complex(1, -1))
- assert_raises(TypeError, pad, arr, np.array(complex(1, -1)))
-
- def test_check_wrong_pad_amount(self):
- arr = np.arange(30)
- arr = np.reshape(arr, (6, 5))
- kwargs = dict(mode='mean', stat_length=(3, ))
- assert_raises(TypeError, pad, arr, ((2, 3, 4), (3, 2)),
- **kwargs)
+ @pytest.mark.parametrize("pad_width", [0, (0, 0), ((0, 0), (0, 0))])
+ @pytest.mark.parametrize("mode", _all_modes.keys())
+ def test_zero_pad_width(self, pad_width, mode):
+ arr = np.arange(30).reshape(6, 5)
+ assert_array_equal(arr, np.pad(arr, pad_width, mode=mode))
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_kwargs(mode):
+ """Test behavior of pad's kwargs for the given mode."""
+ allowed = _all_modes[mode]
+ not_allowed = {}
+ for kwargs in _all_modes.values():
+ if kwargs != allowed:
+ not_allowed.update(kwargs)
+ # Test if allowed keyword arguments pass
+ np.pad([1, 2, 3], 1, mode, **allowed)
+ # Test if prohibited keyword arguments of other modes raise an error
+ for key, value in not_allowed.items():
+ match = "unsupported keyword arguments for mode '{}'".format(mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1, 2, 3], 1, mode, **{key: value})
+
+
+def test_constant_zero_default():
+ arr = np.array([1, 1])
+ assert_array_equal(np.pad(arr, 2), [0, 0, 1, 1, 0, 0])
+
+
+@pytest.mark.parametrize("mode", [1, "const", object(), None, True, False])
+def test_unsupported_mode(mode):
+ match= "mode '{}' is not supported".format(mode)
+ with pytest.raises(ValueError, match=match):
+ np.pad([1, 2, 3], 4, mode=mode)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_non_contiguous_array(mode):
+ arr = np.arange(24).reshape(4, 6)[::2, ::2]
+ result = np.pad(arr, (2, 3), mode)
+ assert result.shape == (7, 8)
+ assert_equal(result[2:-3, 2:-3], arr)
+
+
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_memory_layout_persistence(mode):
+ """Test if C and F order is preserved for all pad modes."""
+ x = np.ones((5, 10), order='C')
+ assert np.pad(x, 5, mode).flags["C_CONTIGUOUS"]
+ x = np.ones((5, 10), order='F')
+ assert np.pad(x, 5, mode).flags["F_CONTIGUOUS"]
+
+
+@pytest.mark.parametrize("dtype", _numeric_dtypes)
+@pytest.mark.parametrize("mode", _all_modes.keys())
+def test_dtype_persistence(dtype, mode):
+ arr = np.zeros((3, 2, 1), dtype=dtype)
+ result = np.pad(arr, 1, mode=mode)
+ assert result.dtype == dtype
diff --git a/numpy/lib/tests/test_arraysetops.py b/numpy/lib/tests/test_arraysetops.py
index 4b61726d2..fd21a7f76 100644
--- a/numpy/lib/tests/test_arraysetops.py
+++ b/numpy/lib/tests/test_arraysetops.py
@@ -4,7 +4,6 @@
from __future__ import division, absolute_import, print_function
import numpy as np
-import sys
from numpy.testing import (assert_array_equal, assert_equal,
assert_raises, assert_raises_regex)
@@ -137,8 +136,8 @@ class TestSetOps(object):
np.nan),
# should fail because attempting
# to downcast to smaller int type:
- (np.array([1, 2, 3], dtype=np.int32),
- np.array([5, 7, 2], dtype=np.int64),
+ (np.array([1, 2, 3], dtype=np.int16),
+ np.array([5, 1<<20, 2], dtype=np.int32),
None),
# should fail because attempting to cast
# two special floating point values
@@ -153,8 +152,8 @@ class TestSetOps(object):
# specifically, raise an appropriate
# Exception when attempting to append or
# prepend with an incompatible type
- msg = 'must be compatible'
- with assert_raises_regex(TypeError, msg):
+ msg = 'cannot convert'
+ with assert_raises_regex(ValueError, msg):
ediff1d(ary=ary,
to_end=append,
to_begin=prepend)
@@ -388,6 +387,13 @@ class TestSetOps(object):
a = np.array((), np.uint32)
assert_equal(setdiff1d(a, []).dtype, np.uint32)
+ def test_setdiff1d_unique(self):
+ a = np.array([3, 2, 1])
+ b = np.array([7, 5, 2])
+ expected = np.array([3, 1])
+ actual = setdiff1d(a, b, assume_unique=True)
+ assert_equal(actual, expected)
+
def test_setdiff1d_char_array(self):
a = np.array(['a', 'b', 'c'])
b = np.array(['a', 'b', 's'])
@@ -416,41 +422,41 @@ class TestUnique(object):
assert_array_equal(v, b, msg)
msg = base_msg.format('return_index', dt)
- v, j = unique(a, 1, 0, 0)
+ v, j = unique(a, True, False, False)
assert_array_equal(v, b, msg)
assert_array_equal(j, i1, msg)
msg = base_msg.format('return_inverse', dt)
- v, j = unique(a, 0, 1, 0)
+ v, j = unique(a, False, True, False)
assert_array_equal(v, b, msg)
assert_array_equal(j, i2, msg)
msg = base_msg.format('return_counts', dt)
- v, j = unique(a, 0, 0, 1)
+ v, j = unique(a, False, False, True)
assert_array_equal(v, b, msg)
assert_array_equal(j, c, msg)
msg = base_msg.format('return_index and return_inverse', dt)
- v, j1, j2 = unique(a, 1, 1, 0)
+ v, j1, j2 = unique(a, True, True, False)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i1, msg)
assert_array_equal(j2, i2, msg)
msg = base_msg.format('return_index and return_counts', dt)
- v, j1, j2 = unique(a, 1, 0, 1)
+ v, j1, j2 = unique(a, True, False, True)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i1, msg)
assert_array_equal(j2, c, msg)
msg = base_msg.format('return_inverse and return_counts', dt)
- v, j1, j2 = unique(a, 0, 1, 1)
+ v, j1, j2 = unique(a, False, True, True)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i2, msg)
assert_array_equal(j2, c, msg)
msg = base_msg.format(('return_index, return_inverse '
'and return_counts'), dt)
- v, j1, j2, j3 = unique(a, 1, 1, 1)
+ v, j1, j2, j3 = unique(a, True, True, True)
assert_array_equal(v, b, msg)
assert_array_equal(j1, i1, msg)
assert_array_equal(j2, i2, msg)
@@ -594,8 +600,11 @@ class TestUnique(object):
assert_array_equal(unique(data, axis=1), result.astype(dtype), msg)
msg = 'Unique with 3d array and axis=2 failed'
- data3d = np.dstack([data] * 3)
- result = data3d[..., :1]
+ data3d = np.array([[[1, 1],
+ [1, 0]],
+ [[0, 1],
+ [0, 0]]]).astype(dtype)
+ result = np.take(data3d, [1, 0], axis=2)
assert_array_equal(unique(data3d, axis=2), result, msg)
uniq, idx, inv, cnt = unique(data, axis=0, return_index=True,
diff --git a/numpy/lib/tests/test_financial.py b/numpy/lib/tests/test_financial.py
index 524915041..21088765f 100644
--- a/numpy/lib/tests/test_financial.py
+++ b/numpy/lib/tests/test_financial.py
@@ -9,6 +9,12 @@ from numpy.testing import (
class TestFinancial(object):
+ def test_npv_irr_congruence(self):
+ # IRR is defined as the rate required for the present value of a
+ # a series of cashflows to be zero i.e. NPV(IRR(x), x) = 0
+ cashflows = np.array([-40000, 5000, 8000, 12000, 30000])
+ assert_allclose(np.npv(np.irr(cashflows), cashflows), 0, atol=1e-10, rtol=0)
+
def test_rate(self):
assert_almost_equal(
np.rate(10, 0, -3500, 10000),
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 3185e32ac..062c21725 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -287,7 +287,7 @@ from io import BytesIO
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_raises, assert_raises_regex,
- raises
+ assert_warns
)
from numpy.lib import format
@@ -412,6 +412,7 @@ record_arrays = [
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
+ np.zeros(1, dtype=[('c', ('<f8', (5,)), (2,))])
]
@@ -427,7 +428,7 @@ def roundtrip(arr):
f = BytesIO()
format.write_array(f, arr)
f2 = BytesIO(f.getvalue())
- arr2 = format.read_array(f2)
+ arr2 = format.read_array(f2, allow_pickle=True)
return arr2
@@ -524,6 +525,30 @@ def test_compressed_roundtrip():
assert_array_equal(arr, arr1)
+# aligned
+dt1 = np.dtype('i1, i4, i1', align=True)
+# non-aligned, explicit offsets
+dt2 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6]})
+# nested struct-in-struct
+dt3 = np.dtype({'names': ['c', 'd'], 'formats': ['i4', dt2]})
+# field with '' name
+dt4 = np.dtype({'names': ['a', '', 'b'], 'formats': ['i4']*3})
+# titles
+dt5 = np.dtype({'names': ['a', 'b'], 'formats': ['i4', 'i4'],
+ 'offsets': [1, 6], 'titles': ['aa', 'bb']})
+
+@pytest.mark.parametrize("dt", [dt1, dt2, dt3, dt4, dt5])
+def test_load_padded_dtype(dt):
+ arr = np.zeros(3, dt)
+ for i in range(3):
+ arr[i] = i + 5
+ npz_file = os.path.join(tempdir, 'aligned.npz')
+ np.savez(npz_file, arr=arr)
+ arr1 = np.load(npz_file)['arr']
+ assert_array_equal(arr, arr1)
+
+
def test_python2_python3_interoperability():
if sys.version_info[0] >= 3:
fname = 'win64python2.npy'
@@ -533,7 +558,6 @@ def test_python2_python3_interoperability():
data = np.load(path)
assert_array_equal(data, np.ones(2))
-
def test_pickle_python2_python3():
# Test that loading object arrays saved on Python 2 works both on
# Python 2 and Python 3 and vice versa
@@ -554,7 +578,7 @@ def test_pickle_python2_python3():
path = os.path.join(data_dir, fname)
for encoding in ['bytes', 'latin1']:
- data_f = np.load(path, encoding=encoding)
+ data_f = np.load(path, allow_pickle=True, encoding=encoding)
if fname.endswith('.npz'):
data = data_f['x']
data_f.close()
@@ -576,16 +600,19 @@ def test_pickle_python2_python3():
if sys.version_info[0] >= 3:
if fname.startswith('py2'):
if fname.endswith('.npz'):
- data = np.load(path)
+ data = np.load(path, allow_pickle=True)
assert_raises(UnicodeError, data.__getitem__, 'x')
data.close()
- data = np.load(path, fix_imports=False, encoding='latin1')
+ data = np.load(path, allow_pickle=True, fix_imports=False,
+ encoding='latin1')
assert_raises(ImportError, data.__getitem__, 'x')
data.close()
else:
- assert_raises(UnicodeError, np.load, path)
+ assert_raises(UnicodeError, np.load, path,
+ allow_pickle=True)
assert_raises(ImportError, np.load, path,
- encoding='latin1', fix_imports=False)
+ allow_pickle=True, fix_imports=False,
+ encoding='latin1')
def test_pickle_disallow():
@@ -603,6 +630,61 @@ def test_pickle_disallow():
assert_raises(ValueError, np.save, path, np.array([None], dtype=object),
allow_pickle=False)
+@pytest.mark.parametrize('dt', [
+ np.dtype(np.dtype([('a', np.int8),
+ ('b', np.int16),
+ ('c', np.int32),
+ ], align=True),
+ (3,)),
+ np.dtype([('x', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8,
+ },
+ (3,)),
+ (4,),
+ )]),
+ np.dtype([('x',
+ ('<f8', (5,)),
+ (2,),
+ )]),
+ np.dtype([('x', np.dtype((
+ np.dtype((
+ np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8}),
+ (3,)
+ )),
+ (4,)
+ )))
+ ]),
+ np.dtype([
+ ('a', np.dtype((
+ np.dtype((
+ np.dtype((
+ np.dtype([
+ ('a', int),
+ ('b', np.dtype({'names':['a','b'],
+ 'formats':['i1','i1'],
+ 'offsets':[0,4],
+ 'itemsize':8})),
+ ]),
+ (3,),
+ )),
+ (4,),
+ )),
+ (5,),
+ )))
+ ]),
+ ])
+
+def test_descr_to_dtype(dt):
+ dt1 = format.descr_to_dtype(dt.descr)
+ assert_equal_(dt1, dt)
+ arr1 = np.zeros(3, dt)
+ arr2 = roundtrip(arr1)
+ assert_array_equal(arr1, arr2)
def test_version_2_0():
f = BytesIO()
@@ -857,3 +939,27 @@ def test_empty_npz():
fname = os.path.join(tempdir, "nothing.npz")
np.savez(fname)
np.load(fname)
+
+
+def test_unicode_field_names():
+ # gh-7391
+ arr = np.array([
+ (1, 3),
+ (1, 2),
+ (1, 3),
+ (1, 2)
+ ], dtype=[
+ ('int', int),
+ (u'\N{CJK UNIFIED IDEOGRAPH-6574}\N{CJK UNIFIED IDEOGRAPH-5F62}', int)
+ ])
+ fname = os.path.join(tempdir, "unicode.npy")
+ with open(fname, 'wb') as f:
+ format.write_array(f, arr, version=(3, 0))
+ with open(fname, 'rb') as f:
+ arr2 = format.read_array(f)
+ assert_array_equal(arr, arr2)
+
+ # notifies the user that 3.0 is selected
+ with open(fname, 'wb') as f:
+ with assert_warns(UserWarning):
+ format.write_array(f, arr, version=None)
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 40cca1dbb..1eae8ccfb 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -4,28 +4,29 @@ import operator
import warnings
import sys
import decimal
+import types
+from fractions import Fraction
import pytest
import numpy as np
from numpy import ma
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_raises, assert_allclose,
- assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
- HAS_REFCOUNT,
+ assert_array_almost_equal, assert_raises, assert_allclose, IS_PYPY,
+ assert_warns, assert_raises_regex, suppress_warnings, HAS_REFCOUNT,
)
import numpy.lib.function_base as nfb
from numpy.random import rand
from numpy.lib import (
add_newdoc_ufunc, angle, average, bartlett, blackman, corrcoef, cov,
delete, diff, digitize, extract, flipud, gradient, hamming, hanning,
- histogram, histogramdd, i0, insert, interp, kaiser, meshgrid, msort,
- piecewise, place, rot90, select, setxor1d, sinc, split, trapz, trim_zeros,
- unwrap, unique, vectorize
+ i0, insert, interp, kaiser, meshgrid, msort, piecewise, place, rot90,
+ select, setxor1d, sinc, trapz, trim_zeros, unwrap, unique, vectorize
)
from numpy.compat import long
+PY2 = sys.version_info[0] == 2
def get_mat(n):
data = np.arange(n)
@@ -33,6 +34,17 @@ def get_mat(n):
return data
+def _make_complex(real, imag):
+ """
+ Like real + 1j * imag, but behaves as expected when imag contains non-finite
+ values
+ """
+ ret = np.zeros(np.broadcast(real, imag).shape, np.complex_)
+ ret.real = real
+ ret.imag = imag
+ return ret
+
+
class TestRot90(object):
def test_basic(self):
assert_raises(ValueError, rot90, np.ones(4))
@@ -355,9 +367,9 @@ class TestAverage(object):
assert_equal(type(np.average(a, weights=w)), subclass)
def test_upcasting(self):
- types = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
+ typs = [('i4', 'i4', 'f8'), ('i4', 'f4', 'f8'), ('f4', 'i4', 'f8'),
('f4', 'f4', 'f4'), ('f4', 'f8', 'f8')]
- for at, wt, rt in types:
+ for at, wt, rt in typs:
a = np.array([[1,2],[3,4]], dtype=at)
w = np.array([[1,2],[3,4]], dtype=wt)
assert_equal(np.average(a, weights=w).dtype, np.dtype(rt))
@@ -411,27 +423,17 @@ class TestSelect(object):
assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0])
def test_deprecated_empty(self):
- with warnings.catch_warnings(record=True):
- warnings.simplefilter("always")
- assert_equal(select([], [], 3j), 3j)
-
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- assert_warns(DeprecationWarning, select, [], [])
- warnings.simplefilter("error")
- assert_raises(DeprecationWarning, select, [], [])
+ assert_raises(ValueError, select, [], [], 3j)
+ assert_raises(ValueError, select, [], [])
def test_non_bool_deprecation(self):
choices = self.choices
conditions = self.conditions[:]
- with warnings.catch_warnings():
- warnings.filterwarnings("always")
- conditions[0] = conditions[0].astype(np.int_)
- assert_warns(DeprecationWarning, select, conditions, choices)
- conditions[0] = conditions[0].astype(np.uint8)
- assert_warns(DeprecationWarning, select, conditions, choices)
- warnings.filterwarnings("error")
- assert_raises(DeprecationWarning, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.int_)
+ assert_raises(TypeError, select, conditions, choices)
+ conditions[0] = conditions[0].astype(np.uint8)
+ assert_raises(TypeError, select, conditions, choices)
+ assert_raises(TypeError, select, conditions, choices)
def test_many_arguments(self):
# This used to be limited by NPY_MAXARGS == 32
@@ -684,6 +686,9 @@ class TestDiff(object):
assert_raises(np.AxisError, diff, x, axis=3)
assert_raises(np.AxisError, diff, x, axis=-4)
+ x = np.array(1.11111111111, np.float64)
+ assert_raises(ValueError, diff, x)
+
def test_nd(self):
x = 20 * rand(10, 20, 30)
out1 = x[:, :, 1:] - x[:, :, :-1]
@@ -933,7 +938,7 @@ class TestGradient(object):
assert_equal(type(out), type(x))
# And make sure that the output and input don't have aliased mask
# arrays
- assert_(x.mask is not out.mask)
+ assert_(x._mask is not out._mask)
# Also check that edge_order=2 doesn't alter the original mask
x2 = np.ma.arange(5)
x2[2] = np.ma.masked
@@ -1090,7 +1095,7 @@ class TestAngle(object):
np.arctan(3.0 / 1.0),
np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
-np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
- z = angle(x, deg=1)
+ z = angle(x, deg=True)
zo = np.array(yo) * 180 / np.pi
assert_array_almost_equal(y, yo, 11)
assert_array_almost_equal(z, zo, 11)
@@ -1500,6 +1505,49 @@ class TestVectorize(object):
f(x)
+class TestLeaks(object):
+ class A(object):
+ iters = 20
+
+ def bound(self, *args):
+ return 0
+
+ @staticmethod
+ def unbound(*args):
+ return 0
+
+ @pytest.mark.skipif(not HAS_REFCOUNT, reason="Python lacks refcounts")
+ @pytest.mark.parametrize('name, incr', [
+ ('bound', A.iters),
+ ('unbound', 0),
+ ])
+ def test_frompyfunc_leaks(self, name, incr):
+ # exposed in gh-11867 as np.vectorized, but the problem stems from
+ # frompyfunc.
+ # class.attribute = np.frompyfunc(<method>) creates a
+ # reference cycle if <method> is a bound class method. It requires a
+ # gc collection cycle to break the cycle (on CPython 3)
+ import gc
+ A_func = getattr(self.A, name)
+ gc.disable()
+ try:
+ refcount = sys.getrefcount(A_func)
+ for i in range(self.A.iters):
+ a = self.A()
+ a.f = np.frompyfunc(getattr(a, name), 1, 1)
+ out = a.f(np.arange(10))
+ a = None
+ if PY2:
+ assert_equal(sys.getrefcount(A_func), refcount)
+ else:
+ # A.func is part of a reference cycle if incr is non-zero
+ assert_equal(sys.getrefcount(A_func), refcount + incr)
+ for i in range(5):
+ gc.collect()
+ assert_equal(sys.getrefcount(A_func), refcount)
+ finally:
+ gc.enable()
+
class TestDigitize(object):
def test_forward(self):
@@ -1862,9 +1910,9 @@ class TestCov(object):
[-np.inf, np.inf]]))
def test_1D_rowvar(self):
- assert_allclose(cov(self.x3), cov(self.x3, rowvar=0))
+ assert_allclose(cov(self.x3), cov(self.x3, rowvar=False))
y = np.array([0.0780, 0.3107, 0.2111, 0.0334, 0.8501])
- assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=0))
+ assert_allclose(cov(self.x3, y), cov(self.x3, y, rowvar=False))
def test_1D_variance(self):
assert_allclose(cov(self.x3, ddof=1), np.var(self.x3, ddof=1))
@@ -1926,9 +1974,9 @@ class Test_I0(object):
np.array(1.0634833707413234))
A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549])
- assert_almost_equal(
- i0(A),
- np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049]))
+ expected = np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049])
+ assert_almost_equal(i0(A), expected)
+ assert_almost_equal(i0(-A), expected)
B = np.array([[0.827002, 0.99959078],
[0.89694769, 0.39298162],
@@ -1942,6 +1990,26 @@ class Test_I0(object):
[1.03633899, 1.00067775],
[1.03352052, 1.13557954],
[1.05884290, 1.06432317]]))
+ # Regression test for gh-11205
+ i0_0 = np.i0([0.])
+ assert_equal(i0_0.shape, (1,))
+ assert_array_equal(np.i0([0.]), np.array([1.]))
+
+ def test_non_array(self):
+ a = np.arange(4)
+
+ class array_like:
+ __array_interface__ = a.__array_interface__
+
+ def __array_wrap__(self, arr):
+ return self
+
+ # E.g. pandas series survive ufunc calls through array-wrap:
+ assert isinstance(np.abs(array_like()), array_like)
+ exp = np.i0(a)
+ res = np.i0(array_like())
+
+ assert_array_equal(exp, res)
class TestKaiser(object):
@@ -2311,7 +2379,7 @@ class TestInterp(object):
x0 = np.nan
assert_almost_equal(np.interp(x0, x, y), x0)
- def test_non_finite_behavior(self):
+ def test_non_finite_behavior_exact_x(self):
x = [1, 2, 2.5, 3, 4]
xp = [1, 2, 3, 4]
fp = [1, 2, np.inf, 4]
@@ -2319,6 +2387,64 @@ class TestInterp(object):
fp = [1, 2, np.nan, 4]
assert_almost_equal(np.interp(x, xp, fp), [1, 2, np.nan, np.nan, 4])
+ @pytest.fixture(params=[
+ lambda x: np.float_(x),
+ lambda x: _make_complex(x, 0),
+ lambda x: _make_complex(0, x),
+ lambda x: _make_complex(x, np.multiply(x, -2))
+ ], ids=[
+ 'real',
+ 'complex-real',
+ 'complex-imag',
+ 'complex-both'
+ ])
+ def sc(self, request):
+ """ scale function used by the below tests """
+ return request.param
+
+ def test_non_finite_any_nan(self, sc):
+ """ test that nans are propagated """
+ assert_equal(np.interp(0.5, [np.nan, 1], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, np.nan], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([np.nan, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([ 0, np.nan])), sc(np.nan))
+
+ def test_non_finite_inf(self, sc):
+ """ Test that interp between opposite infs gives nan """
+ assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 0, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([-np.inf, +np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, 1], sc([+np.inf, -np.inf])), sc(np.nan))
+
+ # unless the y values are equal
+ assert_equal(np.interp(0.5, [-np.inf, +np.inf], sc([ 10, 10])), sc(10))
+
+ def test_non_finite_half_inf_xf(self, sc):
+ """ Test that interp where both axes have a bound at inf gives nan """
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([-np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([+np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, -np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [-np.inf, 1], sc([ 0, +np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([-np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([+np.inf, 10])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, -np.inf])), sc(np.nan))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([ 0, +np.inf])), sc(np.nan))
+
+ def test_non_finite_half_inf_x(self, sc):
+ """ Test interp where the x axis has a bound at inf """
+ assert_equal(np.interp(0.5, [-np.inf, -np.inf], sc([0, 10])), sc(10))
+ assert_equal(np.interp(0.5, [-np.inf, 1 ], sc([0, 10])), sc(10))
+ assert_equal(np.interp(0.5, [ 0, +np.inf], sc([0, 10])), sc(0))
+ assert_equal(np.interp(0.5, [+np.inf, +np.inf], sc([0, 10])), sc(0))
+
+ def test_non_finite_half_inf_f(self, sc):
+ """ Test interp where the f axis has a bound at inf """
+ assert_equal(np.interp(0.5, [0, 1], sc([ 0, -np.inf])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([ 0, +np.inf])), sc(+np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, 10])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, 10])), sc(+np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([-np.inf, -np.inf])), sc(-np.inf))
+ assert_equal(np.interp(0.5, [0, 1], sc([+np.inf, +np.inf])), sc(+np.inf))
+
def test_complex_interp(self):
# test complex interpolation
x = np.linspace(0, 1, 5)
@@ -2393,11 +2519,23 @@ class TestPercentile(object):
assert_equal(np.percentile(x, 100), 3.5)
assert_equal(np.percentile(x, 50), 1.75)
x[1] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(x, 0), np.nan)
- assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)
- assert_(w[0].category is RuntimeWarning)
+ assert_equal(np.percentile(x, 0), np.nan)
+ assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)
+
+ def test_fraction(self):
+ x = [Fraction(i, 2) for i in np.arange(8)]
+
+ p = np.percentile(x, Fraction(0))
+ assert_equal(p, Fraction(0))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, Fraction(100))
+ assert_equal(p, Fraction(7, 2))
+ assert_equal(type(p), Fraction)
+
+ p = np.percentile(x, Fraction(50))
+ assert_equal(p, Fraction(7, 4))
+ assert_equal(type(p), Fraction)
def test_api(self):
d = np.ones(5)
@@ -2735,85 +2873,63 @@ class TestPercentile(object):
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, 0.3), np.nan)
- assert_equal(np.percentile(a, 0.3, axis=0), np.nan)
- assert_equal(np.percentile(a, [0.3, 0.6], axis=0),
- np.array([np.nan] * 2))
- assert_(w[0].category is RuntimeWarning)
- assert_(w[1].category is RuntimeWarning)
- assert_(w[2].category is RuntimeWarning)
+ assert_equal(np.percentile(a, 0.3), np.nan)
+ assert_equal(np.percentile(a, 0.3, axis=0), np.nan)
+ assert_equal(np.percentile(a, [0.3, 0.6], axis=0),
+ np.array([np.nan] * 2))
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, 0.3), np.nan)
- assert_equal(np.percentile(a, 0.3).ndim, 0)
- assert_(w[0].category is RuntimeWarning)
+ assert_equal(np.percentile(a, 0.3), np.nan)
+ assert_equal(np.percentile(a, 0.3).ndim, 0)
# axis0 zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 0)
b[2, 3] = np.nan
b[1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, 0.3, 0), b)
+ assert_equal(np.percentile(a, 0.3, 0), b)
# axis0 not zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], 0)
b[:, 2, 3] = np.nan
b[:, 1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, [0.3, 0.6], 0), b)
+ assert_equal(np.percentile(a, [0.3, 0.6], 0), b)
# axis1 zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, 1)
b[1, 3] = np.nan
b[1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, 0.3, 1), b)
+ assert_equal(np.percentile(a, 0.3, 1), b)
# axis1 not zerod
b = np.percentile(
np.arange(24, dtype=float).reshape(2, 3, 4), [0.3, 0.6], 1)
b[:, 1, 3] = np.nan
b[:, 1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, [0.3, 0.6], 1), b)
+ assert_equal(np.percentile(a, [0.3, 0.6], 1), b)
# axis02 zerod
b = np.percentile(
np.arange(24, dtype=float).reshape(2, 3, 4), 0.3, (0, 2))
b[1] = np.nan
b[2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, 0.3, (0, 2)), b)
+ assert_equal(np.percentile(a, 0.3, (0, 2)), b)
# axis02 not zerod
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], (0, 2))
b[:, 1] = np.nan
b[:, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)
+ assert_equal(np.percentile(a, [0.3, 0.6], (0, 2)), b)
# axis02 not zerod with nearest interpolation
b = np.percentile(np.arange(24, dtype=float).reshape(2, 3, 4),
[0.3, 0.6], (0, 2), interpolation='nearest')
b[:, 1] = np.nan
b[:, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.percentile(
- a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
+ assert_equal(np.percentile(
+ a, [0.3, 0.6], (0, 2), interpolation='nearest'), b)
class TestQuantile(object):
@@ -2825,6 +2941,26 @@ class TestQuantile(object):
assert_equal(np.quantile(x, 1), 3.5)
assert_equal(np.quantile(x, 0.5), 1.75)
+ def test_fraction(self):
+ # fractional input, integral quantile
+ x = [Fraction(i, 2) for i in np.arange(8)]
+
+ q = np.quantile(x, 0)
+ assert_equal(q, 0)
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, 1)
+ assert_equal(q, Fraction(7, 2))
+ assert_equal(type(q), Fraction)
+
+ q = np.quantile(x, Fraction(1, 2))
+ assert_equal(q, Fraction(7, 4))
+ assert_equal(type(q), Fraction)
+
+ # repeat with integral input but fractional quantile
+ x = np.arange(8)
+ assert_equal(np.quantile(x, Fraction(1, 2)), Fraction(7, 2))
+
def test_no_p_overwrite(self):
# this is worth retesting, because quantile does not make a copy
p0 = np.array([0, 0.75, 0.25, 0.5, 1.0])
@@ -2860,10 +2996,7 @@ class TestMedian(object):
# check array scalar result
assert_equal(np.median(a).ndim, 0)
a[1] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.median(a).ndim, 0)
- assert_(w[0].category is RuntimeWarning)
+ assert_equal(np.median(a).ndim, 0)
def test_axis_keyword(self):
a3 = np.array([[2, 3],
@@ -2962,58 +3095,43 @@ class TestMedian(object):
def test_nan_behavior(self):
a = np.arange(24, dtype=float)
a[2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.median(a), np.nan)
- assert_equal(np.median(a, axis=0), np.nan)
- assert_(w[0].category is RuntimeWarning)
- assert_(w[1].category is RuntimeWarning)
+ assert_equal(np.median(a), np.nan)
+ assert_equal(np.median(a, axis=0), np.nan)
a = np.arange(24, dtype=float).reshape(2, 3, 4)
a[1, 2, 3] = np.nan
a[1, 1, 2] = np.nan
# no axis
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.median(a), np.nan)
- assert_equal(np.median(a).ndim, 0)
- assert_(w[0].category is RuntimeWarning)
+ assert_equal(np.median(a), np.nan)
+ assert_equal(np.median(a).ndim, 0)
# axis0
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 0)
b[2, 3] = np.nan
b[1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.median(a, 0), b)
- assert_equal(len(w), 1)
+ assert_equal(np.median(a, 0), b)
# axis1
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), 1)
b[1, 3] = np.nan
b[1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.median(a, 1), b)
- assert_equal(len(w), 1)
+ assert_equal(np.median(a, 1), b)
# axis02
b = np.median(np.arange(24, dtype=float).reshape(2, 3, 4), (0, 2))
b[1] = np.nan
b[2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.median(a, (0, 2)), b)
- assert_equal(len(w), 1)
+ assert_equal(np.median(a, (0, 2)), b)
def test_empty(self):
- # empty arrays
+ # mean(empty array) emits two warnings: empty slice and divide by 0
a = np.array([], dtype=float)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_equal(np.median(a), np.nan)
assert_(w[0].category is RuntimeWarning)
+ assert_equal(len(w), 2)
# multiple dimensions
a = np.array([], dtype=float, ndmin=3)
@@ -3108,9 +3226,36 @@ class TestAdd_newdoc_ufunc(object):
class TestAdd_newdoc(object):
@pytest.mark.skipif(sys.flags.optimize == 2, reason="Python running -OO")
+ @pytest.mark.xfail(IS_PYPY, reason="PyPy does not modify tp_doc")
def test_add_doc(self):
# test np.add_newdoc
tgt = "Current flat index into the array."
assert_equal(np.core.flatiter.index.__doc__[:len(tgt)], tgt)
assert_(len(np.core.ufunc.identity.__doc__) > 300)
assert_(len(np.lib.index_tricks.mgrid.__doc__) > 300)
+
+class TestSortComplex(object):
+
+ @pytest.mark.parametrize("type_in, type_out", [
+ ('l', 'D'),
+ ('h', 'F'),
+ ('H', 'F'),
+ ('b', 'F'),
+ ('B', 'F'),
+ ('g', 'G'),
+ ])
+ def test_sort_real(self, type_in, type_out):
+ # sort_complex() type casting for real input types
+ a = np.array([5, 3, 6, 2, 1], dtype=type_in)
+ actual = np.sort_complex(a)
+ expected = np.sort(a).astype(type_out)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
+
+ def test_sort_complex(self):
+ # sort_complex() handling of complex input
+ a = np.array([2 + 3j, 1 - 2j, 1 - 3j, 2 + 1j], dtype='D')
+ expected = np.array([1 - 3j, 1 - 2j, 2 + 1j, 2 + 3j], dtype='D')
+ actual = np.sort_complex(a)
+ assert_equal(actual, expected)
+ assert_equal(actual.dtype, expected.dtype)
diff --git a/numpy/lib/tests/test_histograms.py b/numpy/lib/tests/test_histograms.py
index 561f5f938..4895a722c 100644
--- a/numpy/lib/tests/test_histograms.py
+++ b/numpy/lib/tests/test_histograms.py
@@ -6,7 +6,7 @@ from numpy.lib.histograms import histogram, histogramdd, histogram_bin_edges
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
assert_array_almost_equal, assert_raises, assert_allclose,
- assert_array_max_ulp, assert_warns, assert_raises_regex, suppress_warnings,
+ assert_array_max_ulp, assert_raises_regex, suppress_warnings,
)
@@ -119,6 +119,13 @@ class TestHistogram(object):
h, b = histogram(a, bins=8, range=[1, 9], weights=w)
assert_equal(h, w[1:-1])
+ def test_arr_weights_mismatch(self):
+ a = np.arange(10) + .5
+ w = np.arange(11) + .5
+ with assert_raises_regex(ValueError, "same shape as"):
+ h, b = histogram(a, range=[1, 9], weights=w, density=True)
+
+
def test_type(self):
# Check the type of the returned histogram
a = np.arange(10) + .5
@@ -141,6 +148,23 @@ class TestHistogram(object):
counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
assert_equal(counts_hist.sum(), 3.)
+ def test_bool_conversion(self):
+ # gh-12107
+ # Reference integer histogram
+ a = np.array([1, 1, 0], dtype=np.uint8)
+ int_hist, int_edges = np.histogram(a)
+
+ # Should raise an warning on booleans
+ # Ensure that the histograms are equivalent, need to suppress
+ # the warnings to get the actual outputs
+ with suppress_warnings() as sup:
+ rec = sup.record(RuntimeWarning, 'Converting input from .*')
+ hist, edges = np.histogram([True, True, False])
+ # A warning should be issued
+ assert_equal(len(rec), 1)
+ assert_array_equal(hist, int_hist)
+ assert_array_equal(edges, int_edges)
+
def test_weights(self):
v = np.random.rand(100)
w = np.ones(100) * 5
@@ -225,6 +249,12 @@ class TestHistogram(object):
assert_raises(ValueError, histogram, vals, range=[np.nan,0.75])
assert_raises(ValueError, histogram, vals, range=[0.25,np.inf])
+ def test_invalid_range(self):
+ # start of range must be < end of range
+ vals = np.linspace(0.0, 1.0, num=100)
+ with assert_raises_regex(ValueError, "max must be larger than"):
+ np.histogram(vals, range=[0.1, 0.01])
+
def test_bin_edge_cases(self):
# Ensure that floating-point computations correctly place edge cases.
arr = np.array([337, 404, 739, 806, 1007, 1811, 2012])
@@ -241,6 +271,13 @@ class TestHistogram(object):
hist, edges = np.histogram(arr, bins=30, range=(-0.5, 5))
assert_equal(hist[-1], 1)
+ def test_bin_array_dims(self):
+ # gracefully handle bins object > 1 dimension
+ vals = np.linspace(0.0, 1.0, num=100)
+ bins = np.array([[0, 0.5], [0.6, 1.0]])
+ with assert_raises_regex(ValueError, "must be 1d"):
+ np.histogram(vals, bins=bins)
+
def test_unsigned_monotonicity_check(self):
# Ensures ValueError is raised if bins not increasing monotonically
# when bins contain unsigned values (see #9222)
@@ -252,13 +289,13 @@ class TestHistogram(object):
def test_object_array_of_0d(self):
# gh-7864
assert_raises(ValueError,
- histogram, [np.array([0.4]) for i in range(10)] + [-np.inf])
+ histogram, [np.array(0.4) for i in range(10)] + [-np.inf])
assert_raises(ValueError,
- histogram, [np.array([0.4]) for i in range(10)] + [np.inf])
+ histogram, [np.array(0.4) for i in range(10)] + [np.inf])
# these should not crash
- np.histogram([np.array([0.5]) for i in range(10)] + [.500000000000001])
- np.histogram([np.array([0.5]) for i in range(10)] + [.5])
+ np.histogram([np.array(0.5) for i in range(10)] + [.500000000000001])
+ np.histogram([np.array(0.5) for i in range(10)] + [.5])
def test_some_nan_values(self):
# gh-7503
@@ -394,7 +431,7 @@ class TestHistogramOptimBinNums(object):
def test_empty(self):
estimator_list = ['fd', 'scott', 'rice', 'sturges',
- 'doane', 'sqrt', 'auto']
+ 'doane', 'sqrt', 'auto', 'stone']
# check it can deal with empty data
for estimator in estimator_list:
a, b = histogram([], bins=estimator)
@@ -410,11 +447,11 @@ class TestHistogramOptimBinNums(object):
# Some basic sanity checking, with some fixed data.
# Checking for the correct number of bins
basic_test = {50: {'fd': 4, 'scott': 4, 'rice': 8, 'sturges': 7,
- 'doane': 8, 'sqrt': 8, 'auto': 7},
+ 'doane': 8, 'sqrt': 8, 'auto': 7, 'stone': 2},
500: {'fd': 8, 'scott': 8, 'rice': 16, 'sturges': 10,
- 'doane': 12, 'sqrt': 23, 'auto': 10},
+ 'doane': 12, 'sqrt': 23, 'auto': 10, 'stone': 9},
5000: {'fd': 17, 'scott': 17, 'rice': 35, 'sturges': 14,
- 'doane': 17, 'sqrt': 71, 'auto': 17}}
+ 'doane': 17, 'sqrt': 71, 'auto': 17, 'stone': 20}}
for testlen, expectedResults in basic_test.items():
# Create some sort of non uniform data to test with
@@ -434,11 +471,11 @@ class TestHistogramOptimBinNums(object):
precalculated.
"""
small_dat = {1: {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
- 'doane': 1, 'sqrt': 1},
+ 'doane': 1, 'sqrt': 1, 'stone': 1},
2: {'fd': 2, 'scott': 1, 'rice': 3, 'sturges': 2,
- 'doane': 1, 'sqrt': 2},
+ 'doane': 1, 'sqrt': 2, 'stone': 1},
3: {'fd': 2, 'scott': 2, 'rice': 3, 'sturges': 3,
- 'doane': 3, 'sqrt': 2}}
+ 'doane': 3, 'sqrt': 2, 'stone': 1}}
for testlen, expectedResults in small_dat.items():
testdat = np.arange(testlen)
@@ -462,7 +499,7 @@ class TestHistogramOptimBinNums(object):
"""
novar_dataset = np.ones(100)
novar_resultdict = {'fd': 1, 'scott': 1, 'rice': 1, 'sturges': 1,
- 'doane': 1, 'sqrt': 1, 'auto': 1}
+ 'doane': 1, 'sqrt': 1, 'auto': 1, 'stone': 1}
for estimator, numbins in novar_resultdict.items():
a, b = np.histogram(novar_dataset, estimator)
@@ -501,12 +538,28 @@ class TestHistogramOptimBinNums(object):
xcenter = np.linspace(-10, 10, 50)
outlier_dataset = np.hstack((np.linspace(-110, -100, 5), xcenter))
- outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11}
+ outlier_resultdict = {'fd': 21, 'scott': 5, 'doane': 11, 'stone': 6}
for estimator, numbins in outlier_resultdict.items():
a, b = np.histogram(outlier_dataset, estimator)
assert_equal(len(a), numbins)
+ def test_scott_vs_stone(self):
+ """Verify that Scott's rule and Stone's rule converges for normally distributed data"""
+
+ def nbins_ratio(seed, size):
+ rng = np.random.RandomState(seed)
+ x = rng.normal(loc=0, scale=2, size=size)
+ a, b = len(np.histogram(x, 'stone')[0]), len(np.histogram(x, 'scott')[0])
+ return a / (a + b)
+
+ ll = [[nbins_ratio(seed, size) for size in np.geomspace(start=10, stop=100, num=4).round().astype(int)]
+ for seed in range(10)]
+
+ # the average difference between the two methods decreases as the dataset size increases.
+ avg = abs(np.mean(ll, axis=0) - 0.5)
+ assert_almost_equal(avg, [0.15, 0.09, 0.08, 0.03], decimal=2)
+
def test_simple_range(self):
"""
Straightforward testing with a mixture of linspace data (for
@@ -518,11 +571,11 @@ class TestHistogramOptimBinNums(object):
# Checking for the correct number of bins
basic_test = {
50: {'fd': 8, 'scott': 8, 'rice': 15,
- 'sturges': 14, 'auto': 14},
+ 'sturges': 14, 'auto': 14, 'stone': 8},
500: {'fd': 15, 'scott': 16, 'rice': 32,
- 'sturges': 20, 'auto': 20},
+ 'sturges': 20, 'auto': 20, 'stone': 80},
5000: {'fd': 33, 'scott': 33, 'rice': 69,
- 'sturges': 27, 'auto': 33}
+ 'sturges': 27, 'auto': 33, 'stone': 80}
}
for testlen, expectedResults in basic_test.items():
@@ -745,7 +798,7 @@ class TestHistogramdd(object):
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges))
assert_equal(hist, relative_areas)
- # resulting histogram should be uniform, since counts and areas are propotional
+ # resulting histogram should be uniform, since counts and areas are proportional
hist, edges = histogramdd((y, x), bins=(y_edges, x_edges), density=True)
assert_equal(hist, 1 / (8*8))
@@ -757,3 +810,20 @@ class TestHistogramdd(object):
hist_dd, edges_dd = histogramdd((v,), (bins,), density=True)
assert_equal(hist, hist_dd)
assert_equal(edges, edges_dd[0])
+
+ def test_density_via_normed(self):
+ # normed should simply alias to density argument
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ hist, edges = histogram(v, bins, density=True)
+ hist_dd, edges_dd = histogramdd((v,), (bins,), normed=True)
+ assert_equal(hist, hist_dd)
+ assert_equal(edges, edges_dd[0])
+
+ def test_density_normed_redundancy(self):
+ v = np.arange(10)
+ bins = np.array([0, 1, 3, 6, 10])
+ with assert_raises_regex(TypeError, "Cannot specify both"):
+ hist_dd, edges_dd = histogramdd((v,), (bins,),
+ density=True,
+ normed=True)
diff --git a/numpy/lib/tests/test_index_tricks.py b/numpy/lib/tests/test_index_tricks.py
index 33b98629d..dbe445c2c 100644
--- a/numpy/lib/tests/test_index_tricks.py
+++ b/numpy/lib/tests/test_index_tricks.py
@@ -5,7 +5,8 @@ import pytest
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_raises, assert_raises_regex
+ assert_array_almost_equal, assert_raises, assert_raises_regex,
+ assert_warns
)
from numpy.lib.index_tricks import (
mgrid, ogrid, ndenumerate, fill_diagonal, diag_indices, diag_indices_from,
@@ -16,6 +17,33 @@ from numpy.lib.index_tricks import (
class TestRavelUnravelIndex(object):
def test_basic(self):
assert_equal(np.unravel_index(2, (2, 2)), (1, 0))
+
+ # test backwards compatibility with older dims
+ # keyword argument; see Issue #10586
+ with assert_warns(DeprecationWarning):
+ # we should achieve the correct result
+ # AND raise the appropriate warning
+ # when using older "dims" kw argument
+ assert_equal(np.unravel_index(indices=2,
+ dims=(2, 2)),
+ (1, 0))
+
+ # test that new shape argument works properly
+ assert_equal(np.unravel_index(indices=2,
+ shape=(2, 2)),
+ (1, 0))
+
+ # test that an invalid second keyword argument
+ # is properly handled
+ with assert_raises(TypeError):
+ np.unravel_index(indices=2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(2, hape=(2, 2))
+
+ with assert_raises(TypeError):
+ np.unravel_index(254, ims=(17, 94))
+
assert_equal(np.ravel_multi_index((1, 0), (2, 2)), 2)
assert_equal(np.unravel_index(254, (17, 94)), (2, 66))
assert_equal(np.ravel_multi_index((2, 66), (17, 94)), 254)
@@ -49,6 +77,26 @@ class TestRavelUnravelIndex(object):
[[3, 6, 6], [4, 5, 1]])
assert_equal(np.unravel_index(1621, (6, 7, 8, 9)), [3, 1, 4, 1])
+ def test_empty_indices(self):
+ msg1 = 'indices must be integral: the provided empty sequence was'
+ msg2 = 'only int indices permitted'
+ assert_raises_regex(TypeError, msg1, np.unravel_index, [], (10, 3, 5))
+ assert_raises_regex(TypeError, msg1, np.unravel_index, (), (10, 3, 5))
+ assert_raises_regex(TypeError, msg2, np.unravel_index, np.array([]),
+ (10, 3, 5))
+ assert_equal(np.unravel_index(np.array([],dtype=int), (10, 3, 5)),
+ [[], [], []])
+ assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], []),
+ (10, 3))
+ assert_raises_regex(TypeError, msg1, np.ravel_multi_index, ([], ['abc']),
+ (10, 3))
+ assert_raises_regex(TypeError, msg2, np.ravel_multi_index,
+ (np.array([]), np.array([])), (5, 3))
+ assert_equal(np.ravel_multi_index(
+ (np.array([], dtype=int), np.array([], dtype=int)), (5, 3)), [])
+ assert_equal(np.ravel_multi_index(np.array([[], []], dtype=int),
+ (5, 3)), [])
+
def test_big_indices(self):
# ravel_multi_index for big indices (issue #7546)
if np.intp == np.int64:
@@ -58,6 +106,9 @@ class TestRavelUnravelIndex(object):
np.ravel_multi_index(arr, (41, 7, 120, 36, 2706, 8, 6)),
[5627771580, 117259570957])
+ # test unravel_index for big indices (issue #9538)
+ assert_raises(ValueError, np.unravel_index, 1, (2**32-1, 2**31+1))
+
# test overflow checking for too big array (issue #7546)
dummy_arr = ([0],[0])
half_max = np.iinfo(np.intp).max // 2
@@ -124,6 +175,24 @@ class TestRavelUnravelIndex(object):
assert_raises_regex(
ValueError, "out of bounds", np.unravel_index, [1], ())
+ @pytest.mark.parametrize("mode", ["clip", "wrap", "raise"])
+ def test_empty_array_ravel(self, mode):
+ res = np.ravel_multi_index(
+ np.zeros((3, 0), dtype=np.intp), (2, 1, 0), mode=mode)
+ assert(res.shape == (0,))
+
+ with assert_raises(ValueError):
+ np.ravel_multi_index(
+ np.zeros((3, 1), dtype=np.intp), (2, 1, 0), mode=mode)
+
+ def test_empty_array_unravel(self):
+ res = np.unravel_index(np.zeros(0, dtype=np.intp), (2, 1, 0))
+ # res is a tuple of three empty arrays
+ assert(len(res) == 3)
+ assert(all(a.shape == (0,) for a in res))
+
+ with assert_raises(ValueError):
+ np.unravel_index([1], (2, 1, 0))
class TestGrid(object):
def test_basic(self):
@@ -139,7 +208,7 @@ class TestGrid(object):
assert_almost_equal(a[1]-a[0], 2.0/9.0, 11)
def test_linspace_equivalence(self):
- y, st = np.linspace(2, 10, retstep=1)
+ y, st = np.linspace(2, 10, retstep=True)
assert_almost_equal(st, 8/49.0)
assert_array_almost_equal(y, mgrid[2:10:50j], 13)
@@ -198,6 +267,11 @@ class TestConcatenator(object):
g = r_[-10.1, np.array([1]), np.array([2, 3, 4]), 10.0]
assert_(g.dtype == 'f8')
+ def test_complex_step(self):
+ # Regression test for #12262
+ g = r_[0:36:100j]
+ assert_(g.shape == (100,))
+
def test_2d(self):
b = np.random.rand(5, 5)
c = np.random.rand(5, 5)
@@ -239,11 +313,16 @@ class TestIndexExpression(object):
class TestIx_(object):
def test_regression_1(self):
- # Test empty inputs create outputs of indexing type, gh-5804
- # Test both lists and arrays
- for func in (range, np.arange):
- a, = np.ix_(func(0))
- assert_equal(a.dtype, np.intp)
+ # Test empty untyped inputs create outputs of indexing type, gh-5804
+ a, = np.ix_(range(0))
+ assert_equal(a.dtype, np.intp)
+
+ a, = np.ix_([])
+ assert_equal(a.dtype, np.intp)
+
+ # but if the type is specified, don't change it
+ a, = np.ix_(np.array([], dtype=np.float32))
+ assert_equal(a.dtype, np.float32)
def test_shape_and_dtype(self):
sizes = (4, 5, 3, 2)
@@ -336,6 +415,19 @@ class TestFillDiagonal(object):
i = np.array([0, 1, 2])
assert_equal(np.where(a != 0), (i, i, i, i))
+ def test_low_dim_handling(self):
+ # raise error with low dimensionality
+ a = np.zeros(3, int)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ fill_diagonal(a, 5)
+
+ def test_hetero_shape_handling(self):
+ # raise error with high dimensionality and
+ # shape mismatch
+ a = np.zeros((3,3,7,3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ fill_diagonal(a, 2)
+
def test_diag_indices():
di = diag_indices(4)
@@ -365,11 +457,23 @@ def test_diag_indices():
)
-def test_diag_indices_from():
- x = np.random.random((4, 4))
- r, c = diag_indices_from(x)
- assert_array_equal(r, np.arange(4))
- assert_array_equal(c, np.arange(4))
+class TestDiagIndicesFrom(object):
+
+ def test_diag_indices_from(self):
+ x = np.random.random((4, 4))
+ r, c = diag_indices_from(x)
+ assert_array_equal(r, np.arange(4))
+ assert_array_equal(c, np.arange(4))
+
+ def test_error_small_input(self):
+ x = np.ones(7)
+ with assert_raises_regex(ValueError, "at least 2-d"):
+ diag_indices_from(x)
+
+ def test_error_shape_mismatch(self):
+ x = np.zeros((3, 3, 2, 3), int)
+ with assert_raises_regex(ValueError, "equal length"):
+ diag_indices_from(x)
def test_ndindex():
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 08800ff97..1181fe986 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -6,7 +6,6 @@ import os
import threading
import time
import warnings
-import gc
import io
import re
import pytest
@@ -18,12 +17,12 @@ import locale
import numpy as np
import numpy.ma as ma
from numpy.lib._iotools import ConverterError, ConversionWarning
-from numpy.compat import asbytes, bytes, unicode, Path
+from numpy.compat import asbytes, bytes, Path
from numpy.ma.testutils import assert_equal
from numpy.testing import (
assert_warns, assert_, assert_raises_regex, assert_raises,
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
- HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles,
+ HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
@@ -88,7 +87,7 @@ class RoundtripTest(object):
"""
save_kwds = kwargs.get('save_kwds', {})
- load_kwds = kwargs.get('load_kwds', {})
+ load_kwds = kwargs.get('load_kwds', {"allow_pickle": True})
file_on_disk = kwargs.get('file_on_disk', False)
if file_on_disk:
@@ -348,13 +347,33 @@ class TestSaveTxt(object):
assert_raises(ValueError, np.savetxt, c, np.array(1))
assert_raises(ValueError, np.savetxt, c, np.array([[[1], [2]]]))
- def test_record(self):
+ def test_structured(self):
a = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
c = BytesIO()
np.savetxt(c, a, fmt='%d')
c.seek(0)
assert_equal(c.readlines(), [b'1 2\n', b'3 4\n'])
+ def test_structured_padded(self):
+ # gh-13297
+ a = np.array([(1, 2, 3),(4, 5, 6)], dtype=[
+ ('foo', 'i4'), ('bar', 'i4'), ('baz', 'i4')
+ ])
+ c = BytesIO()
+ np.savetxt(c, a[['foo', 'baz']], fmt='%d')
+ c.seek(0)
+ assert_equal(c.readlines(), [b'1 3\n', b'4 6\n'])
+
+ @pytest.mark.skipif(Path is None, reason="No pathlib.Path")
+ def test_multifield_view(self):
+ a = np.ones(1, dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'f4')])
+ v = a[['x', 'z']]
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ np.save(path, v)
+ data = np.load(path)
+ assert_array_equal(data, v)
+
def test_delimiter(self):
a = np.array([[1., 2.], [3., 4.]])
c = BytesIO()
@@ -485,8 +504,6 @@ class TestSaveTxt(object):
b' (3.142e+00-2.718e+00j) (3.142e+00-2.718e+00j)\n'])
-
-
def test_custom_writer(self):
class CustomWriter(list):
@@ -542,6 +559,33 @@ class TestSaveTxt(object):
s.seek(0)
assert_equal(s.read(), utf8 + '\n')
+ @pytest.mark.parametrize("fmt", [u"%f", b"%f"])
+ @pytest.mark.parametrize("iotype", [StringIO, BytesIO])
+ def test_unicode_and_bytes_fmt(self, fmt, iotype):
+ # string type of fmt should not matter, see also gh-4053
+ a = np.array([1.])
+ s = iotype()
+ np.savetxt(s, a, fmt=fmt)
+ s.seek(0)
+ if iotype is StringIO:
+ assert_equal(s.read(), u"%f\n" % 1.)
+ else:
+ assert_equal(s.read(), b"%f\n" % 1.)
+
+ @pytest.mark.skipif(sys.platform=='win32',
+ reason="large files cause problems")
+ @pytest.mark.slow
+ def test_large_zip(self):
+ # The test takes at least 6GB of memory, writes a file larger than 4GB
+ try:
+ a = 'a' * 6 * 1024 * 1024 * 1024
+ del a
+ except (MemoryError, OverflowError):
+ pytest.skip("Cannot allocate enough memory for test")
+ test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
+ for i in range(800000)])
+ with tempdir() as tmpdir:
+ np.savez(os.path.join(tmpdir, 'test.npz'), test_data=test_data)
class LoadTxtBase(object):
def check_compressed(self, fopen, suffixes):
@@ -1183,7 +1227,7 @@ class TestFromTxt(LoadTxtBase):
def test_record(self):
# Test w/ explicit dtype
data = TextIO('1 2\n3 4')
- test = np.ndfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
+ test = np.genfromtxt(data, dtype=[('x', np.int32), ('y', np.int32)])
control = np.array([(1, 2), (3, 4)], dtype=[('x', 'i4'), ('y', 'i4')])
assert_equal(test, control)
#
@@ -1192,14 +1236,14 @@ class TestFromTxt(LoadTxtBase):
'formats': ('S1', 'i4', 'f4')}
control = np.array([('M', 64.0, 75.0), ('F', 25.0, 60.0)],
dtype=descriptor)
- test = np.ndfromtxt(data, dtype=descriptor)
+ test = np.genfromtxt(data, dtype=descriptor)
assert_equal(test, control)
def test_array(self):
# Test outputting a standard ndarray
data = TextIO('1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
- test = np.ndfromtxt(data, dtype=int)
+ test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data.seek(0)
@@ -1212,11 +1256,11 @@ class TestFromTxt(LoadTxtBase):
control = np.array([1, 2, 3, 4], int)
#
data = TextIO('1\n2\n3\n4\n')
- test = np.ndfromtxt(data, dtype=int)
+ test = np.genfromtxt(data, dtype=int)
assert_array_equal(test, control)
#
data = TextIO('1,2,3,4\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=',')
+ test = np.genfromtxt(data, dtype=int, delimiter=',')
assert_array_equal(test, control)
def test_comments(self):
@@ -1224,11 +1268,11 @@ class TestFromTxt(LoadTxtBase):
control = np.array([1, 2, 3, 5], int)
# Comment on its own line
data = TextIO('# comment\n1,2,3,5\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
+ test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
# Comment at the end of a line
data = TextIO('1,2,3,5# comment\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=',', comments='#')
+ test = np.genfromtxt(data, dtype=int, delimiter=',', comments='#')
assert_equal(test, control)
def test_skiprows(self):
@@ -1237,7 +1281,7 @@ class TestFromTxt(LoadTxtBase):
kwargs = dict(dtype=int, delimiter=',')
#
data = TextIO('comment\n1,2,3,5\n')
- test = np.ndfromtxt(data, skip_header=1, **kwargs)
+ test = np.genfromtxt(data, skip_header=1, **kwargs)
assert_equal(test, control)
#
data = TextIO('# comment\n1,2,3,5\n')
@@ -1284,7 +1328,7 @@ class TestFromTxt(LoadTxtBase):
data = TextIO('gender age weight\nM 64.0 75.0\nF 25.0 60.0')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
- test = np.ndfromtxt(data, dtype=None, names=True)
+ test = np.genfromtxt(data, dtype=None, names=True)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = {'gender': np.array([b'M', b'F']),
'age': np.array([64.0, 25.0]),
@@ -1298,7 +1342,7 @@ class TestFromTxt(LoadTxtBase):
data = TextIO('A 64 75.0 3+4j True\nBCD 25 60.0 5+6j False')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
- test = np.ndfromtxt(data, dtype=None)
+ test = np.genfromtxt(data, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = [np.array([b'A', b'BCD']),
np.array([64, 25]),
@@ -1312,7 +1356,7 @@ class TestFromTxt(LoadTxtBase):
def test_auto_dtype_uniform(self):
# Tests whether the output dtype can be uniformized
data = TextIO('1 2 3 4\n5 6 7 8\n')
- test = np.ndfromtxt(data, dtype=None)
+ test = np.genfromtxt(data, dtype=None)
control = np.array([[1, 2, 3, 4], [5, 6, 7, 8]])
assert_equal(test, control)
@@ -1320,7 +1364,7 @@ class TestFromTxt(LoadTxtBase):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
- test = np.ndfromtxt(data, dtype=fancydtype, delimiter=',')
+ test = np.genfromtxt(data, dtype=fancydtype, delimiter=',')
control = np.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
@@ -1330,7 +1374,7 @@ class TestFromTxt(LoadTxtBase):
'formats': ('S1', 'i4', 'f4')}
data = TextIO(b'M 64.0 75.0\nF 25.0 60.0')
names = ('gender', 'age', 'weight')
- test = np.ndfromtxt(data, dtype=descriptor, names=names)
+ test = np.genfromtxt(data, dtype=descriptor, names=names)
descriptor['names'] = names
control = np.array([('M', 64.0, 75.0),
('F', 25.0, 60.0)], dtype=descriptor)
@@ -1372,12 +1416,25 @@ M 33 21.99
control = np.array([(1, 2), (3, 4)], dtype=[('col1', int), ('col2', int)])
assert_equal(test, control)
+ def test_file_is_closed_on_error(self):
+ # gh-13200
+ with tempdir() as tmpdir:
+ fpath = os.path.join(tmpdir, "test.csv")
+ with open(fpath, "wb") as f:
+ f.write(u'\N{GREEK PI SYMBOL}'.encode('utf8'))
+
+ # ResourceWarnings are emitted from a destructor, so won't be
+ # detected by regular propagation to errors.
+ with assert_no_warnings():
+ with pytest.raises(UnicodeDecodeError):
+ np.genfromtxt(fpath, encoding="ascii")
+
def test_autonames_and_usecols(self):
# Tests names and usecols
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
- test = np.ndfromtxt(data, usecols=('A', 'C', 'D'),
+ test = np.genfromtxt(data, usecols=('A', 'C', 'D'),
names=True, dtype=None)
assert_(w[0].category is np.VisibleDeprecationWarning)
control = np.array(('aaaa', 45, 9.1),
@@ -1387,7 +1444,7 @@ M 33 21.99
def test_converters_with_usecols(self):
# Test the combination user-defined converters and usecol
data = TextIO('1,2,3,,5\n6,7,8,9,10\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=',',
+ test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)},
usecols=(1, 3,))
control = np.array([[2, -999], [7, 9]], int)
@@ -1398,7 +1455,7 @@ M 33 21.99
data = TextIO('A B C D\n aaaa 121 45 9.1')
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
- test = np.ndfromtxt(data, usecols=('A', 'C', 'D'), names=True,
+ test = np.genfromtxt(data, usecols=('A', 'C', 'D'), names=True,
dtype=None,
converters={'C': lambda s: 2 * int(s)})
assert_(w[0].category is np.VisibleDeprecationWarning)
@@ -1411,7 +1468,7 @@ M 33 21.99
converter = {
'date': lambda s: strptime(s, '%Y-%m-%d %H:%M:%SZ')}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
- test = np.ndfromtxt(data, delimiter=',', dtype=None,
+ test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', np.object_), ('stid', float)])
@@ -1422,7 +1479,7 @@ M 33 21.99
converter = {
'date': lambda s: np.datetime64(strptime(s, '%Y-%m-%d %H:%M:%SZ'))}
data = TextIO('2009-02-03 12:00:00Z, 72214.0')
- test = np.ndfromtxt(data, delimiter=',', dtype=None,
+ test = np.genfromtxt(data, delimiter=',', dtype=None,
names=['date', 'stid'], converters=converter)
control = np.array((datetime(2009, 2, 3), 72214.),
dtype=[('date', 'datetime64[us]'), ('stid', float)])
@@ -1431,12 +1488,12 @@ M 33 21.99
def test_unused_converter(self):
# Test whether unused converters are forgotten
data = TextIO("1 21\n 3 42\n")
- test = np.ndfromtxt(data, usecols=(1,),
+ test = np.genfromtxt(data, usecols=(1,),
converters={0: lambda s: int(s, 16)})
assert_equal(test, [21, 42])
#
data.seek(0)
- test = np.ndfromtxt(data, usecols=(1,),
+ test = np.genfromtxt(data, usecols=(1,),
converters={1: lambda s: int(s, 16)})
assert_equal(test, [33, 66])
@@ -1463,12 +1520,12 @@ M 33 21.99
def test_dtype_with_converters(self):
dstr = "2009; 23; 46"
- test = np.ndfromtxt(TextIO(dstr,),
+ test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: bytes})
control = np.array([('2009', 23., 46)],
dtype=[('f0', '|S4'), ('f1', float), ('f2', float)])
assert_equal(test, control)
- test = np.ndfromtxt(TextIO(dstr,),
+ test = np.genfromtxt(TextIO(dstr,),
delimiter=";", dtype=float, converters={0: float})
control = np.array([2009., 23., 46],)
assert_equal(test, control)
@@ -1508,6 +1565,13 @@ M 33 21.99
test = np.genfromtxt(TextIO(data), delimiter=";",
dtype=ndtype, converters=converters)
+ # nested but empty fields also aren't supported
+ ndtype = [('idx', int), ('code', object), ('nest', [])]
+ with assert_raises_regex(NotImplementedError,
+ 'Nested fields.* not supported.*'):
+ test = np.genfromtxt(TextIO(data), delimiter=";",
+ dtype=ndtype, converters=converters)
+
def test_userconverters_with_explicit_dtype(self):
# Test user_converters w/ explicit (standard) dtype
data = TextIO('skip,skip,2001-01-01,1.0,skip')
@@ -1532,7 +1596,7 @@ M 33 21.99
def test_spacedelimiter(self):
# Test space delimiter
data = TextIO("1 2 3 4 5\n6 7 8 9 10")
- test = np.ndfromtxt(data)
+ test = np.genfromtxt(data)
control = np.array([[1., 2., 3., 4., 5.],
[6., 7., 8., 9., 10.]])
assert_equal(test, control)
@@ -1546,7 +1610,7 @@ M 33 21.99
def test_missing(self):
data = TextIO('1,2,3,,5\n')
- test = np.ndfromtxt(data, dtype=int, delimiter=',',
+ test = np.genfromtxt(data, dtype=int, delimiter=',',
converters={3: lambda s: int(s or - 999)})
control = np.array([1, 2, 3, -999, 5], int)
assert_equal(test, control)
@@ -1568,18 +1632,18 @@ M 33 21.99
data = TextIO()
np.savetxt(data, control)
data.seek(0)
- test = np.ndfromtxt(data, dtype=float, usecols=(1,))
+ test = np.genfromtxt(data, dtype=float, usecols=(1,))
assert_equal(test, control[:, 1])
#
control = np.array([[1, 2, 3], [3, 4, 5]], float)
data = TextIO()
np.savetxt(data, control)
data.seek(0)
- test = np.ndfromtxt(data, dtype=float, usecols=(1, 2))
+ test = np.genfromtxt(data, dtype=float, usecols=(1, 2))
assert_equal(test, control[:, 1:])
# Testing with arrays instead of tuples.
data.seek(0)
- test = np.ndfromtxt(data, dtype=float, usecols=np.array([1, 2]))
+ test = np.genfromtxt(data, dtype=float, usecols=np.array([1, 2]))
assert_equal(test, control[:, 1:])
def test_usecols_as_css(self):
@@ -1595,7 +1659,7 @@ M 33 21.99
data = TextIO("JOE 70.1 25.3\nBOB 60.5 27.9")
names = ['stid', 'temp']
dtypes = ['S4', 'f8']
- test = np.ndfromtxt(
+ test = np.genfromtxt(
data, usecols=(0, 2), dtype=list(zip(names, dtypes)))
assert_equal(test['stid'], [b"JOE", b"BOB"])
assert_equal(test['temp'], [25.3, 27.9])
@@ -1624,11 +1688,15 @@ M 33 21.99
test = np.genfromtxt(data)
assert_equal(test, np.array([]))
+ # when skip_header > 0
+ test = np.genfromtxt(data, skip_header=1)
+ assert_equal(test, np.array([]))
+
def test_fancy_dtype_alt(self):
# Check that a nested dtype isn't MIA
data = TextIO('1,2,3.0\n4,5,6.0\n')
fancydtype = np.dtype([('x', int), ('y', [('t', int), ('s', float)])])
- test = np.mafromtxt(data, dtype=fancydtype, delimiter=',')
+ test = np.genfromtxt(data, dtype=fancydtype, delimiter=',', usemask=True)
control = ma.array([(1, (2, 3.0)), (4, (5, 6.0))], dtype=fancydtype)
assert_equal(test, control)
@@ -1636,7 +1704,7 @@ M 33 21.99
c = TextIO("aaaa 1.0 8.0 1 2 3 4 5 6")
dt = np.dtype([('name', 'S4'), ('x', float), ('y', float),
('block', int, (2, 3))])
- x = np.ndfromtxt(c, dtype=dt)
+ x = np.genfromtxt(c, dtype=dt)
a = np.array([('aaaa', 1.0, 8.0, [[1, 2, 3], [4, 5, 6]])],
dtype=dt)
assert_array_equal(x, a)
@@ -1644,7 +1712,7 @@ M 33 21.99
def test_withmissing(self):
data = TextIO('A,B\n0,1\n2,N/A')
kwargs = dict(delimiter=",", missing_values="N/A", names=True)
- test = np.mafromtxt(data, dtype=None, **kwargs)
+ test = np.genfromtxt(data, dtype=None, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', int)])
@@ -1652,7 +1720,7 @@ M 33 21.99
assert_equal(test.mask, control.mask)
#
data.seek(0)
- test = np.mafromtxt(data, **kwargs)
+ test = np.genfromtxt(data, usemask=True, **kwargs)
control = ma.array([(0, 1), (2, -1)],
mask=[(False, False), (False, True)],
dtype=[('A', float), ('B', float)])
@@ -1664,7 +1732,7 @@ M 33 21.99
basekwargs = dict(dtype=None, delimiter=",", names=True,)
mdtype = [('A', int), ('B', float), ('C', complex)]
#
- test = np.mafromtxt(TextIO(data), missing_values="N/A",
+ test = np.genfromtxt(TextIO(data), missing_values="N/A",
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
@@ -1673,16 +1741,17 @@ M 33 21.99
assert_equal(test, control)
#
basekwargs['dtype'] = mdtype
- test = np.mafromtxt(TextIO(data),
- missing_values={0: -9, 1: -99, 2: -999j}, **basekwargs)
+ test = np.genfromtxt(TextIO(data),
+ missing_values={0: -9, 1: -99, 2: -999j}, usemask=True, **basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
mask=[(0, 0, 0), (0, 1, 0), (1, 0, 1), (0, 1, 0)],
dtype=mdtype)
assert_equal(test, control)
#
- test = np.mafromtxt(TextIO(data),
+ test = np.genfromtxt(TextIO(data),
missing_values={0: -9, 'B': -99, 'C': -999j},
+ usemask=True,
**basekwargs)
control = ma.array([(0, 0.0, 0j), (1, -999, 1j),
(-9, 2.2, -999j), (3, -99, 3j)],
@@ -1720,8 +1789,8 @@ M 33 21.99
def test_withmissing_float(self):
data = TextIO('A,B\n0,1.5\n2,-999.00')
- test = np.mafromtxt(data, dtype=None, delimiter=',',
- missing_values='-999.0', names=True,)
+ test = np.genfromtxt(data, dtype=None, delimiter=',',
+ missing_values='-999.0', names=True, usemask=True)
control = ma.array([(0, 1.5), (2, -1.)],
mask=[(False, False), (False, True)],
dtype=[('A', int), ('B', float)])
@@ -1760,14 +1829,14 @@ M 33 21.99
ret = {}
def f(_ret={}):
- _ret['mtest'] = np.ndfromtxt(mdata, invalid_raise=False, **kwargs)
+ _ret['mtest'] = np.genfromtxt(mdata, invalid_raise=False, **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'abcde']))
#
mdata.seek(0)
- assert_raises(ValueError, np.ndfromtxt, mdata,
+ assert_raises(ValueError, np.genfromtxt, mdata,
delimiter=",", names=True)
def test_invalid_raise_with_usecols(self):
@@ -1784,14 +1853,14 @@ M 33 21.99
ret = {}
def f(_ret={}):
- _ret['mtest'] = np.ndfromtxt(mdata, usecols=(0, 4), **kwargs)
+ _ret['mtest'] = np.genfromtxt(mdata, usecols=(0, 4), **kwargs)
assert_warns(ConversionWarning, f, _ret=ret)
mtest = ret['mtest']
assert_equal(len(mtest), 45)
assert_equal(mtest, np.ones(45, dtype=[(_, int) for _ in 'ae']))
#
mdata.seek(0)
- mtest = np.ndfromtxt(mdata, usecols=(0, 1), **kwargs)
+ mtest = np.genfromtxt(mdata, usecols=(0, 1), **kwargs)
assert_equal(len(mtest), 50)
control = np.ones(50, dtype=[(_, int) for _ in 'ab'])
control[[10 * _ for _ in range(5)]] = (2, 2)
@@ -1802,7 +1871,7 @@ M 33 21.99
data = ["1, 1, 1, 1, -1.1"] * 50
mdata = TextIO("\n".join(data))
- converters = {4: lambda x: "(%s)" % x}
+ converters = {4: lambda x: "(%s)" % x.decode()}
kwargs = dict(delimiter=",", converters=converters,
dtype=[(_, int) for _ in 'abcde'],)
assert_raises(ValueError, np.genfromtxt, mdata, **kwargs)
@@ -1810,7 +1879,7 @@ M 33 21.99
def test_default_field_format(self):
# Test default format
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(TextIO(data),
+ mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=None, defaultfmt="f%02i")
ctrl = np.array([(0, 1, 2.3), (4, 5, 6.7)],
dtype=[("f00", int), ("f01", int), ("f02", float)])
@@ -1819,7 +1888,7 @@ M 33 21.99
def test_single_dtype_wo_names(self):
# Test single dtype w/o names
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(TextIO(data),
+ mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, defaultfmt="f%02i")
ctrl = np.array([[0., 1., 2.3], [4., 5., 6.7]], dtype=float)
assert_equal(mtest, ctrl)
@@ -1827,7 +1896,7 @@ M 33 21.99
def test_single_dtype_w_explicit_names(self):
# Test single dtype w explicit names
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(TextIO(data),
+ mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names="a, b, c")
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
@@ -1836,7 +1905,7 @@ M 33 21.99
def test_single_dtype_w_implicit_names(self):
# Test single dtype w implicit names
data = "a, b, c\n0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(TextIO(data),
+ mtest = np.genfromtxt(TextIO(data),
delimiter=",", dtype=float, names=True)
ctrl = np.array([(0., 1., 2.3), (4., 5., 6.7)],
dtype=[(_, float) for _ in "abc"])
@@ -1845,7 +1914,7 @@ M 33 21.99
def test_easy_structured_dtype(self):
# Test easy structured dtype
data = "0, 1, 2.3\n4, 5, 6.7"
- mtest = np.ndfromtxt(TextIO(data), delimiter=",",
+ mtest = np.genfromtxt(TextIO(data), delimiter=",",
dtype=(int, float, float), defaultfmt="f_%02i")
ctrl = np.array([(0, 1., 2.3), (4, 5., 6.7)],
dtype=[("f_00", int), ("f_01", float), ("f_02", float)])
@@ -1857,14 +1926,14 @@ M 33 21.99
kwargs = dict(delimiter=",", dtype=None)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
- mtest = np.ndfromtxt(TextIO(data), **kwargs)
+ mtest = np.genfromtxt(TextIO(data), **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003 ', 1.3, ' abcde')],
dtype=[('f0', '|S12'), ('f1', float), ('f2', '|S8')])
assert_equal(mtest, ctrl)
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', np.VisibleDeprecationWarning)
- mtest = np.ndfromtxt(TextIO(data), autostrip=True, **kwargs)
+ mtest = np.genfromtxt(TextIO(data), autostrip=True, **kwargs)
assert_(w[0].category is np.VisibleDeprecationWarning)
ctrl = np.array([('01/01/2003', 1.3, 'abcde')],
dtype=[('f0', '|S10'), ('f1', float), ('f2', '|S5')])
@@ -1925,12 +1994,12 @@ M 33 21.99
# w/ dtype=None
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, int) for _ in ('A', 'f0', 'C')])
- test = np.ndfromtxt(TextIO(data), dtype=None, **kwargs)
+ test = np.genfromtxt(TextIO(data), dtype=None, **kwargs)
assert_equal(test, ctrl)
# w/ default dtype
ctrl = np.array([(0, 1, 2), (3, 4, 5)],
dtype=[(_, float) for _ in ('A', 'f0', 'C')])
- test = np.ndfromtxt(TextIO(data), **kwargs)
+ test = np.genfromtxt(TextIO(data), **kwargs)
def test_names_auto_completion(self):
# Make sure that names are properly completed
@@ -1966,13 +2035,13 @@ M 33 21.99
kwargs = dict(delimiter=(5, 5, 4), names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
- test = np.ndfromtxt(TextIO(data), **kwargs)
+ test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
#
kwargs = dict(delimiter=5, names=True, dtype=None)
ctrl = np.array([(0, 1, 2.3), (45, 67, 9.)],
dtype=[('A', int), ('B', int), ('C', float)])
- test = np.ndfromtxt(TextIO(data), **kwargs)
+ test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_filling_values(self):
@@ -1980,7 +2049,7 @@ M 33 21.99
data = b"1, 2, 3\n1, , 5\n0, 6, \n"
kwargs = dict(delimiter=",", dtype=None, filling_values=-999)
ctrl = np.array([[1, 2, 3], [1, -999, 5], [0, 6, -999]], dtype=int)
- test = np.ndfromtxt(TextIO(data), **kwargs)
+ test = np.genfromtxt(TextIO(data), **kwargs)
assert_equal(test, ctrl)
def test_comments_is_none(self):
@@ -2049,7 +2118,6 @@ M 33 21.99
def test_utf8_file(self):
utf8 = b"\xcf\x96"
- latin1 = b"\xf6\xfc\xf6"
with temppath() as path:
with open(path, "wb") as f:
f.write((b"test1,testNonethe" + utf8 + b",test3\n") * 2)
@@ -2270,7 +2338,7 @@ M 33 21.99
data = TextIO('73786976294838206464 17179869184 1024')
- test = np.ndfromtxt(data, dtype=None)
+ test = np.genfromtxt(data, dtype=None)
assert_equal(test.dtype.names, ['f0', 'f1', 'f2'])
@@ -2295,7 +2363,7 @@ class TestPathUsage(object):
assert_array_equal(x, a)
def test_save_load(self):
- # Test that pathlib.Path instances can be used with savez.
+ # Test that pathlib.Path instances can be used with save.
with temppath(suffix='.npy') as path:
path = Path(path)
a = np.array([[1, 2], [3, 4]], int)
@@ -2303,6 +2371,30 @@ class TestPathUsage(object):
data = np.load(path)
assert_array_equal(data, a)
+ def test_save_load_memmap(self):
+ # Test that pathlib.Path instances can be loaded mem-mapped.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ data = np.load(path, mmap_mode='r')
+ assert_array_equal(data, a)
+ # close the mem-mapped file
+ del data
+
+ def test_save_load_memmap_readwrite(self):
+ # Test that pathlib.Path instances can be written mem-mapped.
+ with temppath(suffix='.npy') as path:
+ path = Path(path)
+ a = np.array([[1, 2], [3, 4]], int)
+ np.save(path, a)
+ b = np.load(path, mmap_mode='r+')
+ a[0][0] = 5
+ b[0][0] = 5
+ del b # closes the file
+ data = np.load(path)
+ assert_array_equal(data, a)
+
def test_savez_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
@@ -2310,7 +2402,7 @@ class TestPathUsage(object):
np.savez(path, lab='place holder')
with np.load(path) as data:
assert_array_equal(data['lab'], 'place holder')
-
+
def test_savez_compressed_load(self):
# Test that pathlib.Path instances can be used with savez.
with temppath(suffix='.npz') as path:
@@ -2336,7 +2428,7 @@ class TestPathUsage(object):
f.write(u'1 2\n3 4')
control = np.array([[1, 2], [3, 4]], dtype=int)
- test = np.ndfromtxt(path, dtype=int)
+ test = np.genfromtxt(path, dtype=int)
assert_array_equal(test, control)
def test_mafromtxt(self):
@@ -2346,7 +2438,7 @@ class TestPathUsage(object):
with path.open('w') as f:
f.write(u'1,2,3.0\n4,5,6.0\n')
- test = np.mafromtxt(path, delimiter=',')
+ test = np.genfromtxt(path, delimiter=',', usemask=True)
control = ma.array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)])
assert_equal(test, control)
@@ -2391,6 +2483,44 @@ def test_gzip_load():
assert_array_equal(np.load(f), a)
+# These next two classes encode the minimal API needed to save()/load() arrays.
+# The `test_ducktyping` ensures they work correctly
+class JustWriter(object):
+ def __init__(self, base):
+ self.base = base
+
+ def write(self, s):
+ return self.base.write(s)
+
+ def flush(self):
+ return self.base.flush()
+
+class JustReader(object):
+ def __init__(self, base):
+ self.base = base
+
+ def read(self, n):
+ return self.base.read(n)
+
+ def seek(self, off, whence=0):
+ return self.base.seek(off, whence)
+
+
+def test_ducktyping():
+ a = np.random.random((5, 5))
+
+ s = BytesIO()
+ f = JustWriter(s)
+
+ np.save(f, a)
+ f.flush()
+ s.seek(0)
+
+ f = JustReader(s)
+ assert_array_equal(np.load(f), a)
+
+
+
def test_gzip_loadtxt():
# Thanks to another windows brokenness, we can't use
# NamedTemporaryFile: a file created from this function cannot be
diff --git a/numpy/lib/tests/test_mixins.py b/numpy/lib/tests/test_mixins.py
index f2d915502..3dd5346b6 100644
--- a/numpy/lib/tests/test_mixins.py
+++ b/numpy/lib/tests/test_mixins.py
@@ -199,6 +199,17 @@ class TestNDArrayOperatorsMixin(object):
err_msg = 'failed for operator {}'.format(op)
_assert_equal_type_and_value(expected, actual, err_msg=err_msg)
+ def test_matmul(self):
+ array = np.array([1, 2], dtype=np.float64)
+ array_like = ArrayLike(array)
+ expected = ArrayLike(np.float64(5))
+ _assert_equal_type_and_value(expected, np.matmul(array_like, array))
+ if not PY2:
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array_like, array))
+ _assert_equal_type_and_value(
+ expected, operator.matmul(array, array_like))
+
def test_ufunc_at(self):
array = ArrayLike(np.array([1, 2, 3, 4]))
assert_(np.negative.at(array, np.array([0, 1])) is None)
diff --git a/numpy/lib/tests/test_nanfunctions.py b/numpy/lib/tests/test_nanfunctions.py
index 504372faf..b7261c63f 100644
--- a/numpy/lib/tests/test_nanfunctions.py
+++ b/numpy/lib/tests/test_nanfunctions.py
@@ -1,8 +1,10 @@
from __future__ import division, absolute_import, print_function
import warnings
+import pytest
import numpy as np
+from numpy.lib.nanfunctions import _nan_mask
from numpy.testing import (
assert_, assert_equal, assert_almost_equal, assert_no_warnings,
assert_raises, assert_array_equal, suppress_warnings
@@ -925,3 +927,29 @@ class TestNanFunctions_Quantile(object):
p = p.tolist()
np.nanquantile(np.arange(100.), p, interpolation="midpoint")
assert_array_equal(p, p0)
+
+@pytest.mark.parametrize("arr, expected", [
+ # array of floats with some nans
+ (np.array([np.nan, 5.0, np.nan, np.inf]),
+ np.array([False, True, False, True])),
+ # int64 array that can't possibly have nans
+ (np.array([1, 5, 7, 9], dtype=np.int64),
+ True),
+ # bool array that can't possibly have nans
+ (np.array([False, True, False, True]),
+ True),
+ # 2-D complex array with nans
+ (np.array([[np.nan, 5.0],
+ [np.nan, np.inf]], dtype=np.complex64),
+ np.array([[False, True],
+ [False, True]])),
+ ])
+def test__nan_mask(arr, expected):
+ for out in [None, np.empty(arr.shape, dtype=np.bool_)]:
+ actual = _nan_mask(arr, out=out)
+ assert_equal(actual, expected)
+ # the above won't distinguish between True proper
+ # and an array of True values; we want True proper
+ # for types that can't possibly contain NaN
+ if type(expected) is not np.ndarray:
+ assert actual is True
diff --git a/numpy/lib/tests/test_packbits.py b/numpy/lib/tests/test_packbits.py
index fde5c37f2..95a465c36 100644
--- a/numpy/lib/tests/test_packbits.py
+++ b/numpy/lib/tests/test_packbits.py
@@ -2,7 +2,8 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import assert_array_equal, assert_equal, assert_raises
-
+import pytest
+from itertools import chain
def test_packbits():
# Copied from the docstring.
@@ -50,8 +51,8 @@ def test_packbits_empty_with_axis():
assert_equal(b.dtype, np.uint8)
assert_equal(b.shape, out_shape)
-
-def test_packbits_large():
+@pytest.mark.parametrize('bitorder', ('little', 'big'))
+def test_packbits_large(bitorder):
# test data large enough for 16 byte vectorization
a = np.array([1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0,
0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1,
@@ -71,7 +72,7 @@ def test_packbits_large():
a = a.repeat(3)
for dtype in '?bBhHiIlLqQ':
arr = np.array(a, dtype=dtype)
- b = np.packbits(arr, axis=None)
+ b = np.packbits(arr, axis=None, bitorder=bitorder)
assert_equal(b.dtype, np.uint8)
r = [252, 127, 192, 3, 254, 7, 252, 0, 7, 31, 240, 0, 28, 1, 255, 252,
113, 248, 3, 255, 192, 28, 15, 192, 28, 126, 0, 224, 127, 255,
@@ -81,9 +82,10 @@ def test_packbits_large():
255, 224, 1, 255, 252, 126, 63, 0, 1, 192, 252, 14, 63, 0, 15,
199, 252, 113, 255, 3, 128, 56, 252, 14, 7, 0, 113, 255, 255, 142, 56, 227,
129, 248, 227, 129, 199, 31, 128]
- assert_array_equal(b, r)
+ if bitorder == 'big':
+ assert_array_equal(b, r)
# equal for size being multiple of 8
- assert_array_equal(np.unpackbits(b)[:-4], a)
+ assert_array_equal(np.unpackbits(b, bitorder=bitorder)[:-4], a)
# check last byte of different remainders (16 byte vectorization)
b = [np.packbits(arr[:-i], axis=None)[-1] for i in range(1, 16)]
@@ -229,6 +231,20 @@ def test_unpackbits():
[0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 1, 0, 1, 1, 1]]))
+def test_pack_unpack_order():
+ a = np.array([[2], [7], [23]], dtype=np.uint8)
+ b = np.unpackbits(a, axis=1)
+ assert_equal(b.dtype, np.uint8)
+ b_little = np.unpackbits(a, axis=1, bitorder='little')
+ b_big = np.unpackbits(a, axis=1, bitorder='big')
+ assert_array_equal(b, b_big)
+ assert_array_equal(a, np.packbits(b_little, axis=1, bitorder='little'))
+ assert_array_equal(b[:,::-1], b_little)
+ assert_array_equal(a, np.packbits(b_big, axis=1, bitorder='big'))
+ assert_raises(ValueError, np.unpackbits, a, bitorder='r')
+ assert_raises(TypeError, np.unpackbits, a, bitorder=10)
+
+
def test_unpackbits_empty():
a = np.empty((0,), dtype=np.uint8)
@@ -266,3 +282,97 @@ def test_unpackbits_large():
assert_array_equal(np.packbits(np.unpackbits(d, axis=1), axis=1), d)
d = d.T.copy()
assert_array_equal(np.packbits(np.unpackbits(d, axis=0), axis=0), d)
+
+
+class TestCount():
+ x = np.array([
+ [1, 0, 1, 0, 0, 1, 0],
+ [0, 1, 1, 1, 0, 0, 0],
+ [0, 0, 1, 0, 0, 1, 1],
+ [1, 1, 0, 0, 0, 1, 1],
+ [1, 0, 1, 0, 1, 0, 1],
+ [0, 0, 1, 1, 1, 0, 0],
+ [0, 1, 0, 1, 0, 1, 0],
+ ], dtype=np.uint8)
+ padded1 = np.zeros(57, dtype=np.uint8)
+ padded1[:49] = x.ravel()
+ padded1b = np.zeros(57, dtype=np.uint8)
+ padded1b[:49] = x[::-1].copy().ravel()
+ padded2 = np.zeros((9, 9), dtype=np.uint8)
+ padded2[:7, :7] = x
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ @pytest.mark.parametrize('count', chain(range(58), range(-1, -57, -1)))
+ def test_roundtrip(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ # test complete invertibility of packbits and unpackbits with count
+ packed = np.packbits(self.x, bitorder=bitorder)
+ unpacked = np.unpackbits(packed, count=count, bitorder=bitorder)
+ assert_equal(unpacked.dtype, np.uint8)
+ assert_array_equal(unpacked, self.padded1[:cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ ])
+ def test_count(self, kwargs):
+ packed = np.packbits(self.x)
+ unpacked = np.unpackbits(packed, **kwargs)
+ assert_equal(unpacked.dtype, np.uint8)
+ assert_array_equal(unpacked, self.padded1[:-1])
+
+ @pytest.mark.parametrize('bitorder', ('little', 'big'))
+ # delta==-1 when count<0 because one extra zero of padding
+ @pytest.mark.parametrize('count', chain(range(8), range(-1, -9, -1)))
+ def test_roundtrip_axis(self, bitorder, count):
+ if count < 0:
+ # one extra zero of padding
+ cutoff = count - 1
+ else:
+ cutoff = count
+ packed0 = np.packbits(self.x, axis=0, bitorder=bitorder)
+ unpacked0 = np.unpackbits(packed0, axis=0, count=count,
+ bitorder=bitorder)
+ assert_equal(unpacked0.dtype, np.uint8)
+ assert_array_equal(unpacked0, self.padded2[:cutoff, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1, bitorder=bitorder)
+ unpacked1 = np.unpackbits(packed1, axis=1, count=count,
+ bitorder=bitorder)
+ assert_equal(unpacked1.dtype, np.uint8)
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :cutoff])
+
+ @pytest.mark.parametrize('kwargs', [
+ {}, {'count': None},
+ {'bitorder' : 'little'},
+ {'bitorder': 'little', 'count': None},
+ {'bitorder' : 'big'},
+ {'bitorder': 'big', 'count': None},
+ ])
+ def test_axis_count(self, kwargs):
+ packed0 = np.packbits(self.x, axis=0)
+ unpacked0 = np.unpackbits(packed0, axis=0, **kwargs)
+ assert_equal(unpacked0.dtype, np.uint8)
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked0, self.padded2[:-1, :self.x.shape[1]])
+ else:
+ assert_array_equal(unpacked0[::-1, :], self.padded2[:-1, :self.x.shape[1]])
+
+ packed1 = np.packbits(self.x, axis=1)
+ unpacked1 = np.unpackbits(packed1, axis=1, **kwargs)
+ assert_equal(unpacked1.dtype, np.uint8)
+ if kwargs.get('bitorder', 'big') == 'big':
+ assert_array_equal(unpacked1, self.padded2[:self.x.shape[0], :-1])
+ else:
+ assert_array_equal(unpacked1[:, ::-1], self.padded2[:self.x.shape[0], :-1])
+
+ def test_bad_count(self):
+ packed0 = np.packbits(self.x, axis=0)
+ assert_raises(ValueError, np.unpackbits, packed0, axis=0, count=-9)
+ packed1 = np.packbits(self.x, axis=1)
+ assert_raises(ValueError, np.unpackbits, packed1, axis=1, count=-9)
+ packed = np.packbits(self.x)
+ assert_raises(ValueError, np.unpackbits, packed, count=-57)
diff --git a/numpy/lib/tests/test_polynomial.py b/numpy/lib/tests/test_polynomial.py
index 9f7c117a2..89759bd83 100644
--- a/numpy/lib/tests/test_polynomial.py
+++ b/numpy/lib/tests/test_polynomial.py
@@ -3,7 +3,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_array_equal, assert_almost_equal,
- assert_array_almost_equal, assert_raises
+ assert_array_almost_equal, assert_raises, assert_allclose
)
@@ -122,27 +122,34 @@ class TestPolynomial(object):
weights = np.arange(8, 1, -1)**2/7.0
# Check exception when too few points for variance estimate. Note that
- # the Bayesian estimate requires the number of data points to exceed
- # degree + 3.
+ # the estimate requires the number of data points to exceed
+ # degree + 1
assert_raises(ValueError, np.polyfit,
- [0, 1, 3], [0, 1, 3], deg=0, cov=True)
+ [1], [1], deg=0, cov=True)
# check 1D case
m, cov = np.polyfit(x, y+err, 2, cov=True)
est = [3.8571, 0.2857, 1.619]
assert_almost_equal(est, m, decimal=4)
- val0 = [[2.9388, -5.8776, 1.6327],
- [-5.8776, 12.7347, -4.2449],
- [1.6327, -4.2449, 2.3220]]
+ val0 = [[ 1.4694, -2.9388, 0.8163],
+ [-2.9388, 6.3673, -2.1224],
+ [ 0.8163, -2.1224, 1.161 ]]
assert_almost_equal(val0, cov, decimal=4)
m2, cov2 = np.polyfit(x, y+err, 2, w=weights, cov=True)
assert_almost_equal([4.8927, -1.0177, 1.7768], m2, decimal=4)
- val = [[8.7929, -10.0103, 0.9756],
- [-10.0103, 13.6134, -1.8178],
- [0.9756, -1.8178, 0.6674]]
+ val = [[ 4.3964, -5.0052, 0.4878],
+ [-5.0052, 6.8067, -0.9089],
+ [ 0.4878, -0.9089, 0.3337]]
assert_almost_equal(val, cov2, decimal=4)
+ m3, cov3 = np.polyfit(x, y+err, 2, w=weights, cov="unscaled")
+ assert_almost_equal([4.8927, -1.0177, 1.7768], m3, decimal=4)
+ val = [[ 0.1473, -0.1677, 0.0163],
+ [-0.1677, 0.228 , -0.0304],
+ [ 0.0163, -0.0304, 0.0112]]
+ assert_almost_equal(val, cov3, decimal=4)
+
# check 2D (n,1) case
y = y[:, np.newaxis]
c = c[:, np.newaxis]
@@ -158,6 +165,29 @@ class TestPolynomial(object):
assert_almost_equal(val0, cov[:, :, 0], decimal=4)
assert_almost_equal(val0, cov[:, :, 1], decimal=4)
+ # check order 1 (deg=0) case, were the analytic results are simple
+ np.random.seed(123)
+ y = np.random.normal(size=(4, 10000))
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, deg=0, cov=True)
+ # Should get sigma_mean = sigma/sqrt(N) = 1./sqrt(4) = 0.5.
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # Without scaling, since reduced chi2 is 1, the result should be the same.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=np.ones(y.shape[0]),
+ deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.5)
+ # If we estimate our errors wrong, no change with scaling:
+ w = np.full(y.shape[0], 1./0.5)
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov=True)
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_allclose(np.sqrt(cov.mean()), 0.5, atol=0.01)
+ # But if we do not scale, our estimate for the error in the mean will
+ # differ.
+ mean, cov = np.polyfit(np.zeros(y.shape[0]), y, w=w, deg=0, cov="unscaled")
+ assert_allclose(mean.std(), 0.5, atol=0.01)
+ assert_almost_equal(np.sqrt(cov.mean()), 0.25)
+
def test_objects(self):
from decimal import Decimal
p = np.poly1d([Decimal('4.0'), Decimal('3.0'), Decimal('2.0')])
@@ -216,16 +246,16 @@ class TestPolynomial(object):
assert_equal(r.coeffs.dtype, np.complex128)
assert_equal(q*a + r, b)
- def test_poly_coeffs_immutable(self):
- """ Coefficients should not be modifiable """
+ def test_poly_coeffs_mutable(self):
+ """ Coefficients should be modifiable """
p = np.poly1d([1, 2, 3])
- try:
- # despite throwing an exception, this used to change state
- p.coeffs += 1
- except Exception:
- pass
- assert_equal(p.coeffs, [1, 2, 3])
+ p.coeffs += 1
+ assert_equal(p.coeffs, [2, 3, 4])
p.coeffs[2] += 10
- assert_equal(p.coeffs, [1, 2, 3])
+ assert_equal(p.coeffs, [2, 3, 14])
+
+ # this never used to be allowed - let's not add features to deprecated
+ # APIs
+ assert_raises(AttributeError, setattr, p, 'coeffs', np.array(1))
diff --git a/numpy/lib/tests/test_recfunctions.py b/numpy/lib/tests/test_recfunctions.py
index 5585a95f9..fa5f4dec2 100644
--- a/numpy/lib/tests/test_recfunctions.py
+++ b/numpy/lib/tests/test_recfunctions.py
@@ -10,10 +10,13 @@ from numpy.testing import assert_, assert_raises
from numpy.lib.recfunctions import (
drop_fields, rename_fields, get_fieldstructure, recursive_fill_fields,
find_duplicates, merge_arrays, append_fields, stack_arrays, join_by,
- repack_fields)
+ repack_fields, unstructured_to_structured, structured_to_unstructured,
+ apply_along_fields, require_fields, assign_fields_by_name)
+get_fieldspec = np.lib.recfunctions._get_fieldspec
get_names = np.lib.recfunctions.get_names
get_names_flat = np.lib.recfunctions.get_names_flat
-zip_descr = np.lib.recfunctions.zip_descr
+zip_descr = np.lib.recfunctions._zip_descr
+zip_dtype = np.lib.recfunctions._zip_dtype
class TestRecFunctions(object):
@@ -88,8 +91,10 @@ class TestRecFunctions(object):
control = np.array([(1,), (4,)], dtype=[('a', int)])
assert_equal(test, control)
+ # dropping all fields results in an array with no fields
test = drop_fields(a, ['a', 'b'])
- assert_(test is None)
+ control = np.array([(), ()], dtype=[])
+ assert_equal(test, control)
def test_rename_fields(self):
# Test rename fields
@@ -112,6 +117,14 @@ class TestRecFunctions(object):
test = get_names(ndtype)
assert_equal(test, ('a', ('b', ('ba', 'bb'))))
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names(ndtype)
+ assert_equal(test, ('a', ('b', ())))
+
+ ndtype = np.dtype([])
+ test = get_names(ndtype)
+ assert_equal(test, ())
+
def test_get_names_flat(self):
# Test get_names_flat
ndtype = np.dtype([('A', '|S3'), ('B', float)])
@@ -122,6 +135,14 @@ class TestRecFunctions(object):
test = get_names_flat(ndtype)
assert_equal(test, ('a', 'b', 'ba', 'bb'))
+ ndtype = np.dtype([('a', int), ('b', [])])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ('a', 'b'))
+
+ ndtype = np.dtype([])
+ test = get_names_flat(ndtype)
+ assert_equal(test, ())
+
def test_get_fieldstructure(self):
# Test get_fieldstructure
@@ -144,6 +165,11 @@ class TestRecFunctions(object):
'BBA': ['B', 'BB'], 'BBB': ['B', 'BB']}
assert_equal(test, control)
+ # 0 fields
+ ndtype = np.dtype([])
+ test = get_fieldstructure(ndtype)
+ assert_equal(test, {})
+
def test_find_duplicates(self):
# Test find_duplicates
a = ma.array([(2, (2., 'B')), (1, (2., 'B')), (2, (2., 'B')),
@@ -204,6 +230,123 @@ class TestRecFunctions(object):
dt = np.dtype((np.record, dt))
assert_(repack_fields(dt).type is np.record)
+ def test_structured_to_unstructured(self):
+ a = np.zeros(4, dtype=[('a', 'i4'), ('b', 'f4,u2'), ('c', 'f4', 2)])
+ out = structured_to_unstructured(a)
+ assert_equal(out, np.zeros((4,5), dtype='f8'))
+
+ b = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ out = np.mean(structured_to_unstructured(b[['x', 'z']]), axis=-1)
+ assert_equal(out, np.array([ 3. , 5.5, 9. , 11. ]))
+ out = np.mean(structured_to_unstructured(b[['x']]), axis=-1)
+ assert_equal(out, np.array([ 1. , 4. , 7. , 10. ]))
+
+ c = np.arange(20).reshape((4,5))
+ out = unstructured_to_structured(c, a.dtype)
+ want = np.array([( 0, ( 1., 2), [ 3., 4.]),
+ ( 5, ( 6., 7), [ 8., 9.]),
+ (10, (11., 12), [13., 14.]),
+ (15, (16., 17), [18., 19.])],
+ dtype=[('a', 'i4'),
+ ('b', [('f0', 'f4'), ('f1', 'u2')]),
+ ('c', 'f4', (2,))])
+ assert_equal(out, want)
+
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'f4'), ('z', 'f8')])
+ assert_equal(apply_along_fields(np.mean, d),
+ np.array([ 8.0/3, 16.0/3, 26.0/3, 11. ]))
+ assert_equal(apply_along_fields(np.mean, d[['x', 'z']]),
+ np.array([ 3. , 5.5, 9. , 11. ]))
+
+ # check that for uniform field dtypes we get a view, not a copy:
+ d = np.array([(1, 2, 5), (4, 5, 7), (7, 8 ,11), (10, 11, 12)],
+ dtype=[('x', 'i4'), ('y', 'i4'), ('z', 'i4')])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(dd.base is d)
+ assert_(ddd.base is d)
+
+ # including uniform fields with subarrays unpacked
+ d = np.array([(1, [2, 3], [[ 4, 5], [ 6, 7]]),
+ (8, [9, 10], [[11, 12], [13, 14]])],
+ dtype=[('x0', 'i4'), ('x1', ('i4', 2)),
+ ('x2', ('i4', (2, 2)))])
+ dd = structured_to_unstructured(d)
+ ddd = unstructured_to_structured(dd, d.dtype)
+ assert_(dd.base is d)
+ assert_(ddd.base is d)
+
+ # test that nested fields with identical names don't break anything
+ point = np.dtype([('x', int), ('y', int)])
+ triangle = np.dtype([('a', point), ('b', point), ('c', point)])
+ arr = np.zeros(10, triangle)
+ res = structured_to_unstructured(arr, dtype=int)
+ assert_equal(res, np.zeros((10, 6), dtype=int))
+
+
+ # test nested combinations of subarrays and structured arrays, gh-13333
+ def subarray(dt, shape):
+ return np.dtype((dt, shape))
+
+ def structured(*dts):
+ return np.dtype([('x{}'.format(i), dt) for i, dt in enumerate(dts)])
+
+ def inspect(dt, dtype=None):
+ arr = np.zeros((), dt)
+ ret = structured_to_unstructured(arr, dtype=dtype)
+ backarr = unstructured_to_structured(ret, dt)
+ return ret.shape, ret.dtype, backarr.dtype
+
+ dt = structured(subarray(structured(np.int32, np.int32), 3))
+ assert_equal(inspect(dt), ((6,), np.int32, dt))
+
+ dt = structured(subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((4,), np.int32, dt))
+
+ dt = structured(np.int32)
+ assert_equal(inspect(dt), ((1,), np.int32, dt))
+
+ dt = structured(np.int32, subarray(subarray(np.int32, 2), 2))
+ assert_equal(inspect(dt), ((5,), np.int32, dt))
+
+ dt = structured()
+ assert_raises(ValueError, structured_to_unstructured, np.zeros(3, dt))
+
+ # these currently don't work, but we may make it work in the future
+ assert_raises(NotImplementedError, structured_to_unstructured,
+ np.zeros(3, dt), dtype=np.int32)
+ assert_raises(NotImplementedError, unstructured_to_structured,
+ np.zeros((3,0), dtype=np.int32))
+
+ def test_field_assignment_by_name(self):
+ a = np.ones(2, dtype=[('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
+ newdt = [('b', 'f4'), ('c', 'u1')]
+
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+
+ b = np.array([(1,2), (3,4)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([(1,1,2),(1,3,4)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([(0,1,2),(0,3,4)], dtype=a.dtype))
+
+ # test nested fields
+ a = np.ones(2, dtype=[('a', [('b', 'f8'), ('c', 'u1')])])
+ newdt = [('a', [('c', 'u1')])]
+ assert_equal(require_fields(a, newdt), np.ones(2, newdt))
+ b = np.array([((2,),), ((3,),)], dtype=newdt)
+ assign_fields_by_name(a, b, zero_unassigned=False)
+ assert_equal(a, np.array([((1,2),), ((1,3),)], dtype=a.dtype))
+ assign_fields_by_name(a, b)
+ assert_equal(a, np.array([((0,2),), ((0,3),)], dtype=a.dtype))
+
+ # test unstructured code path for 0d arrays
+ a, b = np.array(3), np.array(0)
+ assign_fields_by_name(b, a)
+ assert_equal(b[()], 3)
+
class TestRecursiveFillFields(object):
# Test recursive_fill_fields.
@@ -237,8 +380,8 @@ class TestMergeArrays(object):
z = np.array(
[('A', 1.), ('B', 2.)], dtype=[('A', '|S3'), ('B', float)])
w = np.array(
- [(1, (2, 3.0)), (4, (5, 6.0))],
- dtype=[('a', int), ('b', [('ba', float), ('bb', int)])])
+ [(1, (2, 3.0, ())), (4, (5, 6.0, ()))],
+ dtype=[('a', int), ('b', [('ba', float), ('bb', int), ('bc', [])])])
self.data = (w, x, y, z)
def test_solo(self):
@@ -309,8 +452,8 @@ class TestMergeArrays(object):
test = merge_arrays((x, w), flatten=False)
controldtype = [('f0', int),
('f1', [('a', int),
- ('b', [('ba', float), ('bb', int)])])]
- control = np.array([(1., (1, (2, 3.0))), (2, (4, (5, 6.0)))],
+ ('b', [('ba', float), ('bb', int), ('bc', [])])])]
+ control = np.array([(1., (1, (2, 3.0, ()))), (2, (4, (5, 6.0, ())))],
dtype=controldtype)
assert_equal(test, control)
diff --git a/numpy/lib/tests/test_regression.py b/numpy/lib/tests/test_regression.py
index 4c46bc46b..4cd812f5d 100644
--- a/numpy/lib/tests/test_regression.py
+++ b/numpy/lib/tests/test_regression.py
@@ -21,8 +21,8 @@ class TestRegression(object):
# Ticket #91
x = np.random.random((3, 3))
y = x.copy()
- np.cov(x, rowvar=1)
- np.cov(y, rowvar=0)
+ np.cov(x, rowvar=True)
+ np.cov(y, rowvar=False)
assert_array_equal(x, y)
def test_mem_digitize(self):
@@ -56,7 +56,7 @@ class TestRegression(object):
def test_poly1d_nan_roots(self):
# Ticket #396
- p = np.poly1d([np.nan, np.nan, 1], r=0)
+ p = np.poly1d([np.nan, np.nan, 1], r=False)
assert_raises(np.linalg.LinAlgError, getattr, p, "r")
def test_mem_polymul(self):
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 6e4cd225d..01ea028bb 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -260,8 +260,8 @@ class TestApplyAlongAxis(object):
def test_with_iterable_object(self):
# from issue 5248
d = np.array([
- [set([1, 11]), set([2, 22]), set([3, 33])],
- [set([4, 44]), set([5, 55]), set([6, 66])]
+ [{1, 11}, {2, 22}, {3, 33}],
+ [{4, 44}, {5, 55}, {6, 66}]
])
actual = np.apply_along_axis(lambda a: set.union(*a), 0, d)
expected = np.array([{1, 11, 4, 44}, {2, 22, 5, 55}, {3, 33, 6, 66}])
@@ -457,10 +457,35 @@ class TestSplit(object):
a = np.arange(10)
assert_raises(ValueError, split, a, 3)
+
class TestColumnStack(object):
def test_non_iterable(self):
assert_raises(TypeError, column_stack, 1)
+ def test_1D_arrays(self):
+ # example from docstring
+ a = np.array((1, 2, 3))
+ b = np.array((2, 3, 4))
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_2D_arrays(self):
+ # same as hstack 2D docstring example
+ a = np.array([[1], [2], [3]])
+ b = np.array([[2], [3], [4]])
+ expected = np.array([[1, 2],
+ [2, 3],
+ [3, 4]])
+ actual = np.column_stack((a, b))
+ assert_equal(actual, expected)
+
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ column_stack((np.arange(3) for _ in range(2)))
+
class TestDstack(object):
def test_non_iterable(self):
@@ -494,6 +519,10 @@ class TestDstack(object):
desired = np.array([[[1, 1], [2, 2]]])
assert_array_equal(res, desired)
+ def test_generator(self):
+ with assert_warns(FutureWarning):
+ dstack((np.arange(3) for _ in range(2)))
+
# array_split has more comprehensive test of splitting.
# only do simple test on hsplit, vsplit, and dsplit
diff --git a/numpy/lib/tests/test_stride_tricks.py b/numpy/lib/tests/test_stride_tricks.py
index b2bd7da3e..85fcceedc 100644
--- a/numpy/lib/tests/test_stride_tricks.py
+++ b/numpy/lib/tests/test_stride_tricks.py
@@ -4,7 +4,7 @@ import numpy as np
from numpy.core._rational_tests import rational
from numpy.testing import (
assert_equal, assert_array_equal, assert_raises, assert_,
- assert_raises_regex
+ assert_raises_regex, assert_warns,
)
from numpy.lib.stride_tricks import (
as_strided, broadcast_arrays, _broadcast_shape, broadcast_to
@@ -415,12 +415,32 @@ def test_writeable():
assert_equal(result.flags.writeable, False)
assert_raises(ValueError, result.__setitem__, slice(None), 0)
- # but the result of broadcast_arrays needs to be writeable (for now), to
+ # but the result of broadcast_arrays needs to be writeable, to
# preserve backwards compatibility
+ for is_broadcast, results in [(False, broadcast_arrays(original,)),
+ (True, broadcast_arrays(0, original))]:
+ for result in results:
+ # This will change to False in a future version
+ if is_broadcast:
+ with assert_warns(FutureWarning):
+ assert_equal(result.flags.writeable, True)
+ with assert_warns(DeprecationWarning):
+ result[:] = 0
+ # Warning not emitted, writing to the array resets it
+ assert_equal(result.flags.writeable, True)
+ else:
+ # No warning:
+ assert_equal(result.flags.writeable, True)
+
for results in [broadcast_arrays(original),
broadcast_arrays(0, original)]:
for result in results:
+ # resets the warn_on_write DeprecationWarning
+ result.flags.writeable = True
+ # check: no warning emitted
assert_equal(result.flags.writeable, True)
+ result[:] = 0
+
# keep readonly input readonly
original.flags.writeable = False
_, result = broadcast_arrays(0, original)
@@ -435,6 +455,25 @@ def test_writeable():
assert_(first.shape == second.shape)
+def test_writeable_memoryview():
+ # The result of broadcast_arrays exports as a non-writeable memoryview
+ # because otherwise there is no good way to opt in to the new behaviour
+ # (i.e. you would need to set writeable to False explicitly).
+ # See gh-13929.
+ original = np.array([1, 2, 3])
+
+ for is_broadcast, results in [(False, broadcast_arrays(original,)),
+ (True, broadcast_arrays(0, original))]:
+ for result in results:
+ # This will change to False in a future version
+ if is_broadcast:
+ # memoryview(result, writable=True) will give warning but cannot
+ # be tested using the python API.
+ assert memoryview(result).readonly
+ else:
+ assert not memoryview(result).readonly
+
+
def test_reference_types():
input_array = np.array('a', dtype=object)
expected = np.array(['a'] * 3, dtype=object)
diff --git a/numpy/lib/tests/test_twodim_base.py b/numpy/lib/tests/test_twodim_base.py
index bf93b4adb..bb844e4bd 100644
--- a/numpy/lib/tests/test_twodim_base.py
+++ b/numpy/lib/tests/test_twodim_base.py
@@ -5,7 +5,7 @@ from __future__ import division, absolute_import, print_function
from numpy.testing import (
assert_equal, assert_array_equal, assert_array_max_ulp,
- assert_array_almost_equal, assert_raises,
+ assert_array_almost_equal, assert_raises, assert_
)
from numpy import (
@@ -17,6 +17,9 @@ from numpy import (
import numpy as np
+from numpy.core.tests.test_overrides import requires_array_function
+
+
def get_mat(n):
data = arange(n)
data = add.outer(data, data)
@@ -273,6 +276,27 @@ class TestHistogram2d(object):
assert_array_equal(H, answer)
assert_array_equal(xe, array([0., 0.25, 0.5, 0.75, 1]))
+ @requires_array_function
+ def test_dispatch(self):
+ class ShouldDispatch:
+ def __array_function__(self, function, types, args, kwargs):
+ return types, args, kwargs
+
+ xy = [1, 2]
+ s_d = ShouldDispatch()
+ r = histogram2d(s_d, xy)
+ # Cannot use assert_equal since that dispatches...
+ assert_(r == ((ShouldDispatch,), (s_d, xy), {}))
+ r = histogram2d(xy, s_d)
+ assert_(r == ((ShouldDispatch,), (xy, s_d), {}))
+ r = histogram2d(xy, xy, bins=s_d)
+ assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=s_d)))
+ r = histogram2d(xy, xy, bins=[s_d, 5])
+ assert_(r, ((ShouldDispatch,), (xy, xy), dict(bins=[s_d, 5])))
+ assert_raises(Exception, histogram2d, xy, xy, bins=[s_d])
+ r = histogram2d(xy, xy, weights=s_d)
+ assert_(r, ((ShouldDispatch,), (xy, xy), dict(weights=s_d)))
+
class TestTri(object):
def test_dtype(self):
diff --git a/numpy/lib/tests/test_type_check.py b/numpy/lib/tests/test_type_check.py
index 2982ca31a..b3f114b92 100644
--- a/numpy/lib/tests/test_type_check.py
+++ b/numpy/lib/tests/test_type_check.py
@@ -360,6 +360,14 @@ class TestNanToNum(object):
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
assert_equal(type(vals), np.ndarray)
+
+ # perform the same tests but with nan, posinf and neginf keywords
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1))/0.,
+ nan=10, posinf=20, neginf=30)
+ assert_equal(vals, [30, 10, 20])
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_equal(type(vals), np.ndarray)
# perform the same test but in-place
with np.errstate(divide='ignore', invalid='ignore'):
@@ -371,26 +379,48 @@ class TestNanToNum(object):
assert_(vals[1] == 0)
assert_all(vals[2] > 1e10) and assert_all(np.isfinite(vals[2]))
assert_equal(type(vals), np.ndarray)
+
+ # perform the same test but in-place
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = np.array((-1., 0, 1))/0.
+ result = nan_to_num(vals, copy=False, nan=10, posinf=20, neginf=30)
+
+ assert_(result is vals)
+ assert_equal(vals, [30, 10, 20])
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_equal(type(vals), np.ndarray)
def test_array(self):
vals = nan_to_num([1])
assert_array_equal(vals, np.array([1], int))
assert_equal(type(vals), np.ndarray)
+ vals = nan_to_num([1], nan=10, posinf=20, neginf=30)
+ assert_array_equal(vals, np.array([1], int))
+ assert_equal(type(vals), np.ndarray)
def test_integer(self):
vals = nan_to_num(1)
assert_all(vals == 1)
assert_equal(type(vals), np.int_)
+ vals = nan_to_num(1, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1)
+ assert_equal(type(vals), np.int_)
def test_float(self):
vals = nan_to_num(1.0)
assert_all(vals == 1.0)
assert_equal(type(vals), np.float_)
+ vals = nan_to_num(1.1, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1.1)
+ assert_equal(type(vals), np.float_)
def test_complex_good(self):
vals = nan_to_num(1+1j)
assert_all(vals == 1+1j)
assert_equal(type(vals), np.complex_)
+ vals = nan_to_num(1+1j, nan=10, posinf=20, neginf=30)
+ assert_all(vals == 1+1j)
+ assert_equal(type(vals), np.complex_)
def test_complex_bad(self):
with np.errstate(divide='ignore', invalid='ignore'):
@@ -414,6 +444,16 @@ class TestNanToNum(object):
# !! inf. Comment out for now, and see if it
# !! changes
#assert_all(vals.real < -1e10) and assert_all(np.isfinite(vals))
+
+ def test_do_not_rewrite_previous_keyword(self):
+ # This is done to test that when, for instance, nan=np.inf then these
+ # values are not rewritten by posinf keyword to the posinf value.
+ with np.errstate(divide='ignore', invalid='ignore'):
+ vals = nan_to_num(np.array((-1., 0, 1))/0., nan=np.inf, posinf=999)
+ assert_all(np.isfinite(vals[[0, 2]]))
+ assert_all(vals[0] < -1e10)
+ assert_equal(vals[[1, 2]], [np.inf, 999])
+ assert_equal(type(vals), np.ndarray)
class TestRealIfClose(object):
diff --git a/numpy/lib/tests/test_utils.py b/numpy/lib/tests/test_utils.py
index c27c3cbf5..9673a05fa 100644
--- a/numpy/lib/tests/test_utils.py
+++ b/numpy/lib/tests/test_utils.py
@@ -1,5 +1,6 @@
from __future__ import division, absolute_import, print_function
+import inspect
import sys
import pytest
@@ -38,6 +39,32 @@ def old_func3(self, x):
new_func3 = deprecate(old_func3, old_name="old_func3", new_name="new_func3")
+def old_func4(self, x):
+ """Summary.
+
+ Further info.
+ """
+ return x
+new_func4 = deprecate(old_func4)
+
+
+def old_func5(self, x):
+ """Summary.
+
+ Bizarre indentation.
+ """
+ return x
+new_func5 = deprecate(old_func5)
+
+
+def old_func6(self, x):
+ """
+ Also in PEP-257.
+ """
+ return x
+new_func6 = deprecate(old_func6)
+
+
def test_deprecate_decorator():
assert_('deprecated' in old_func.__doc__)
@@ -51,15 +78,58 @@ def test_deprecate_fn():
assert_('new_func3' in new_func3.__doc__)
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings")
+def test_deprecate_help_indentation():
+ _compare_docs(old_func4, new_func4)
+ _compare_docs(old_func5, new_func5)
+ _compare_docs(old_func6, new_func6)
+
+
+def _compare_docs(old_func, new_func):
+ old_doc = inspect.getdoc(old_func)
+ new_doc = inspect.getdoc(new_func)
+ index = new_doc.index('\n\n') + 2
+ assert_equal(new_doc[index:], old_doc)
+
+
+@pytest.mark.skipif(sys.flags.optimize == 2, reason="-OO discards docstrings")
+def test_deprecate_preserve_whitespace():
+ assert_('\n Bizarre' in new_func5.__doc__)
+
+
def test_safe_eval_nameconstant():
# Test if safe_eval supports Python 3.4 _ast.NameConstant
utils.safe_eval('None')
-def test_byte_bounds():
- a = arange(12).reshape(3, 4)
- low, high = utils.byte_bounds(a)
- assert_equal(high - low, a.size * a.itemsize)
+class TestByteBounds(object):
+
+ def test_byte_bounds(self):
+ # pointer difference matches size * itemsize
+ # due to contiguity
+ a = arange(12).reshape(3, 4)
+ low, high = utils.byte_bounds(a)
+ assert_equal(high - low, a.size * a.itemsize)
+
+ def test_unusual_order_positive_stride(self):
+ a = arange(12).reshape(3, 4)
+ b = a.T
+ low, high = utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_unusual_order_negative_stride(self):
+ a = arange(12).reshape(3, 4)
+ b = a.T[::-1]
+ low, high = utils.byte_bounds(b)
+ assert_equal(high - low, b.size * b.itemsize)
+
+ def test_strided(self):
+ a = arange(12)
+ b = a[::2]
+ low, high = utils.byte_bounds(b)
+ # the largest pointer address is lost (even numbers only in the
+ # stride), and compensate addresses for striding by 2
+ assert_equal(high - low, b.size * 2 * b.itemsize - b.itemsize)
def test_assert_raises_regex_context_manager():
diff --git a/numpy/lib/twodim_base.py b/numpy/lib/twodim_base.py
index 98efba191..f45392188 100644
--- a/numpy/lib/twodim_base.py
+++ b/numpy/lib/twodim_base.py
@@ -3,11 +3,15 @@
"""
from __future__ import division, absolute_import, print_function
+import functools
+
from numpy.core.numeric import (
absolute, asanyarray, arange, zeros, greater_equal, multiply, ones,
asarray, where, int8, int16, int32, int64, empty, promote_types, diagonal,
nonzero
)
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from numpy.core import iinfo, transpose
@@ -17,6 +21,10 @@ __all__ = [
'tril_indices_from', 'triu_indices', 'triu_indices_from', ]
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
i1 = iinfo(int8)
i2 = iinfo(int16)
i4 = iinfo(int32)
@@ -33,6 +41,11 @@ def _min_int(low, high):
return int64
+def _flip_dispatcher(m):
+ return (m,)
+
+
+@array_function_dispatch(_flip_dispatcher)
def fliplr(m):
"""
Flip array in the left/right direction.
@@ -64,13 +77,13 @@ def fliplr(m):
--------
>>> A = np.diag([1.,2.,3.])
>>> A
- array([[ 1., 0., 0.],
- [ 0., 2., 0.],
- [ 0., 0., 3.]])
+ array([[1., 0., 0.],
+ [0., 2., 0.],
+ [0., 0., 3.]])
>>> np.fliplr(A)
- array([[ 0., 0., 1.],
- [ 0., 2., 0.],
- [ 3., 0., 0.]])
+ array([[0., 0., 1.],
+ [0., 2., 0.],
+ [3., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.fliplr(A) == A[:,::-1,...])
@@ -83,6 +96,7 @@ def fliplr(m):
return m[:, ::-1]
+@array_function_dispatch(_flip_dispatcher)
def flipud(m):
"""
Flip array in the up/down direction.
@@ -115,13 +129,13 @@ def flipud(m):
--------
>>> A = np.diag([1.0, 2, 3])
>>> A
- array([[ 1., 0., 0.],
- [ 0., 2., 0.],
- [ 0., 0., 3.]])
+ array([[1., 0., 0.],
+ [0., 2., 0.],
+ [0., 0., 3.]])
>>> np.flipud(A)
- array([[ 0., 0., 3.],
- [ 0., 2., 0.],
- [ 1., 0., 0.]])
+ array([[0., 0., 3.],
+ [0., 2., 0.],
+ [1., 0., 0.]])
>>> A = np.random.randn(2,3,5)
>>> np.all(np.flipud(A) == A[::-1,...])
@@ -137,6 +151,7 @@ def flipud(m):
return m[::-1, ...]
+@set_module('numpy')
def eye(N, M=None, k=0, dtype=float, order='C'):
"""
Return a 2-D array with ones on the diagonal and zeros elsewhere.
@@ -176,9 +191,9 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
array([[1, 0],
[0, 1]])
>>> np.eye(3, k=1)
- array([[ 0., 1., 0.],
- [ 0., 0., 1.],
- [ 0., 0., 0.]])
+ array([[0., 1., 0.],
+ [0., 0., 1.],
+ [0., 0., 0.]])
"""
if M is None:
@@ -194,6 +209,11 @@ def eye(N, M=None, k=0, dtype=float, order='C'):
return m
+def _diag_dispatcher(v, k=None):
+ return (v,)
+
+
+@array_function_dispatch(_diag_dispatcher)
def diag(v, k=0):
"""
Extract a diagonal or construct a diagonal array.
@@ -265,6 +285,7 @@ def diag(v, k=0):
raise ValueError("Input must be 1- or 2-d.")
+@array_function_dispatch(_diag_dispatcher)
def diagflat(v, k=0):
"""
Create a two-dimensional array with the flattened input as a diagonal.
@@ -324,6 +345,7 @@ def diagflat(v, k=0):
return wrap(res)
+@set_module('numpy')
def tri(N, M=None, k=0, dtype=float):
"""
An array with ones at and below the given diagonal and zeros elsewhere.
@@ -346,7 +368,7 @@ def tri(N, M=None, k=0, dtype=float):
-------
tri : ndarray of shape (N, M)
Array with its lower triangle filled with ones and zero elsewhere;
- in other words ``T[i,j] == 1`` for ``i <= j + k``, 0 otherwise.
+ in other words ``T[i,j] == 1`` for ``j <= i + k``, 0 otherwise.
Examples
--------
@@ -356,9 +378,9 @@ def tri(N, M=None, k=0, dtype=float):
[1, 1, 1, 1, 1]])
>>> np.tri(3, 5, -1)
- array([[ 0., 0., 0., 0., 0.],
- [ 1., 0., 0., 0., 0.],
- [ 1., 1., 0., 0., 0.]])
+ array([[0., 0., 0., 0., 0.],
+ [1., 0., 0., 0., 0.],
+ [1., 1., 0., 0., 0.]])
"""
if M is None:
@@ -373,6 +395,11 @@ def tri(N, M=None, k=0, dtype=float):
return m
+def _trilu_dispatcher(m, k=None):
+ return (m,)
+
+
+@array_function_dispatch(_trilu_dispatcher)
def tril(m, k=0):
"""
Lower triangle of an array.
@@ -411,6 +438,7 @@ def tril(m, k=0):
return where(mask, m, zeros(1, m.dtype))
+@array_function_dispatch(_trilu_dispatcher)
def triu(m, k=0):
"""
Upper triangle of an array.
@@ -439,7 +467,12 @@ def triu(m, k=0):
return where(mask, zeros(1, m.dtype), m)
+def _vander_dispatcher(x, N=None, increasing=None):
+ return (x,)
+
+
# Originally borrowed from John Hunter and matplotlib
+@array_function_dispatch(_vander_dispatcher)
def vander(x, N=None, increasing=False):
"""
Generate a Vandermonde matrix.
@@ -507,7 +540,7 @@ def vander(x, N=None, increasing=False):
of the differences between the values of the input vector:
>>> np.linalg.det(np.vander(x))
- 48.000000000000043
+ 48.000000000000043 # may vary
>>> (5-3)*(5-2)*(5-1)*(3-2)*(3-1)*(2-1)
48
@@ -530,6 +563,25 @@ def vander(x, N=None, increasing=False):
return v
+def _histogram2d_dispatcher(x, y, bins=None, range=None, normed=None,
+ weights=None, density=None):
+ yield x
+ yield y
+
+ # This terrible logic is adapted from the checks in histogram2d
+ try:
+ N = len(bins)
+ except TypeError:
+ N = 1
+ if N == 2:
+ yield from bins # bins=[x, y]
+ else:
+ yield bins
+
+ yield weights
+
+
+@array_function_dispatch(_histogram2d_dispatcher)
def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
density=None):
"""
@@ -605,7 +657,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
Examples
--------
- >>> import matplotlib as mpl
+ >>> from matplotlib.image import NonUniformImage
>>> import matplotlib.pyplot as plt
Construct a 2-D histogram with variable bin width. First define the bin
@@ -627,6 +679,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
>>> ax = fig.add_subplot(131, title='imshow: square bins')
>>> plt.imshow(H, interpolation='nearest', origin='low',
... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]])
+ <matplotlib.image.AxesImage object at 0x...>
:func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges:
@@ -634,13 +687,14 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
... aspect='equal')
>>> X, Y = np.meshgrid(xedges, yedges)
>>> ax.pcolormesh(X, Y, H)
+ <matplotlib.collections.QuadMesh object at 0x...>
:class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to
display actual bin edges with interpolation:
>>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated',
... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]])
- >>> im = mpl.image.NonUniformImage(ax, interpolation='bilinear')
+ >>> im = NonUniformImage(ax, interpolation='bilinear')
>>> xcenters = (xedges[:-1] + xedges[1:]) / 2
>>> ycenters = (yedges[:-1] + yedges[1:]) / 2
>>> im.set_data(xcenters, ycenters, H)
@@ -662,6 +716,7 @@ def histogram2d(x, y, bins=10, range=None, normed=None, weights=None,
return hist, edges[0], edges[1]
+@set_module('numpy')
def mask_indices(n, mask_func, k=0):
"""
Return the indices to access (n, n) arrays, given a masking function.
@@ -732,6 +787,7 @@ def mask_indices(n, mask_func, k=0):
return nonzero(a != 0)
+@set_module('numpy')
def tril_indices(n, k=0, m=None):
"""
Return the indices for the lower-triangle of an (n, m) array.
@@ -788,7 +844,7 @@ def tril_indices(n, k=0, m=None):
Both for indexing:
>>> a[il1]
- array([ 0, 4, 5, 8, 9, 10, 12, 13, 14, 15])
+ array([ 0, 4, 5, ..., 13, 14, 15])
And for assigning values:
@@ -812,6 +868,11 @@ def tril_indices(n, k=0, m=None):
return nonzero(tri(n, m, k=k, dtype=bool))
+def _trilu_indices_form_dispatcher(arr, k=None):
+ return (arr,)
+
+
+@array_function_dispatch(_trilu_indices_form_dispatcher)
def tril_indices_from(arr, k=0):
"""
Return the indices for the lower-triangle of arr.
@@ -840,6 +901,7 @@ def tril_indices_from(arr, k=0):
return tril_indices(arr.shape[-2], k=k, m=arr.shape[-1])
+@set_module('numpy')
def triu_indices(n, k=0, m=None):
"""
Return the indices for the upper-triangle of an (n, m) array.
@@ -897,7 +959,7 @@ def triu_indices(n, k=0, m=None):
Both for indexing:
>>> a[iu1]
- array([ 0, 1, 2, 3, 5, 6, 7, 10, 11, 15])
+ array([ 0, 1, 2, ..., 10, 11, 15])
And for assigning values:
@@ -922,6 +984,7 @@ def triu_indices(n, k=0, m=None):
return nonzero(~tri(n, m, k=k-1, dtype=bool))
+@array_function_dispatch(_trilu_indices_form_dispatcher)
def triu_indices_from(arr, k=0):
"""
Return the indices for the upper-triangle of arr.
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 3f7aa32fa..586824743 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -2,6 +2,8 @@
"""
from __future__ import division, absolute_import, print_function
+import functools
+import warnings
__all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'isreal', 'nan_to_num', 'real', 'real_if_close',
@@ -9,12 +11,21 @@ __all__ = ['iscomplexobj', 'isrealobj', 'imag', 'iscomplex',
'common_type']
import numpy.core.numeric as _nx
-from numpy.core.numeric import asarray, asanyarray, array, isnan, zeros
+from numpy.core.numeric import asarray, asanyarray, isnan, zeros
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from .ufunclike import isneginf, isposinf
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy')
+
+
_typecodes_by_elsize = 'GDFgdfQqLlIiHhBb?'
-def mintypecode(typechars,typeset='GDFgdf',default='d'):
+
+@set_module('numpy')
+def mintypecode(typechars, typeset='GDFgdf', default='d'):
"""
Return the character for the minimum-size type to which given types can
be safely cast.
@@ -64,13 +75,16 @@ def mintypecode(typechars,typeset='GDFgdf',default='d'):
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
- l = []
- for t in intersection:
- i = _typecodes_by_elsize.index(t)
- l.append((i, t))
+ l = [(_typecodes_by_elsize.index(t), t) for t in intersection]
l.sort()
return l[0][1]
+
+def _asfarray_dispatcher(a, dtype=None):
+ return (a,)
+
+
+@array_function_dispatch(_asfarray_dispatcher)
def asfarray(a, dtype=_nx.float_):
"""
Return an array converted to a float type.
@@ -91,11 +105,11 @@ def asfarray(a, dtype=_nx.float_):
Examples
--------
>>> np.asfarray([2, 3])
- array([ 2., 3.])
+ array([2., 3.])
>>> np.asfarray([2, 3], dtype='float')
- array([ 2., 3.])
+ array([2., 3.])
>>> np.asfarray([2, 3], dtype='int8')
- array([ 2., 3.])
+ array([2., 3.])
"""
if not _nx.issubdtype(dtype, _nx.inexact):
@@ -103,6 +117,11 @@ def asfarray(a, dtype=_nx.float_):
return asarray(a, dtype=dtype)
+def _real_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_real_dispatcher)
def real(val):
"""
Return the real part of the complex argument.
@@ -127,13 +146,13 @@ def real(val):
--------
>>> a = np.array([1+2j, 3+4j, 5+6j])
>>> a.real
- array([ 1., 3., 5.])
+ array([1., 3., 5.])
>>> a.real = 9
>>> a
- array([ 9.+2.j, 9.+4.j, 9.+6.j])
+ array([9.+2.j, 9.+4.j, 9.+6.j])
>>> a.real = np.array([9, 8, 7])
>>> a
- array([ 9.+2.j, 8.+4.j, 7.+6.j])
+ array([9.+2.j, 8.+4.j, 7.+6.j])
>>> np.real(1 + 1j)
1.0
@@ -144,6 +163,11 @@ def real(val):
return asanyarray(val).real
+def _imag_dispatcher(val):
+ return (val,)
+
+
+@array_function_dispatch(_imag_dispatcher)
def imag(val):
"""
Return the imaginary part of the complex argument.
@@ -168,10 +192,10 @@ def imag(val):
--------
>>> a = np.array([1+2j, 3+4j, 5+6j])
>>> a.imag
- array([ 2., 4., 6.])
+ array([2., 4., 6.])
>>> a.imag = np.array([8, 10, 12])
>>> a
- array([ 1. +8.j, 3.+10.j, 5.+12.j])
+ array([1. +8.j, 3.+10.j, 5.+12.j])
>>> np.imag(1 + 1j)
1.0
@@ -182,6 +206,11 @@ def imag(val):
return asanyarray(val).imag
+def _is_type_dispatcher(x):
+ return (x,)
+
+
+@array_function_dispatch(_is_type_dispatcher)
def iscomplex(x):
"""
Returns a bool array, where True if input element is complex.
@@ -217,6 +246,8 @@ def iscomplex(x):
res = zeros(ax.shape, bool)
return res[()] # convert to scalar if needed
+
+@array_function_dispatch(_is_type_dispatcher)
def isreal(x):
"""
Returns a bool array, where True if input element is real.
@@ -247,6 +278,8 @@ def isreal(x):
"""
return imag(x) == 0
+
+@array_function_dispatch(_is_type_dispatcher)
def iscomplexobj(x):
"""
Check for a complex type or an array of complex numbers.
@@ -287,6 +320,7 @@ def iscomplexobj(x):
return issubclass(type_, _nx.complexfloating)
+@array_function_dispatch(_is_type_dispatcher)
def isrealobj(x):
"""
Return True if x is a not complex type or an array of complex numbers.
@@ -328,13 +362,24 @@ def _getmaxmin(t):
f = getlimits.finfo(t)
return f.max, f.min
-def nan_to_num(x, copy=True):
+
+def _nan_to_num_dispatcher(x, copy=None, nan=None, posinf=None, neginf=None):
+ return (x,)
+
+
+@array_function_dispatch(_nan_to_num_dispatcher)
+def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
"""
- Replace NaN with zero and infinity with large finite numbers.
+ Replace NaN with zero and infinity with large finite numbers (default
+ behaviour) or with the numbers defined by the user using the `nan`,
+ `posinf` and/or `neginf` keywords.
- If `x` is inexact, NaN is replaced by zero, and infinity and -infinity
- replaced by the respectively largest and most negative finite floating
- point values representable by ``x.dtype``.
+ If `x` is inexact, NaN is replaced by zero or by the user defined value in
+ `nan` keyword, infinity is replaced by the largest finite floating point
+ values representable by ``x.dtype`` or by the user defined value in
+ `posinf` keyword and -infinity is replaced by the most negative finite
+ floating point values representable by ``x.dtype`` or by the user defined
+ value in `neginf` keyword.
For complex dtypes, the above is applied to each of the real and
imaginary components of `x` separately.
@@ -350,8 +395,27 @@ def nan_to_num(x, copy=True):
in-place (False). The in-place operation only occurs if
casting to an array does not require a copy.
Default is True.
-
+
.. versionadded:: 1.13
+ nan : int, float, optional
+ Value to be used to fill NaN values. If no value is passed
+ then NaN values will be replaced with 0.0.
+
+ .. versionadded:: 1.17
+ posinf : int, float, optional
+ Value to be used to fill positive infinity values. If no value is
+ passed then positive infinity values will be replaced with a very
+ large number.
+
+ .. versionadded:: 1.17
+ neginf : int, float, optional
+ Value to be used to fill negative infinity values. If no value is
+ passed then negative infinity values will be replaced with a very
+ small (or negative) number.
+
+ .. versionadded:: 1.17
+
+
Returns
-------
@@ -382,13 +446,20 @@ def nan_to_num(x, copy=True):
0.0
>>> x = np.array([np.inf, -np.inf, np.nan, -128, 128])
>>> np.nan_to_num(x)
- array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000,
- -1.28000000e+002, 1.28000000e+002])
+ array([ 1.79769313e+308, -1.79769313e+308, 0.00000000e+000, # may vary
+ -1.28000000e+002, 1.28000000e+002])
+ >>> np.nan_to_num(x, nan=-9999, posinf=33333333, neginf=33333333)
+ array([ 3.3333333e+07, 3.3333333e+07, -9.9990000e+03,
+ -1.2800000e+02, 1.2800000e+02])
>>> y = np.array([complex(np.inf, np.nan), np.nan, complex(np.nan, np.inf)])
>>> np.nan_to_num(y)
- array([ 1.79769313e+308 +0.00000000e+000j,
+ array([ 1.79769313e+308 +0.00000000e+000j, # may vary
0.00000000e+000 +0.00000000e+000j,
0.00000000e+000 +1.79769313e+308j])
+ >>> np.nan_to_num(y, nan=111111, posinf=222222)
+ array([222222.+111111.j, 111111. +0.j, 111111.+222222.j])
"""
x = _nx.array(x, subok=True, copy=copy)
xtype = x.dtype.type
@@ -402,15 +473,27 @@ def nan_to_num(x, copy=True):
dest = (x.real, x.imag) if iscomplex else (x,)
maxf, minf = _getmaxmin(x.real.dtype)
+ if posinf is not None:
+ maxf = posinf
+ if neginf is not None:
+ minf = neginf
for d in dest:
- _nx.copyto(d, 0.0, where=isnan(d))
- _nx.copyto(d, maxf, where=isposinf(d))
- _nx.copyto(d, minf, where=isneginf(d))
+ idx_nan = isnan(d)
+ idx_posinf = isposinf(d)
+ idx_neginf = isneginf(d)
+ _nx.copyto(d, nan, where=idx_nan)
+ _nx.copyto(d, maxf, where=idx_posinf)
+ _nx.copyto(d, minf, where=idx_neginf)
return x[()] if isscalar else x
#-----------------------------------------------------------------------------
-def real_if_close(a,tol=100):
+def _real_if_close_dispatcher(a, tol=None):
+ return (a,)
+
+
+@array_function_dispatch(_real_if_close_dispatcher)
+def real_if_close(a, tol=100):
"""
If complex input returns a real array if complex parts are close to zero.
@@ -445,12 +528,12 @@ def real_if_close(a,tol=100):
Examples
--------
>>> np.finfo(float).eps
- 2.2204460492503131e-16
+ 2.2204460492503131e-16 # may vary
>>> np.real_if_close([2.1 + 4e-14j], tol=1000)
- array([ 2.1])
+ array([2.1])
>>> np.real_if_close([2.1 + 4e-13j], tol=1000)
- array([ 2.1 +4.00000000e-13j])
+ array([2.1+4.e-13j])
"""
a = asanyarray(a)
@@ -465,10 +548,22 @@ def real_if_close(a,tol=100):
return a
+def _asscalar_dispatcher(a):
+ # 2018-10-10, 1.16
+ warnings.warn('np.asscalar(a) is deprecated since NumPy v1.16, use '
+ 'a.item() instead', DeprecationWarning, stacklevel=3)
+ return (a,)
+
+
+@array_function_dispatch(_asscalar_dispatcher)
def asscalar(a):
"""
Convert an array of size 1 to its scalar equivalent.
+ .. deprecated:: 1.16
+
+ Deprecated, use `numpy.ndarray.item()` instead.
+
Parameters
----------
a : ndarray
@@ -484,7 +579,6 @@ def asscalar(a):
--------
>>> np.asscalar(np.array([24]))
24
-
"""
return a.item()
@@ -514,6 +608,7 @@ _namefromtype = {'S1': 'character',
'O': 'object'
}
+@set_module('numpy')
def typename(char):
"""
Return a description for the given data type code.
@@ -577,6 +672,13 @@ array_precision = {_nx.half: 0,
_nx.csingle: 1,
_nx.cdouble: 2,
_nx.clongdouble: 3}
+
+
+def _common_type_dispatcher(*arrays):
+ return arrays
+
+
+@array_function_dispatch(_common_type_dispatcher)
def common_type(*arrays):
"""
Return a scalar type which is common to the input arrays.
@@ -606,11 +708,11 @@ def common_type(*arrays):
Examples
--------
>>> np.common_type(np.arange(2, dtype=np.float32))
- <type 'numpy.float32'>
+ <class 'numpy.float32'>
>>> np.common_type(np.arange(2, dtype=np.float32), np.arange(2))
- <type 'numpy.float64'>
+ <class 'numpy.float64'>
>>> np.common_type(np.arange(4), np.array([45, 6.j]), np.array([45.0]))
- <type 'numpy.complex128'>
+ <class 'numpy.complex128'>
"""
is_complex = False
diff --git a/numpy/lib/ufunclike.py b/numpy/lib/ufunclike.py
index 6259c5445..96fd5b319 100644
--- a/numpy/lib/ufunclike.py
+++ b/numpy/lib/ufunclike.py
@@ -8,6 +8,9 @@ from __future__ import division, absolute_import, print_function
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
+from numpy.core.overrides import (
+ array_function_dispatch, ARRAY_FUNCTION_ENABLED,
+)
import warnings
import functools
@@ -37,7 +40,40 @@ def _deprecate_out_named_y(f):
return func
+def _fix_out_named_y(f):
+ """
+ Allow the out argument to be passed as the name `y` (deprecated)
+
+ This decorator should only be used if _deprecate_out_named_y is used on
+ a corresponding dispatcher function.
+ """
+ @functools.wraps(f)
+ def func(x, out=None, **kwargs):
+ if 'y' in kwargs:
+ # we already did error checking in _deprecate_out_named_y
+ out = kwargs.pop('y')
+ return f(x, out=out, **kwargs)
+
+ return func
+
+
+def _fix_and_maybe_deprecate_out_named_y(f):
+ """
+ Use the appropriate decorator, depending upon if dispatching is being used.
+ """
+ if ARRAY_FUNCTION_ENABLED:
+ return _fix_out_named_y(f)
+ else:
+ return _deprecate_out_named_y(f)
+
+
@_deprecate_out_named_y
+def _dispatcher(x, out=None):
+ return (x, out)
+
+
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_and_maybe_deprecate_out_named_y
def fix(x, out=None):
"""
Round to nearest integer towards zero.
@@ -83,7 +119,8 @@ def fix(x, out=None):
return res
-@_deprecate_out_named_y
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_and_maybe_deprecate_out_named_y
def isposinf(x, out=None):
"""
Test element-wise for positive infinity, return result as bool array.
@@ -125,11 +162,11 @@ def isposinf(x, out=None):
Examples
--------
>>> np.isposinf(np.PINF)
- array(True, dtype=bool)
+ True
>>> np.isposinf(np.inf)
- array(True, dtype=bool)
+ True
>>> np.isposinf(np.NINF)
- array(False, dtype=bool)
+ False
>>> np.isposinf([-np.inf, 0., np.inf])
array([False, False, True])
@@ -151,7 +188,8 @@ def isposinf(x, out=None):
return nx.logical_and(is_inf, signbit, out)
-@_deprecate_out_named_y
+@array_function_dispatch(_dispatcher, verify=False, module='numpy')
+@_fix_and_maybe_deprecate_out_named_y
def isneginf(x, out=None):
"""
Test element-wise for negative infinity, return result as bool array.
@@ -194,11 +232,11 @@ def isneginf(x, out=None):
Examples
--------
>>> np.isneginf(np.NINF)
- array(True, dtype=bool)
+ True
>>> np.isneginf(np.inf)
- array(False, dtype=bool)
+ False
>>> np.isneginf(np.PINF)
- array(False, dtype=bool)
+ False
>>> np.isneginf([-np.inf, 0., np.inf])
array([ True, False, False])
diff --git a/numpy/lib/utils.py b/numpy/lib/utils.py
index 249873654..3c71d2a7c 100644
--- a/numpy/lib/utils.py
+++ b/numpy/lib/utils.py
@@ -7,6 +7,7 @@ import re
import warnings
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
+from numpy.core.overrides import set_module
from numpy.core import ndarray, ufunc, asarray
import numpy as np
@@ -104,6 +105,20 @@ class _Deprecate(object):
if doc is None:
doc = depdoc
else:
+ lines = doc.expandtabs().split('\n')
+ indent = _get_indent(lines[1:])
+ if lines[0].lstrip():
+ # Indent the original first line to let inspect.cleandoc()
+ # dedent the docstring despite the deprecation notice.
+ doc = indent * ' ' + doc
+ else:
+ # Remove the same leading blank lines as cleandoc() would.
+ skip = len(lines[0]) + 1
+ for line in lines[1:]:
+ if len(line) > indent:
+ break
+ skip += len(line) + 1
+ doc = doc[skip:]
doc = '\n\n'.join([depdoc, doc])
newfunc.__doc__ = doc
try:
@@ -114,6 +129,21 @@ class _Deprecate(object):
newfunc.__dict__.update(d)
return newfunc
+
+def _get_indent(lines):
+ """
+ Determines the leading whitespace that could be removed from all the lines.
+ """
+ indent = sys.maxsize
+ for line in lines:
+ content = len(line.lstrip())
+ if content:
+ indent = min(indent, len(line) - content)
+ if indent == sys.maxsize:
+ indent = 0
+ return indent
+
+
def deprecate(*args, **kwargs):
"""
Issues a DeprecationWarning, adds warning to `old_name`'s
@@ -149,10 +179,8 @@ def deprecate(*args, **kwargs):
Warning:
>>> olduint = np.deprecate(np.uint)
+ DeprecationWarning: `uint64` is deprecated! # may vary
>>> olduint(6)
- /usr/lib/python2.5/site-packages/numpy/lib/utils.py:114:
- DeprecationWarning: uint32 is deprecated
- warnings.warn(str1, DeprecationWarning, stacklevel=2)
6
"""
@@ -164,13 +192,6 @@ def deprecate(*args, **kwargs):
fn = args[0]
args = args[1:]
- # backward compatibility -- can be removed
- # after next release
- if 'newname' in kwargs:
- kwargs['new_name'] = kwargs.pop('newname')
- if 'oldname' in kwargs:
- kwargs['old_name'] = kwargs.pop('oldname')
-
return _Deprecate(*args, **kwargs)(fn)
else:
return _Deprecate(*args, **kwargs)
@@ -207,8 +228,8 @@ def byte_bounds(a):
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
- >>> I = np.eye(2, dtype='G'); I.dtype
- dtype('complex192')
+ >>> I = np.eye(2); I.dtype
+ dtype('float64')
>>> low, high = np.byte_bounds(I)
>>> high - low == I.size*I.itemsize
True
@@ -269,17 +290,17 @@ def who(vardict=None):
>>> np.who()
Name Shape Bytes Type
===========================================================
- a 10 40 int32
+ a 10 80 int64
b 20 160 float64
- Upper bound on total bytes = 200
+ Upper bound on total bytes = 240
>>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
... 'idx':5}
>>> np.who(d)
Name Shape Bytes Type
===========================================================
- y 3 24 float64
x 2 16 float64
+ y 3 24 float64
Upper bound on total bytes = 40
"""
@@ -439,6 +460,7 @@ def _info(obj, output=sys.stdout):
print("type: %s" % obj.dtype, file=output)
+@set_module('numpy')
def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
"""
Get help information for a function, class, or module.
@@ -644,6 +666,7 @@ def info(object=None, maxwidth=76, output=sys.stdout, toplevel='numpy'):
print(inspect.getdoc(object), file=output)
+@set_module('numpy')
def source(object, output=sys.stdout):
"""
Print or write to a file the source code for a NumPy object.
@@ -701,6 +724,8 @@ _lookfor_caches = {}
# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)
+
+@set_module('numpy')
def lookfor(what, module=None, import_modules=True, regenerate=False,
output=None):
"""
@@ -735,7 +760,7 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
Examples
--------
- >>> np.lookfor('binary representation')
+ >>> np.lookfor('binary representation') # doctest: +SKIP
Search results for 'binary representation'
------------------------------------------
numpy.binary_repr
@@ -763,13 +788,8 @@ def lookfor(what, module=None, import_modules=True, regenerate=False,
if kind in ('module', 'object'):
# don't show modules or objects
continue
- ok = True
doc = docstring.lower()
- for w in whats:
- if w not in doc:
- ok = False
- break
- if ok:
+ if all(w in doc for w in whats):
found.append(name)
# Relevance sort
@@ -978,93 +998,6 @@ def _getmembers(item):
if hasattr(item, x)]
return members
-#-----------------------------------------------------------------------------
-
-# The following SafeEval class and company are adapted from Michael Spencer's
-# ASPN Python Cookbook recipe: https://code.activestate.com/recipes/364469/
-#
-# Accordingly it is mostly Copyright 2006 by Michael Spencer.
-# The recipe, like most of the other ASPN Python Cookbook recipes was made
-# available under the Python license.
-# https://en.wikipedia.org/wiki/Python_License
-
-# It has been modified to:
-# * handle unary -/+
-# * support True/False/None
-# * raise SyntaxError instead of a custom exception.
-
-class SafeEval(object):
- """
- Object to evaluate constant string expressions.
-
- This includes strings with lists, dicts and tuples using the abstract
- syntax tree created by ``compiler.parse``.
-
- .. deprecated:: 1.10.0
-
- See Also
- --------
- safe_eval
-
- """
- def __init__(self):
- # 2014-10-15, 1.10
- warnings.warn("SafeEval is deprecated in 1.10 and will be removed.",
- DeprecationWarning, stacklevel=2)
-
- def visit(self, node):
- cls = node.__class__
- meth = getattr(self, 'visit' + cls.__name__, self.default)
- return meth(node)
-
- def default(self, node):
- raise SyntaxError("Unsupported source construct: %s"
- % node.__class__)
-
- def visitExpression(self, node):
- return self.visit(node.body)
-
- def visitNum(self, node):
- return node.n
-
- def visitStr(self, node):
- return node.s
-
- def visitBytes(self, node):
- return node.s
-
- def visitDict(self, node,**kw):
- return dict([(self.visit(k), self.visit(v))
- for k, v in zip(node.keys, node.values)])
-
- def visitTuple(self, node):
- return tuple([self.visit(i) for i in node.elts])
-
- def visitList(self, node):
- return [self.visit(i) for i in node.elts]
-
- def visitUnaryOp(self, node):
- import ast
- if isinstance(node.op, ast.UAdd):
- return +self.visit(node.operand)
- elif isinstance(node.op, ast.USub):
- return -self.visit(node.operand)
- else:
- raise SyntaxError("Unknown unary op: %r" % node.op)
-
- def visitName(self, node):
- if node.id == 'False':
- return False
- elif node.id == 'True':
- return True
- elif node.id == 'None':
- return None
- else:
- raise SyntaxError("Unknown name: %s" % node.id)
-
- def visitNameConstant(self, node):
- return node.value
-
def safe_eval(source):
"""
@@ -1106,12 +1039,11 @@ def safe_eval(source):
>>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
Traceback (most recent call last):
...
- SyntaxError: Unsupported source construct: compiler.ast.CallFunc
+ ValueError: malformed node or string: <_ast.Call object at 0x...>
"""
# Local import to speed up numpy's import time.
import ast
-
return ast.literal_eval(source)
@@ -1144,17 +1076,12 @@ def _median_nancheck(data, result, axis, out):
n = n.filled(False)
if result.ndim == 0:
if n == True:
- warnings.warn("Invalid value encountered in median",
- RuntimeWarning, stacklevel=3)
if out is not None:
out[...] = data.dtype.type(np.nan)
result = out
else:
result = data.dtype.type(np.nan)
elif np.count_nonzero(n.ravel()) > 0:
- warnings.warn("Invalid value encountered in median for" +
- " %d results" % np.count_nonzero(n.ravel()),
- RuntimeWarning, stacklevel=3)
result[n] = np.nan
return result
diff --git a/numpy/linalg/__init__.py b/numpy/linalg/__init__.py
index 4b696c883..55560815d 100644
--- a/numpy/linalg/__init__.py
+++ b/numpy/linalg/__init__.py
@@ -1,53 +1,77 @@
"""
-Core Linear Algebra Tools
-=========================
-
-=============== ==========================================================
-Linear algebra basics
-==========================================================================
-norm Vector or matrix norm
-inv Inverse of a square matrix
-solve Solve a linear system of equations
-det Determinant of a square matrix
-slogdet Logarithm of the determinant of a square matrix
-lstsq Solve linear least-squares problem
-pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
- value decomposition
-matrix_power Integer power of a square matrix
-matrix_rank Calculate matrix rank using an SVD-based method
-=============== ==========================================================
-
-=============== ==========================================================
-Eigenvalues and decompositions
-==========================================================================
-eig Eigenvalues and vectors of a square matrix
-eigh Eigenvalues and eigenvectors of a Hermitian matrix
-eigvals Eigenvalues of a square matrix
-eigvalsh Eigenvalues of a Hermitian matrix
-qr QR decomposition of a matrix
-svd Singular value decomposition of a matrix
-cholesky Cholesky decomposition of a matrix
-=============== ==========================================================
-
-=============== ==========================================================
-Tensor operations
-==========================================================================
-tensorsolve Solve a linear tensor equation
-tensorinv Calculate an inverse of a tensor
-=============== ==========================================================
-
-=============== ==========================================================
+``numpy.linalg``
+================
+
+The NumPy linear algebra functions rely on BLAS and LAPACK to provide efficient
+low level implementations of standard linear algebra algorithms. Those
+libraries may be provided by NumPy itself using C versions of a subset of their
+reference implementations but, when possible, highly optimized libraries that
+take advantage of specialized processor functionality are preferred. Examples
+of such libraries are OpenBLAS, MKL (TM), and ATLAS. Because those libraries
+are multithreaded and processor dependent, environmental variables and external
+packages such as threadpoolctl may be needed to control the number of threads
+or specify the processor architecture.
+
+- OpenBLAS: https://www.openblas.net/
+- threadpoolctl: https://github.com/joblib/threadpoolctl
+
+Please note that the most-used linear algebra functions in NumPy are present in
+the main ``numpy`` namespace rather than in ``numpy.linalg``. These are:
+``dot``, ``vdot``, ``inner``, ``outer``, ``matmul``, ``tensordot``, ``einsum``,
+``einsum_path`` and ``kron``.
+
+Functions present in numpy.linalg are listed below.
+
+
+Matrix and vector products
+--------------------------
+
+ multi_dot
+ matrix_power
+
+Decompositions
+--------------
+
+ cholesky
+ qr
+ svd
+
+Matrix eigenvalues
+------------------
+
+ eig
+ eigh
+ eigvals
+ eigvalsh
+
+Norms and other numbers
+-----------------------
+
+ norm
+ cond
+ det
+ matrix_rank
+ slogdet
+
+Solving equations and inverting matrices
+----------------------------------------
+
+ solve
+ tensorsolve
+ lstsq
+ inv
+ pinv
+ tensorinv
+
Exceptions
-==========================================================================
-LinAlgError Indicates a failed linear algebra operation
-=============== ==========================================================
+----------
+
+ LinAlgError
"""
from __future__ import division, absolute_import, print_function
# To get sub-modules
-from .info import __doc__
-
from .linalg import *
from numpy._pytesttester import PytestTester
diff --git a/numpy/linalg/info.py b/numpy/linalg/info.py
deleted file mode 100644
index 646ecda04..000000000
--- a/numpy/linalg/info.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""\
-Core Linear Algebra Tools
--------------------------
-Linear algebra basics:
-
-- norm Vector or matrix norm
-- inv Inverse of a square matrix
-- solve Solve a linear system of equations
-- det Determinant of a square matrix
-- lstsq Solve linear least-squares problem
-- pinv Pseudo-inverse (Moore-Penrose) calculated using a singular
- value decomposition
-- matrix_power Integer power of a square matrix
-
-Eigenvalues and decompositions:
-
-- eig Eigenvalues and vectors of a square matrix
-- eigh Eigenvalues and eigenvectors of a Hermitian matrix
-- eigvals Eigenvalues of a square matrix
-- eigvalsh Eigenvalues of a Hermitian matrix
-- qr QR decomposition of a matrix
-- svd Singular value decomposition of a matrix
-- cholesky Cholesky decomposition of a matrix
-
-Tensor operations:
-
-- tensorsolve Solve a linear tensor equation
-- tensorinv Calculate an inverse of a tensor
-
-Exceptions:
-
-- LinAlgError Indicates a failed linear algebra operation
-
-"""
-from __future__ import division, absolute_import, print_function
-
-depends = ['core']
diff --git a/numpy/linalg/lapack_lite/clapack_scrub.py b/numpy/linalg/lapack_lite/clapack_scrub.py
index e72a39e64..434586113 100644
--- a/numpy/linalg/lapack_lite/clapack_scrub.py
+++ b/numpy/linalg/lapack_lite/clapack_scrub.py
@@ -294,9 +294,8 @@ def scrubSource(source, nsteps=None, verbose=False):
if __name__ == '__main__':
filename = sys.argv[1]
outfilename = os.path.join(sys.argv[2], os.path.basename(filename))
- fo = open(filename, 'r')
- source = fo.read()
- fo.close()
+ with open(filename, 'r') as fo:
+ source = fo.read()
if len(sys.argv) > 3:
nsteps = int(sys.argv[3])
diff --git a/numpy/linalg/lapack_lite/fortran.py b/numpy/linalg/lapack_lite/fortran.py
index 3b6ac70f0..dc0a5ebd9 100644
--- a/numpy/linalg/lapack_lite/fortran.py
+++ b/numpy/linalg/lapack_lite/fortran.py
@@ -54,7 +54,7 @@ class PushbackIterator(object):
Return an iterator for which items can be pushed back into.
Call the .pushback(item) method to have item returned as the next
- value of .next().
+ value of next().
"""
def __init__(self, iterable):
object.__init__(self)
@@ -110,15 +110,14 @@ def getDependencies(filename):
"""For a Fortran source file, return a list of routines declared as EXTERNAL
in it.
"""
- fo = open(filename)
external_pat = re.compile(r'^\s*EXTERNAL\s', re.I)
routines = []
- for lineno, line in fortranSourceLines(fo):
- m = external_pat.match(line)
- if m:
- names = line = line[m.end():].strip().split(',')
- names = [n.strip().lower() for n in names]
- names = [n for n in names if n]
- routines.extend(names)
- fo.close()
+ with open(filename) as fo:
+ for lineno, line in fortranSourceLines(fo):
+ m = external_pat.match(line)
+ if m:
+ names = line = line[m.end():].strip().split(',')
+ names = [n.strip().lower() for n in names]
+ names = [n for n in names if n]
+ routines.extend(names)
return routines
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index ccc437663..816a200eb 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -16,6 +16,7 @@ __all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
+import functools
import operator
import warnings
@@ -25,12 +26,19 @@ from numpy.core import (
add, multiply, sqrt, fastCopyAndTranspose, sum, isfinite,
finfo, errstate, geterrobj, moveaxis, amin, amax, product, abs,
atleast_2d, intp, asanyarray, object_, matmul,
- swapaxes, divide, count_nonzero, isnan
+ swapaxes, divide, count_nonzero, isnan, sign
)
from numpy.core.multiarray import normalize_axis_index
+from numpy.core.overrides import set_module
+from numpy.core import overrides
from numpy.lib.twodim_base import triu, eye
from numpy.linalg import lapack_lite, _umath_linalg
+
+array_function_dispatch = functools.partial(
+ overrides.array_function_dispatch, module='numpy.linalg')
+
+
# For Python2/3 compatibility
_N = b'N'
_V = b'V'
@@ -40,7 +48,8 @@ _L = b'L'
fortran_int = intc
-# Error object
+
+@set_module('numpy.linalg')
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
@@ -68,7 +77,6 @@ class LinAlgError(Exception):
numpy.linalg.LinAlgError: Singular matrix
"""
- pass
def _determine_error_states():
@@ -198,11 +206,6 @@ def _assertRankAtLeast2(*arrays):
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
-def _assertSquareness(*arrays):
- for a in arrays:
- if max(a.shape) != min(a.shape):
- raise LinAlgError('Array must be square')
-
def _assertNdSquareness(*arrays):
for a in arrays:
m, n = a.shape[-2:]
@@ -242,6 +245,11 @@ def transpose(a):
# Linear equations
+def _tensorsolve_dispatcher(a, b, axes=None):
+ return (a, b)
+
+
+@array_function_dispatch(_tensorsolve_dispatcher)
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
@@ -311,6 +319,12 @@ def tensorsolve(a, b, axes=None):
res.shape = oldshape
return res
+
+def _solve_dispatcher(a, b):
+ return (a, b)
+
+
+@array_function_dispatch(_solve_dispatcher)
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
@@ -343,7 +357,7 @@ def solve(a, b):
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
- The solutions are computed using LAPACK routine _gesv
+ The solutions are computed using LAPACK routine ``_gesv``.
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
@@ -363,7 +377,7 @@ def solve(a, b):
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
- array([ 2., 3.])
+ array([2., 3.])
Check that the solution is correct:
@@ -391,6 +405,11 @@ def solve(a, b):
return wrap(r.astype(result_t, copy=False))
+def _tensorinv_dispatcher(a, ind=None):
+ return (a,)
+
+
+@array_function_dispatch(_tensorinv_dispatcher)
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
@@ -460,6 +479,11 @@ def tensorinv(a, ind=2):
# Matrix inversion
+def _unary_dispatcher(a):
+ return (a,)
+
+
+@array_function_dispatch(_unary_dispatcher)
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
@@ -511,10 +535,10 @@ def inv(a):
>>> a = np.array([[[1., 2.], [3., 4.]], [[1, 3], [3, 5]]])
>>> inv(a)
- array([[[-2. , 1. ],
- [ 1.5, -0.5]],
- [[-5. , 2. ],
- [ 3. , -1. ]]])
+ array([[[-2. , 1. ],
+ [ 1.5 , -0.5 ]],
+ [[-1.25, 0.75],
+ [ 0.75, -0.25]]])
"""
a, wrap = _makearray(a)
@@ -528,6 +552,11 @@ def inv(a):
return wrap(ainv.astype(result_t, copy=False))
+def _matrix_power_dispatcher(a, n):
+ return (a,)
+
+
+@array_function_dispatch(_matrix_power_dispatcher)
def matrix_power(a, n):
"""
Raise a square matrix to the (integer) power `n`.
@@ -542,7 +571,7 @@ def matrix_power(a, n):
Parameters
----------
a : (..., M, M) array_like
- Matrix to be "powered."
+ Matrix to be "powered".
n : int
The exponent can be any integer or long integer, positive,
negative, or zero.
@@ -645,6 +674,8 @@ def matrix_power(a, n):
# Cholesky decomposition
+
+@array_function_dispatch(_unary_dispatcher)
def cholesky(a):
"""
Cholesky decomposition.
@@ -699,21 +730,21 @@ def cholesky(a):
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
- array([[ 1.+0.j, 0.-2.j],
+ array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
- array([[ 1.+0.j, 0.+0.j],
- [ 0.+2.j, 1.+0.j]])
+ array([[1.+0.j, 0.+0.j],
+ [0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
- array([[ 1.+0.j, 0.-2.j],
- [ 0.+2.j, 5.+0.j]])
+ array([[1.+0.j, 0.-2.j],
+ [0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
- array([[ 1.+0.j, 0.+0.j],
- [ 0.+2.j, 1.+0.j]])
+ array([[1.+0.j, 0.+0.j],
+ [0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
- >>> LA.cholesky(np.matrix(A))
+ >>> np.linalg.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
@@ -728,8 +759,14 @@ def cholesky(a):
r = gufunc(a, signature=signature, extobj=extobj)
return wrap(r.astype(result_t, copy=False))
+
# QR decompostion
+def _qr_dispatcher(a, mode=None):
+ return (a,)
+
+
+@array_function_dispatch(_qr_dispatcher)
def qr(a, mode='reduced'):
"""
Compute the qr factorization of a matrix.
@@ -741,15 +778,13 @@ def qr(a, mode='reduced'):
----------
a : array_like, shape (M, N)
Matrix to be factored.
- mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
+ mode : {'reduced', 'complete', 'r', 'raw'}, optional
If K = min(M, N), then
* 'reduced' : returns q, r with dimensions (M, K), (K, N) (default)
* 'complete' : returns q, r with dimensions (M, M), (M, N)
* 'r' : returns r only with dimensions (K, N)
* 'raw' : returns h, tau with dimensions (N, M), (K,)
- * 'full' : alias of 'reduced', deprecated
- * 'economic' : returns h from 'raw', deprecated.
The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
see the notes for more information. The default is 'reduced', and to
@@ -783,8 +818,8 @@ def qr(a, mode='reduced'):
Notes
-----
- This is an interface to the LAPACK routines dgeqrf, zgeqrf,
- dorgqr, and zungqr.
+ This is an interface to the LAPACK routines ``dgeqrf``, ``zgeqrf``,
+ ``dorgqr``, and ``zungqr``.
For more information on the qr factorization, see for example:
https://en.wikipedia.org/wiki/QR_factorization
@@ -811,12 +846,8 @@ def qr(a, mode='reduced'):
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
- >>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
- >>> # But only triu parts are guaranteed equal when mode='economic'
- >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
- True
Example illustrating a common use of `qr`: solving of least squares
problems
@@ -841,9 +872,9 @@ def qr(a, mode='reduced'):
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
- >>> q, r = LA.qr(A)
+ >>> q, r = np.linalg.qr(A)
>>> p = np.dot(q.T, b)
- >>> np.dot(LA.inv(r), p)
+ >>> np.dot(np.linalg.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
@@ -853,12 +884,12 @@ def qr(a, mode='reduced'):
msg = "".join((
"The 'full' option is deprecated in favor of 'reduced'.\n",
"For backward compatibility let mode default."))
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'reduced'
elif mode in ('e', 'economic'):
# 2013-04-01, 1.8
msg = "The 'economic' option is deprecated."
- warnings.warn(msg, DeprecationWarning, stacklevel=2)
+ warnings.warn(msg, DeprecationWarning, stacklevel=3)
mode = 'economic'
else:
raise ValueError("Unrecognized mode '%s'" % mode)
@@ -945,6 +976,7 @@ def qr(a, mode='reduced'):
# Eigenvalues
+@array_function_dispatch(_unary_dispatcher)
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
@@ -972,7 +1004,7 @@ def eigvals(a):
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
- eigvalsh : eigenvalues of real symmetric or complex Hermitian
+ eigvalsh : eigenvalues of real symmetric or complex Hermitian
(conjugate symmetric) arrays.
eigh : eigenvalues and eigenvectors of real symmetric or complex
Hermitian (conjugate symmetric) arrays.
@@ -985,7 +1017,7 @@ def eigvals(a):
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
- This is implemented using the _geev LAPACK routines which compute
+ This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
Examples
@@ -1003,7 +1035,7 @@ def eigvals(a):
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
- Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
+ Now multiply a diagonal matrix by ``Q`` on one side and by ``Q.T`` on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
@@ -1011,7 +1043,7 @@ def eigvals(a):
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
- array([ 1., -1.])
+ array([ 1., -1.]) # random
"""
a, wrap = _makearray(a)
@@ -1034,6 +1066,12 @@ def eigvals(a):
return w.astype(result_t, copy=False)
+
+def _eigvalsh_dispatcher(a, UPLO=None):
+ return (a,)
+
+
+@array_function_dispatch(_eigvalsh_dispatcher)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a complex Hermitian or real symmetric matrix.
@@ -1080,31 +1118,31 @@ def eigvalsh(a, UPLO='L'):
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
- The eigenvalues are computed using LAPACK routines _syevd, _heevd
+ The eigenvalues are computed using LAPACK routines ``_syevd``, ``_heevd``.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
- array([ 0.17157288, 5.82842712])
+ array([ 0.17157288, 5.82842712]) # may vary
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
- array([[ 5.+2.j, 9.-2.j],
- [ 0.+2.j, 2.-1.j]])
+ array([[5.+2.j, 9.-2.j],
+ [0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eigvals()
>>> # with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
- array([[ 5.+0.j, 0.-2.j],
- [ 0.+2.j, 2.+0.j]])
+ array([[5.+0.j, 0.-2.j],
+ [0.+2.j, 2.+0.j]])
>>> wa = LA.eigvalsh(a)
>>> wb = LA.eigvals(b)
>>> wa; wb
- array([ 1., 6.])
- array([ 6.+0.j, 1.+0.j])
+ array([1., 6.])
+ array([6.+0.j, 1.+0.j])
"""
UPLO = UPLO.upper()
@@ -1135,6 +1173,7 @@ def _convertarray(a):
# Eigenvectors
+@array_function_dispatch(_unary_dispatcher)
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
@@ -1169,7 +1208,7 @@ def eig(a):
--------
eigvals : eigenvalues of a non-symmetric array.
- eigh : eigenvalues and eigenvectors of a real symmetric or complex
+ eigh : eigenvalues and eigenvectors of a real symmetric or complex
Hermitian (conjugate symmetric) array.
eigvalsh : eigenvalues of a real symmetric or complex Hermitian
@@ -1183,7 +1222,7 @@ def eig(a):
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
- This is implemented using the _geev LAPACK routines which compute
+ This is implemented using the ``_geev`` LAPACK routines which compute
the eigenvalues and eigenvectors of general square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
@@ -1219,29 +1258,29 @@ def eig(a):
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
- array([ 1., 2., 3.])
- array([[ 1., 0., 0.],
- [ 0., 1., 0.],
- [ 0., 0., 1.]])
+ array([1., 2., 3.])
+ array([[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
- array([ 1. + 1.j, 1. - 1.j])
- array([[ 0.70710678+0.j , 0.70710678+0.j ],
- [ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
+ array([1.+1.j, 1.-1.j])
+ array([[0.70710678+0.j , 0.70710678-0.j ],
+ [0. -0.70710678j, 0. +0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
- note that a.conj().T = a, i.e., a is Hermitian.
+ note that ``a.conj().T == a``, i.e., `a` is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
- array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
- array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
- [ 0.70710678+0.j , 0.00000000+0.70710678j]])
+ array([2.+0.j, 0.+0.j])
+ array([[ 0. +0.70710678j, 0.70710678+0.j ], # may vary
+ [ 0.70710678+0.j , -0. +0.70710678j]])
Be careful about round-off error!
@@ -1249,9 +1288,9 @@ def eig(a):
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
- array([ 1., 1.])
- array([[ 1., 0.],
- [ 0., 1.]])
+ array([1., 1.])
+ array([[1., 0.],
+ [0., 1.]])
"""
a, wrap = _makearray(a)
@@ -1276,6 +1315,7 @@ def eig(a):
return w.astype(result_t, copy=False), wrap(vt)
+@array_function_dispatch(_eigvalsh_dispatcher)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a complex Hermitian
@@ -1328,8 +1368,8 @@ def eigh(a, UPLO='L'):
Broadcasting rules apply, see the `numpy.linalg` documentation for
details.
- The eigenvalues/eigenvectors are computed using LAPACK routines _syevd,
- _heevd
+ The eigenvalues/eigenvectors are computed using LAPACK routines ``_syevd``,
+ ``_heevd``.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
@@ -1346,49 +1386,49 @@ def eigh(a, UPLO='L'):
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
- array([[ 1.+0.j, 0.-2.j],
+ array([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
- array([ 0.17157288, 5.82842712])
- array([[-0.92387953+0.j , -0.38268343+0.j ],
- [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
+ array([0.17157288, 5.82842712])
+ array([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
+ [ 0. +0.38268343j, 0. -0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
- array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
+ array([5.55111512e-17+0.0000000e+00j, 0.00000000e+00+1.2490009e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
- array([ 0.+0.j, 0.+0.j])
+ array([0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
- matrix([[ 1.+0.j, 0.-2.j],
+ matrix([[ 1.+0.j, -0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
- array([ 0.17157288, 5.82842712])
- matrix([[-0.92387953+0.j , -0.38268343+0.j ],
- [ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
+ array([0.17157288, 5.82842712])
+ matrix([[-0.92387953+0.j , -0.38268343+0.j ], # may vary
+ [ 0. +0.38268343j, 0. -0.92387953j]])
>>> # demonstrate the treatment of the imaginary part of the diagonal
>>> a = np.array([[5+2j, 9-2j], [0+2j, 2-1j]])
>>> a
- array([[ 5.+2.j, 9.-2.j],
- [ 0.+2.j, 2.-1.j]])
+ array([[5.+2.j, 9.-2.j],
+ [0.+2.j, 2.-1.j]])
>>> # with UPLO='L' this is numerically equivalent to using LA.eig() with:
>>> b = np.array([[5.+0.j, 0.-2.j], [0.+2.j, 2.-0.j]])
>>> b
- array([[ 5.+0.j, 0.-2.j],
- [ 0.+2.j, 2.+0.j]])
+ array([[5.+0.j, 0.-2.j],
+ [0.+2.j, 2.+0.j]])
>>> wa, va = LA.eigh(a)
>>> wb, vb = LA.eig(b)
>>> wa; wb
- array([ 1., 6.])
- array([ 6.+0.j, 1.+0.j])
+ array([1., 6.])
+ array([6.+0.j, 1.+0.j])
>>> va; vb
- array([[-0.44721360-0.j , -0.89442719+0.j ],
- [ 0.00000000+0.89442719j, 0.00000000-0.4472136j ]])
- array([[ 0.89442719+0.j , 0.00000000-0.4472136j],
- [ 0.00000000-0.4472136j, 0.89442719+0.j ]])
+ array([[-0.4472136 +0.j , -0.89442719+0.j ], # may vary
+ [ 0. +0.89442719j, 0. -0.4472136j ]])
+ array([[ 0.89442719+0.j , -0. +0.4472136j],
+ [-0. +0.4472136j, 0.89442719+0.j ]])
"""
UPLO = UPLO.upper()
if UPLO not in ('L', 'U'):
@@ -1415,7 +1455,12 @@ def eigh(a, UPLO='L'):
# Singular value decomposition
-def svd(a, full_matrices=True, compute_uv=True):
+def _svd_dispatcher(a, full_matrices=None, compute_uv=None, hermitian=None):
+ return (a,)
+
+
+@array_function_dispatch(_svd_dispatcher)
+def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
"""
Singular Value Decomposition.
@@ -1436,6 +1481,12 @@ def svd(a, full_matrices=True, compute_uv=True):
compute_uv : bool, optional
Whether or not to compute `u` and `vh` in addition to `s`. True
by default.
+ hermitian : bool, optional
+ If True, `a` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -1539,6 +1590,24 @@ def svd(a, full_matrices=True, compute_uv=True):
"""
a, wrap = _makearray(a)
+
+ if hermitian:
+ # note: lapack returns eigenvalues in reverse order to our contract.
+ # reversing is cheap by design in numpy, so we do so to be consistent
+ if compute_uv:
+ s, u = eigh(a)
+ s = s[..., ::-1]
+ u = u[..., ::-1]
+ # singular values are unsigned, move the sign into v
+ vt = transpose(u * sign(s)[..., None, :]).conjugate()
+ s = abs(s)
+ return wrap(u), s, wrap(vt)
+ else:
+ s = eigvalsh(a)
+ s = s[..., ::-1]
+ s = abs(s)
+ return s
+
_assertRankAtLeast2(a)
t, result_t = _commonType(a)
@@ -1575,6 +1644,11 @@ def svd(a, full_matrices=True, compute_uv=True):
return s
+def _cond_dispatcher(x, p=None):
+ return (x,)
+
+
+@array_function_dispatch(_cond_dispatcher)
def cond(x, p=None):
"""
Compute the condition number of a matrix.
@@ -1649,9 +1723,9 @@ def cond(x, p=None):
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
- 0.70710678118654746
- >>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
- 0.70710678118654746
+ 0.70710678118654746 # may vary
+ >>> min(LA.svd(a, compute_uv=False))*min(LA.svd(LA.inv(a), compute_uv=False))
+ 0.70710678118654746 # may vary
"""
x = asarray(x) # in case we have a matrix
@@ -1692,6 +1766,11 @@ def cond(x, p=None):
return r
+def _matrix_rank_dispatcher(M, tol=None, hermitian=None):
+ return (M,)
+
+
+@array_function_dispatch(_matrix_rank_dispatcher)
def matrix_rank(M, tol=None, hermitian=False):
"""
Return matrix rank of array using SVD method
@@ -1705,9 +1784,9 @@ def matrix_rank(M, tol=None, hermitian=False):
Parameters
----------
M : {(M,), (..., M, N)} array_like
- input vector or stack of matrices
+ Input vector or stack of matrices.
tol : (...) array_like, float, optional
- threshold below which SVD values are considered zero. If `tol` is
+ Threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * max(M.shape) * eps``.
@@ -1721,6 +1800,11 @@ def matrix_rank(M, tol=None, hermitian=False):
.. versionadded:: 1.14
+ Returns
+ -------
+ rank : (...) array_like
+ Rank of M.
+
Notes
-----
The default threshold to detect rank deficiency is a test on the magnitude
@@ -1783,10 +1867,7 @@ def matrix_rank(M, tol=None, hermitian=False):
M = asarray(M)
if M.ndim < 2:
return int(not all(M==0))
- if hermitian:
- S = abs(eigvalsh(M))
- else:
- S = svd(M, compute_uv=False)
+ S = svd(M, compute_uv=False, hermitian=hermitian)
if tol is None:
tol = S.max(axis=-1, keepdims=True) * max(M.shape[-2:]) * finfo(S.dtype).eps
else:
@@ -1796,7 +1877,12 @@ def matrix_rank(M, tol=None, hermitian=False):
# Generalized inverse
-def pinv(a, rcond=1e-15 ):
+def _pinv_dispatcher(a, rcond=None, hermitian=None):
+ return (a,)
+
+
+@array_function_dispatch(_pinv_dispatcher)
+def pinv(a, rcond=1e-15, hermitian=False):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
@@ -1813,9 +1899,15 @@ def pinv(a, rcond=1e-15 ):
Matrix or stack of matrices to be pseudo-inverted.
rcond : (...) array_like of float
Cutoff for small singular values.
- Singular values smaller (in modulus) than
- `rcond` * largest_singular_value (again, in modulus)
- are set to zero. Broadcasts against the stack of matrices
+ Singular values less than or equal to
+ ``rcond * largest_singular_value`` are set to zero.
+ Broadcasts against the stack of matrices.
+ hermitian : bool, optional
+ If True, `a` is assumed to be Hermitian (symmetric if real-valued),
+ enabling a more efficient method for finding singular values.
+ Defaults to False.
+
+ .. versionadded:: 1.17.0
Returns
-------
@@ -1869,7 +1961,7 @@ def pinv(a, rcond=1e-15 ):
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
a = a.conjugate()
- u, s, vt = svd(a, full_matrices=False)
+ u, s, vt = svd(a, full_matrices=False, hermitian=hermitian)
# discard small singular values
cutoff = rcond[..., newaxis] * amax(s, axis=-1, keepdims=True)
@@ -1880,8 +1972,11 @@ def pinv(a, rcond=1e-15 ):
res = matmul(transpose(vt), multiply(s[..., newaxis], transpose(u)))
return wrap(res)
+
# Determinant
+
+@array_function_dispatch(_unary_dispatcher)
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
@@ -1923,7 +2018,7 @@ def slogdet(a):
.. versionadded:: 1.6.0
The determinant is computed via LU factorization using the LAPACK
- routine z/dgetrf.
+ routine ``z/dgetrf``.
Examples
@@ -1933,7 +2028,7 @@ def slogdet(a):
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
- (-1, 0.69314718055994529)
+ (-1, 0.69314718055994529) # may vary
>>> sign * np.exp(logdet)
-2.0
@@ -1967,6 +2062,8 @@ def slogdet(a):
logdet = logdet.astype(real_t, copy=False)
return sign, logdet
+
+@array_function_dispatch(_unary_dispatcher)
def det(a):
"""
Compute the determinant of an array.
@@ -1995,7 +2092,7 @@ def det(a):
details.
The determinant is computed via LU factorization using the LAPACK
- routine z/dgetrf.
+ routine ``z/dgetrf``.
Examples
--------
@@ -2003,7 +2100,7 @@ def det(a):
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
- -2.0
+ -2.0 # may vary
Computing determinants for a stack of matrices:
@@ -2023,19 +2120,25 @@ def det(a):
r = r.astype(result_t, copy=False)
return r
+
# Linear Least Squares
+def _lstsq_dispatcher(a, b, rcond=None):
+ return (a, b)
+
+
+@array_function_dispatch(_lstsq_dispatcher)
def lstsq(a, b, rcond="warn"):
- """
+ r"""
Return the least-squares solution to a linear matrix equation.
- Solves the equation `a x = b` by computing a vector `x` that
- minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
- be under-, well-, or over- determined (i.e., the number of
- linearly independent rows of `a` can be less than, equal to, or
- greater than its number of linearly independent columns). If `a`
- is square and of full rank, then `x` (but for round-off error) is
- the "exact" solution of the equation.
+ Solves the equation :math:`a x = b` by computing a vector `x` that
+ minimizes the squared Euclidean 2-norm :math:`\| b - a x \|^2_2`.
+ The equation may be under-, well-, or over-determined (i.e., the
+ number of linearly independent rows of `a` can be less than, equal
+ to, or greater than its number of linearly independent columns).
+ If `a` is square and of full rank, then `x` (but for round-off error)
+ is the "exact" solution of the equation.
Parameters
----------
@@ -2104,15 +2207,15 @@ def lstsq(a, b, rcond="warn"):
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y, rcond=None)[0]
- >>> print(m, c)
- 1.0 -0.95
+ >>> m, c
+ (1.0 -0.95) # may vary
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
- >>> plt.plot(x, y, 'o', label='Original data', markersize=10)
- >>> plt.plot(x, m*x + c, 'r', label='Fitted line')
- >>> plt.legend()
+ >>> _ = plt.plot(x, y, 'o', label='Original data', markersize=10)
+ >>> _ = plt.plot(x, m*x + c, 'r', label='Fitted line')
+ >>> _ = plt.legend()
>>> plt.show()
"""
@@ -2128,6 +2231,7 @@ def lstsq(a, b, rcond="warn"):
raise LinAlgError('Incompatible dimensions')
t, result_t = _commonType(a, b)
+ # FIXME: real_t is unused
real_t = _linalgRealType(t)
result_real_t = _realType(result_t)
@@ -2140,7 +2244,7 @@ def lstsq(a, b, rcond="warn"):
"To use the future default and silence this warning "
"we advise to pass `rcond=None`, to keep using the old, "
"explicitly pass `rcond=-1`.",
- FutureWarning, stacklevel=2)
+ FutureWarning, stacklevel=3)
rcond = -1
if rcond is None:
rcond = finfo(t).eps * max(n, m)
@@ -2183,7 +2287,7 @@ def lstsq(a, b, rcond="warn"):
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
- This is a private utility function used by numpy.linalg.norm().
+ This is a private utility function used by `numpy.linalg.norm()`.
Parameters
----------
@@ -2191,7 +2295,7 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
- This should be either numpy.amin or numpy.amax or numpy.sum.
+ This should be either numpy.amin or `numpy.amax` or `numpy.sum`.
Returns
-------
@@ -2204,10 +2308,15 @@ def _multi_svd_norm(x, row_axis, col_axis, op):
"""
y = moveaxis(x, (row_axis, col_axis), (-2, -1))
- result = op(svd(y, compute_uv=0), axis=-1)
+ result = op(svd(y, compute_uv=False), axis=-1)
return result
+def _norm_dispatcher(x, ord=None, axis=None, keepdims=None):
+ return (x,)
+
+
+@array_function_dispatch(_norm_dispatcher)
def norm(x, ord=None, axis=None, keepdims=False):
"""
Matrix or vector norm.
@@ -2284,7 +2393,7 @@ def norm(x, ord=None, axis=None, keepdims=False):
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
- array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
+ array([-4, -3, -2, ..., 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
@@ -2320,13 +2429,13 @@ def norm(x, ord=None, axis=None, keepdims=False):
7.3484692283495345
>>> LA.norm(a, -2)
- nan
+ 0.0
>>> LA.norm(b, -2)
- 1.8570331885190563e-016
+ 1.8570331885190563e-016 # may vary
>>> LA.norm(a, 3)
- 5.8480354764257312
+ 5.8480354764257312 # may vary
>>> LA.norm(a, -3)
- nan
+ 0.0
Using the `axis` argument to compute vector norms:
@@ -2450,6 +2559,11 @@ def norm(x, ord=None, axis=None, keepdims=False):
# multi_dot
+def _multidot_dispatcher(arrays):
+ return arrays
+
+
+@array_function_dispatch(_multidot_dispatcher)
def multi_dot(arrays):
"""
Compute the dot product of two or more arrays in a single function call,
@@ -2496,18 +2610,18 @@ def multi_dot(arrays):
>>> from numpy.linalg import multi_dot
>>> # Prepare some data
- >>> A = np.random.random(10000, 100)
- >>> B = np.random.random(100, 1000)
- >>> C = np.random.random(1000, 5)
- >>> D = np.random.random(5, 333)
+ >>> A = np.random.random((10000, 100))
+ >>> B = np.random.random((100, 1000))
+ >>> C = np.random.random((1000, 5))
+ >>> D = np.random.random((5, 333))
>>> # the actual dot multiplication
- >>> multi_dot([A, B, C, D])
+ >>> _ = multi_dot([A, B, C, D])
instead of::
- >>> np.dot(np.dot(np.dot(A, B), C), D)
+ >>> _ = np.dot(np.dot(np.dot(A, B), C), D)
>>> # or
- >>> A.dot(B).dot(C).dot(D)
+ >>> _ = A.dot(B).dot(C).dot(D)
Notes
-----
@@ -2517,7 +2631,7 @@ def multi_dot(arrays):
def cost(A, B):
return A.shape[0] * A.shape[1] * B.shape[1]
- Let's assume we have three matrices
+ Assume we have three matrices
:math:`A_{10x100}, B_{100x5}, C_{5x50}`.
The costs for the two different parenthesizations are as follows::
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 320d123e7..173e81e9c 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -13,13 +13,14 @@ import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
-from numpy import multiply, atleast_2d, inf, asarray, matrix
+from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
- assert_almost_equal, assert_allclose, suppress_warnings
+ assert_almost_equal, assert_allclose, suppress_warnings,
+ assert_raises_regex,
)
@@ -632,18 +633,9 @@ class TestEig(EigCases):
assert_(isinstance(a, np.ndarray))
-class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+class SVDBaseTests(object):
+ hermitian = False
- def do(self, a, b, tags):
- u, s, vt = linalg.svd(a, 0)
- assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
- np.asarray(vt)),
- rtol=get_rtol(u.dtype))
- assert_(consistent_subclass(u, a))
- assert_(consistent_subclass(vt, a))
-
-
-class TestSVD(SVDCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
@@ -651,24 +643,52 @@ class TestSVD(SVDCases):
assert_equal(u.dtype, dtype)
assert_equal(s.dtype, get_real_dtype(dtype))
assert_equal(vh.dtype, dtype)
- s = linalg.svd(x, compute_uv=False)
+ s = linalg.svd(x, compute_uv=False, hermitian=self.hermitian)
assert_equal(s.dtype, get_real_dtype(dtype))
+
+class SVDCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
+
+ def do(self, a, b, tags):
+ u, s, vt = linalg.svd(a, False)
+ assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
+ np.asarray(vt)),
+ rtol=get_rtol(u.dtype))
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+
+class TestSVD(SVDCases, SVDBaseTests):
def test_empty_identity(self):
""" Empty input should put an identity matrix in u or vh """
x = np.empty((4, 0))
- u, s, vh = linalg.svd(x, compute_uv=True)
+ u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (4, 4))
assert_equal(vh.shape, (0, 0))
assert_equal(u, np.eye(4))
x = np.empty((0, 4))
- u, s, vh = linalg.svd(x, compute_uv=True)
+ u, s, vh = linalg.svd(x, compute_uv=True, hermitian=self.hermitian)
assert_equal(u.shape, (0, 0))
assert_equal(vh.shape, (4, 4))
assert_equal(vh, np.eye(4))
+class SVDHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
+
+ def do(self, a, b, tags):
+ u, s, vt = linalg.svd(a, False, hermitian=True)
+ assert_allclose(a, dot_generalized(np.asarray(u) * np.asarray(s)[..., None, :],
+ np.asarray(vt)),
+ rtol=get_rtol(u.dtype))
+ assert_(consistent_subclass(u, a))
+ assert_(consistent_subclass(vt, a))
+
+
+class TestSVDHermitian(SVDHermitianCases, SVDBaseTests):
+ hermitian = True
+
+
class CondCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
# cond(x, p) for p in (None, 2, -2)
@@ -796,6 +816,20 @@ class TestPinv(PinvCases):
pass
+class PinvHermitianCases(HermitianTestCase, HermitianGeneralizedTestCase):
+
+ def do(self, a, b, tags):
+ a_ginv = linalg.pinv(a, hermitian=True)
+ # `a @ a_ginv == I` does not hold if a is singular
+ dot = dot_generalized
+ assert_almost_equal(dot(dot(a, a_ginv), a), a, single_decimal=5, double_decimal=11)
+ assert_(consistent_subclass(a_ginv, a))
+
+
+class TestPinvHermitian(PinvHermitianCases):
+ pass
+
+
class DetCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
def do(self, a, b, tags):
@@ -863,7 +897,7 @@ class LstsqCases(LinalgSquareTestCase, LinalgNonsquareTestCase):
def do(self, a, b, tags):
arr = np.asarray(a)
m, n = arr.shape
- u, s, vt = linalg.svd(a, 0)
+ u, s, vt = linalg.svd(a, False)
x, residuals, rank, sv = linalg.lstsq(a, b, rcond=-1)
if m == 0:
assert_((x == 0).all())
@@ -931,6 +965,14 @@ class TestLstsq(LstsqCases):
assert_equal(rank, min(m, n))
assert_equal(s.shape, (min(m, n),))
+ def test_incompatible_dims(self):
+ # use modified version of docstring example
+ x = np.array([0, 1, 2, 3])
+ y = np.array([-1, 0.2, 0.9, 2.1, 3.3])
+ A = np.vstack([x, np.ones(len(x))]).T
+ with assert_raises_regex(LinAlgError, "Incompatible dimensions"):
+ linalg.lstsq(A, y, rcond=None)
+
@pytest.mark.parametrize('dt', [np.dtype(c) for c in '?bBhHiIqQefdgFDGO'])
class TestMatrixPower(object):
@@ -946,7 +988,6 @@ class TestMatrixPower(object):
dtnoinv = [object, np.dtype('e'), np.dtype('g'), np.dtype('G')]
def test_large_power(self, dt):
- power = matrix_power
rshft = self.rshft_1.astype(dt)
assert_equal(
matrix_power(rshft, 2**100 + 2**10 + 2**5 + 0), self.rshft_0)
@@ -1610,8 +1651,6 @@ class TestQR(object):
def test_qr_empty(self, m, n):
k = min(m, n)
a = np.empty((m, n))
- a_type = type(a)
- a_dtype = a.dtype
self.check_qr(a)
@@ -1835,6 +1874,14 @@ class TestMultiDot(object):
assert_almost_equal(multi_dot([A, B, C]), A.dot(B).dot(C))
assert_almost_equal(multi_dot([A, B, C]), np.dot(A, np.dot(B, C)))
+ def test_basic_function_with_two_arguments(self):
+ # separate code path with two arguments
+ A = np.random.random((6, 2))
+ B = np.random.random((2, 6))
+
+ assert_almost_equal(multi_dot([A, B]), A.dot(B))
+ assert_almost_equal(multi_dot([A, B]), np.dot(A, B))
+
def test_basic_function_with_dynamic_programing_optimization(self):
# multi_dot with four or more arguments uses the dynamic programing
# optimization and therefore deserve a separate
@@ -1907,3 +1954,51 @@ class TestMultiDot(object):
def test_too_few_input_arrays(self):
assert_raises(ValueError, multi_dot, [])
assert_raises(ValueError, multi_dot, [np.random.random((3, 3))])
+
+
+class TestTensorinv(object):
+
+ @pytest.mark.parametrize("arr, ind", [
+ (np.ones((4, 6, 8, 2)), 2),
+ (np.ones((3, 3, 2)), 1),
+ ])
+ def test_non_square_handling(self, arr, ind):
+ with assert_raises(LinAlgError):
+ linalg.tensorinv(arr, ind=ind)
+
+ @pytest.mark.parametrize("shape, ind", [
+ # examples from docstring
+ ((4, 6, 8, 3), 2),
+ ((24, 8, 3), 1),
+ ])
+ def test_tensorinv_shape(self, shape, ind):
+ a = np.eye(24)
+ a.shape = shape
+ ainv = linalg.tensorinv(a=a, ind=ind)
+ expected = a.shape[ind:] + a.shape[:ind]
+ actual = ainv.shape
+ assert_equal(actual, expected)
+
+ @pytest.mark.parametrize("ind", [
+ 0, -2,
+ ])
+ def test_tensorinv_ind_limit(self, ind):
+ a = np.eye(24)
+ a.shape = (4, 6, 8, 3)
+ with assert_raises(ValueError):
+ linalg.tensorinv(a=a, ind=ind)
+
+ def test_tensorinv_result(self):
+ # mimic a docstring example
+ a = np.eye(24)
+ a.shape = (24, 8, 3)
+ ainv = linalg.tensorinv(a, ind=1)
+ b = np.ones(24)
+ assert_allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
+
+
+def test_unsupported_commontype():
+ # linalg gracefully handles unsupported type
+ arr = np.array([[1, -2], [2, 5]], dtype='float16')
+ with assert_raises_regex(TypeError, "unsupported in linalg"):
+ linalg.cholesky(arr)
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index 9fc68a7aa..ee103c327 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -2522,8 +2522,6 @@ init_@lapack_func@(GESDD_PARAMS_t *params,
params->VT = vt;
params->RWORK = NULL;
params->IWORK = iwork;
- params->M = m;
- params->N = n;
params->LDA = ld;
params->LDU = ld;
params->LDVT = vt_column_count;
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index a6c3e64d6..bb3788c9a 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -44,16 +44,11 @@ from numpy.compat import (
getargspec, formatargspec, long, basestring, unicode, bytes
)
from numpy import expand_dims
-from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
from numpy.core._internal import recursive
+from numpy.compat import pickle
-if sys.version_info[0] >= 3:
- import pickle
-else:
- import cPickle as pickle
-
__all__ = [
'MAError', 'MaskError', 'MaskType', 'MaskedArray', 'abs', 'absolute',
'add', 'all', 'allclose', 'allequal', 'alltrue', 'amax', 'amin',
@@ -64,14 +59,14 @@ __all__ = [
'choose', 'clip', 'common_fill_value', 'compress', 'compressed',
'concatenate', 'conjugate', 'convolve', 'copy', 'correlate', 'cos', 'cosh',
'count', 'cumprod', 'cumsum', 'default_fill_value', 'diag', 'diagonal',
- 'diff', 'divide', 'dump', 'dumps', 'empty', 'empty_like', 'equal', 'exp',
+ 'diff', 'divide', 'empty', 'empty_like', 'equal', 'exp',
'expand_dims', 'fabs', 'filled', 'fix_invalid', 'flatten_mask',
'flatten_structured_array', 'floor', 'floor_divide', 'fmod',
'frombuffer', 'fromflex', 'fromfunction', 'getdata', 'getmask',
'getmaskarray', 'greater', 'greater_equal', 'harden_mask', 'hypot',
'identity', 'ids', 'indices', 'inner', 'innerproduct', 'isMA',
'isMaskedArray', 'is_mask', 'is_masked', 'isarray', 'left_shift',
- 'less', 'less_equal', 'load', 'loads', 'log', 'log10', 'log2',
+ 'less', 'less_equal', 'log', 'log10', 'log2',
'logical_and', 'logical_not', 'logical_or', 'logical_xor', 'make_mask',
'make_mask_descr', 'make_mask_none', 'mask_or', 'masked',
'masked_array', 'masked_equal', 'masked_greater',
@@ -82,7 +77,7 @@ __all__ = [
'maximum_fill_value', 'mean', 'min', 'minimum', 'minimum_fill_value',
'mod', 'multiply', 'mvoid', 'ndim', 'negative', 'nomask', 'nonzero',
'not_equal', 'ones', 'outer', 'outerproduct', 'power', 'prod',
- 'product', 'ptp', 'put', 'putmask', 'rank', 'ravel', 'remainder',
+ 'product', 'ptp', 'put', 'putmask', 'ravel', 'remainder',
'repeat', 'reshape', 'resize', 'right_shift', 'round', 'round_',
'set_fill_value', 'shape', 'sin', 'sinh', 'size', 'soften_mask',
'sometrue', 'sort', 'sqrt', 'squeeze', 'std', 'subtract', 'sum',
@@ -450,36 +445,37 @@ def _check_fill_value(fill_value, ndtype):
If fill_value is not None, its value is forced to the given dtype.
The result is always a 0d array.
+
"""
ndtype = np.dtype(ndtype)
- fields = ndtype.fields
if fill_value is None:
fill_value = default_fill_value(ndtype)
- elif fields:
- fdtype = [(_[0], _[1]) for _ in ndtype.descr]
+ elif ndtype.names is not None:
if isinstance(fill_value, (ndarray, np.void)):
try:
- fill_value = np.array(fill_value, copy=False, dtype=fdtype)
+ fill_value = np.array(fill_value, copy=False, dtype=ndtype)
except ValueError:
err_msg = "Unable to transform %s to dtype %s"
- raise ValueError(err_msg % (fill_value, fdtype))
+ raise ValueError(err_msg % (fill_value, ndtype))
else:
fill_value = np.asarray(fill_value, dtype=object)
fill_value = np.array(_recursive_set_fill_value(fill_value, ndtype),
dtype=ndtype)
else:
if isinstance(fill_value, basestring) and (ndtype.char not in 'OSVU'):
+ # Note this check doesn't work if fill_value is not a scalar
err_msg = "Cannot set fill value of string with array of dtype %s"
raise TypeError(err_msg % ndtype)
else:
# In case we want to convert 1e20 to int.
+ # Also in case of converting string arrays.
try:
fill_value = np.array(fill_value, copy=False, dtype=ndtype)
- except OverflowError:
- # Raise TypeError instead of OverflowError. OverflowError
- # is seldom used, and the real problem here is that the
- # passed fill_value is not compatible with the ndtype.
- err_msg = "Fill value %s overflows dtype %s"
+ except (OverflowError, ValueError):
+ # Raise TypeError instead of OverflowError or ValueError.
+ # OverflowError is seldom used, and the real problem here is
+ # that the passed fill_value is not compatible with the ndtype.
+ err_msg = "Cannot convert fill_value %s to dtype %s"
raise TypeError(err_msg % (fill_value, ndtype))
return np.array(fill_value)
@@ -519,18 +515,18 @@ def set_fill_value(a, fill_value):
array([0, 1, 2, 3, 4])
>>> a = ma.masked_where(a < 3, a)
>>> a
- masked_array(data = [-- -- -- 3 4],
- mask = [ True True True False False],
- fill_value=999999)
+ masked_array(data=[--, --, --, 3, 4],
+ mask=[ True, True, True, False, False],
+ fill_value=999999)
>>> ma.set_fill_value(a, -999)
>>> a
- masked_array(data = [-- -- -- 3 4],
- mask = [ True True True False False],
- fill_value=-999)
+ masked_array(data=[--, --, --, 3, 4],
+ mask=[ True, True, True, False, False],
+ fill_value=-999)
Nothing happens if `a` is not a masked array.
- >>> a = range(5)
+ >>> a = list(range(5))
>>> a
[0, 1, 2, 3, 4]
>>> ma.set_fill_value(a, 100)
@@ -692,13 +688,12 @@ def getdata(a, subok=True):
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
- masked_array(data =
- [[1 --]
- [3 4]],
- mask =
- [[False True]
- [False False]],
- fill_value=999999)
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=2)
>>> ma.getdata(a)
array([[1, 2],
[3, 4]])
@@ -755,20 +750,19 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
--------
>>> x = np.ma.array([1., -1, np.nan, np.inf], mask=[1] + [0]*3)
>>> x
- masked_array(data = [-- -1.0 nan inf],
- mask = [ True False False False],
- fill_value = 1e+20)
+ masked_array(data=[--, -1.0, nan, inf],
+ mask=[ True, False, False, False],
+ fill_value=1e+20)
>>> np.ma.fix_invalid(x)
- masked_array(data = [-- -1.0 -- --],
- mask = [ True False True True],
- fill_value = 1e+20)
+ masked_array(data=[--, -1.0, --, --],
+ mask=[ True, False, True, True],
+ fill_value=1e+20)
>>> fixed = np.ma.fix_invalid(x)
>>> fixed.data
- array([ 1.00000000e+00, -1.00000000e+00, 1.00000000e+20,
- 1.00000000e+20])
+ array([ 1.e+00, -1.e+00, 1.e+20, 1.e+20])
>>> x.data
- array([ 1., -1., NaN, Inf])
+ array([ 1., -1., nan, inf])
"""
a = masked_array(a, copy=copy, mask=mask, subok=True)
@@ -781,6 +775,10 @@ def fix_invalid(a, mask=nomask, copy=True, fill_value=None):
a._data[invalid] = fill_value
return a
+def is_string_or_list_of_strings(val):
+ return (isinstance(val, basestring) or
+ (isinstance(val, list) and val and
+ builtins.all(isinstance(s, basestring) for s in val)))
###############################################################################
# Ufuncs #
@@ -802,7 +800,7 @@ class _DomainCheckInterval(object):
def __init__(self, a, b):
"domain_check_interval(a,b)(x) = true where x < a or y > b"
- if (a > b):
+ if a > b:
(a, b) = (b, a)
self.a = a
self.b = b
@@ -1062,7 +1060,7 @@ class _MaskedBinaryOperation(_MaskedUFunc):
if t.shape == ():
t = t.reshape(1)
if m is not nomask:
- m = make_mask(m, copy=1)
+ m = make_mask(m, copy=True)
m.shape = (1,)
if m is nomask:
@@ -1167,7 +1165,7 @@ class _DomainedBinaryOperation(_MaskedUFunc):
if domain is not None:
m |= domain(da, db)
# Take care of the scalar case first
- if (not m.ndim):
+ if not m.ndim:
if m:
return masked
else:
@@ -1199,7 +1197,6 @@ exp = _MaskedUnaryOperation(umath.exp)
conjugate = _MaskedUnaryOperation(umath.conjugate)
sin = _MaskedUnaryOperation(umath.sin)
cos = _MaskedUnaryOperation(umath.cos)
-tan = _MaskedUnaryOperation(umath.tan)
arctan = _MaskedUnaryOperation(umath.arctan)
arcsinh = _MaskedUnaryOperation(umath.arcsinh)
sinh = _MaskedUnaryOperation(umath.sinh)
@@ -1345,9 +1342,9 @@ def make_mask_descr(ndtype):
--------
>>> import numpy.ma as ma
>>> dtype = np.dtype({'names':['foo', 'bar'],
- 'formats':[np.float32, int]})
+ ... 'formats':[np.float32, np.int64]})
>>> dtype
- dtype([('foo', '<f4'), ('bar', '<i4')])
+ dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_descr(dtype)
dtype([('foo', '|b1'), ('bar', '|b1')])
>>> ma.make_mask_descr(np.float32)
@@ -1380,13 +1377,12 @@ def getmask(a):
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
- masked_array(data =
- [[1 --]
- [3 4]],
- mask =
- [[False True]
- [False False]],
- fill_value=999999)
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=2)
>>> ma.getmask(a)
array([[False, True],
[False, False]])
@@ -1401,12 +1397,11 @@ def getmask(a):
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
- masked_array(data =
- [[1 2]
- [3 4]],
- mask =
- False,
- fill_value=999999)
+ masked_array(
+ data=[[1, 2],
+ [3, 4]],
+ mask=False,
+ fill_value=999999)
>>> ma.nomask
False
>>> ma.getmask(b) == ma.nomask
@@ -1444,13 +1439,12 @@ def getmaskarray(arr):
>>> import numpy.ma as ma
>>> a = ma.masked_equal([[1,2],[3,4]], 2)
>>> a
- masked_array(data =
- [[1 --]
- [3 4]],
- mask =
- [[False True]
- [False False]],
- fill_value=999999)
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=2)
>>> ma.getmaskarray(a)
array([[False, True],
[False, False]])
@@ -1459,13 +1453,12 @@ def getmaskarray(arr):
>>> b = ma.masked_array([[1,2],[3,4]])
>>> b
- masked_array(data =
- [[1 2]
- [3 4]],
- mask =
- False,
- fill_value=999999)
- >>> >ma.getmaskarray(b)
+ masked_array(
+ data=[[1, 2],
+ [3, 4]],
+ mask=False,
+ fill_value=999999)
+ >>> ma.getmaskarray(b)
array([[False, False],
[False, False]])
@@ -1503,9 +1496,9 @@ def is_mask(m):
>>> import numpy.ma as ma
>>> m = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> m
- masked_array(data = [-- 1 -- 2 3],
- mask = [ True False True False False],
- fill_value=999999)
+ masked_array(data=[--, 1, --, 2, 3],
+ mask=[ True, False, True, False, False],
+ fill_value=0)
>>> ma.is_mask(m)
False
>>> ma.is_mask(m.mask)
@@ -1526,14 +1519,14 @@ def is_mask(m):
Arrays with complex dtypes don't return True.
>>> dtype = np.dtype({'names':['monty', 'pithon'],
- 'formats':[bool, bool]})
+ ... 'formats':[bool, bool]})
>>> dtype
dtype([('monty', '|b1'), ('pithon', '|b1')])
>>> m = np.array([(True, False), (False, True), (True, False)],
- dtype=dtype)
+ ... dtype=dtype)
>>> m
- array([(True, False), (False, True), (True, False)],
- dtype=[('monty', '|b1'), ('pithon', '|b1')])
+ array([( True, False), (False, True), ( True, False)],
+ dtype=[('monty', '?'), ('pithon', '?')])
>>> ma.is_mask(m)
False
@@ -1561,7 +1554,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
Return `m` as a boolean mask, creating a copy if necessary or requested.
The function can accept any sequence that is convertible to integers,
or ``nomask``. Does not require that contents must be 0s and 1s, values
- of 0 are interepreted as False, everything else as True.
+ of 0 are interpreted as False, everything else as True.
Parameters
----------
@@ -1599,7 +1592,7 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
>>> m = np.zeros(4)
>>> m
- array([ 0., 0., 0., 0.])
+ array([0., 0., 0., 0.])
>>> ma.make_mask(m)
False
>>> ma.make_mask(m, shrink=False)
@@ -1615,11 +1608,11 @@ def make_mask(m, copy=False, shrink=True, dtype=MaskType):
>>> arr
[(1, 0), (0, 1), (1, 0), (1, 0)]
>>> dtype = np.dtype({'names':['man', 'mouse'],
- 'formats':[int, int]})
+ ... 'formats':[np.int64, np.int64]})
>>> arr = np.array(arr, dtype=dtype)
>>> arr
array([(1, 0), (0, 1), (1, 0), (1, 0)],
- dtype=[('man', '<i4'), ('mouse', '<i4')])
+ dtype=[('man', '<i8'), ('mouse', '<i8')])
>>> ma.make_mask(arr, dtype=dtype)
array([(True, False), (False, True), (True, False), (True, False)],
dtype=[('man', '|b1'), ('mouse', '|b1')])
@@ -1678,9 +1671,9 @@ def make_mask_none(newshape, dtype=None):
Defining a more complex dtype.
>>> dtype = np.dtype({'names':['foo', 'bar'],
- 'formats':[np.float32, int]})
+ ... 'formats':[np.float32, np.int64]})
>>> dtype
- dtype([('foo', '<f4'), ('bar', '<i4')])
+ dtype([('foo', '<f4'), ('bar', '<i8')])
>>> ma.make_mask_none((3,), dtype=dtype)
array([(False, False), (False, False), (False, False)],
dtype=[('foo', '|b1'), ('bar', '|b1')])
@@ -1750,7 +1743,7 @@ def mask_or(m1, m2, copy=False, shrink=True):
if m1 is m2 and is_mask(m1):
return m1
(dtype1, dtype2) = (getattr(m1, 'dtype', None), getattr(m2, 'dtype', None))
- if (dtype1 != dtype2):
+ if dtype1 != dtype2:
raise ValueError("Incompatible dtypes '%s'<>'%s'" % (dtype1, dtype2))
if dtype1.names is not None:
# Allocate an output mask array with the properly broadcast shape.
@@ -1778,16 +1771,16 @@ def flatten_mask(mask):
Examples
--------
>>> mask = np.array([0, 0, 1])
- >>> flatten_mask(mask)
+ >>> np.ma.flatten_mask(mask)
array([False, False, True])
>>> mask = np.array([(0, 0), (0, 1)], dtype=[('a', bool), ('b', bool)])
- >>> flatten_mask(mask)
+ >>> np.ma.flatten_mask(mask)
array([False, False, False, True])
>>> mdtype = [('a', bool), ('b', [('ba', bool), ('bb', bool)])]
>>> mask = np.array([(0, (0, 0)), (0, (0, 1))], dtype=mdtype)
- >>> flatten_mask(mask)
+ >>> np.ma.flatten_mask(mask)
array([False, False, False, False, False, True])
"""
@@ -1872,38 +1865,39 @@ def masked_where(condition, a, copy=True):
>>> a
array([0, 1, 2, 3])
>>> ma.masked_where(a <= 2, a)
- masked_array(data = [-- -- -- 3],
- mask = [ True True True False],
- fill_value=999999)
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
Mask array `b` conditional on `a`.
>>> b = ['a', 'b', 'c', 'd']
>>> ma.masked_where(a == 2, b)
- masked_array(data = [a b -- d],
- mask = [False False True False],
- fill_value=N/A)
+ masked_array(data=['a', 'b', --, 'd'],
+ mask=[False, False, True, False],
+ fill_value='N/A',
+ dtype='<U1')
Effect of the `copy` argument.
>>> c = ma.masked_where(a <= 2, a)
>>> c
- masked_array(data = [-- -- -- 3],
- mask = [ True True True False],
- fill_value=999999)
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
>>> c[0] = 99
>>> c
- masked_array(data = [99 -- -- 3],
- mask = [False True True False],
- fill_value=999999)
+ masked_array(data=[99, --, --, 3],
+ mask=[False, True, True, False],
+ fill_value=999999)
>>> a
array([0, 1, 2, 3])
>>> c = ma.masked_where(a <= 2, a, copy=False)
>>> c[0] = 99
>>> c
- masked_array(data = [99 -- -- 3],
- mask = [False True True False],
- fill_value=999999)
+ masked_array(data=[99, --, --, 3],
+ mask=[False, True, True, False],
+ fill_value=999999)
>>> a
array([99, 1, 2, 3])
@@ -1912,19 +1906,19 @@ def masked_where(condition, a, copy=True):
>>> a = np.arange(4)
>>> a = ma.masked_where(a == 2, a)
>>> a
- masked_array(data = [0 1 -- 3],
- mask = [False False True False],
- fill_value=999999)
+ masked_array(data=[0, 1, --, 3],
+ mask=[False, False, True, False],
+ fill_value=999999)
>>> b = np.arange(4)
>>> b = ma.masked_where(b == 0, b)
>>> b
- masked_array(data = [-- 1 2 3],
- mask = [ True False False False],
- fill_value=999999)
+ masked_array(data=[--, 1, 2, 3],
+ mask=[ True, False, False, False],
+ fill_value=999999)
>>> ma.masked_where(a == 3, b)
- masked_array(data = [-- 1 -- --],
- mask = [ True False True True],
- fill_value=999999)
+ masked_array(data=[--, 1, --, --],
+ mask=[ True, False, True, True],
+ fill_value=999999)
"""
# Make sure that condition is a valid standard-type mask.
@@ -1964,9 +1958,9 @@ def masked_greater(x, value, copy=True):
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater(a, 2)
- masked_array(data = [0 1 2 --],
- mask = [False False False True],
- fill_value=999999)
+ masked_array(data=[0, 1, 2, --],
+ mask=[False, False, False, True],
+ fill_value=999999)
"""
return masked_where(greater(x, value), x, copy=copy)
@@ -1990,9 +1984,9 @@ def masked_greater_equal(x, value, copy=True):
>>> a
array([0, 1, 2, 3])
>>> ma.masked_greater_equal(a, 2)
- masked_array(data = [0 1 -- --],
- mask = [False False True True],
- fill_value=999999)
+ masked_array(data=[0, 1, --, --],
+ mask=[False, False, True, True],
+ fill_value=999999)
"""
return masked_where(greater_equal(x, value), x, copy=copy)
@@ -2016,9 +2010,9 @@ def masked_less(x, value, copy=True):
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less(a, 2)
- masked_array(data = [-- -- 2 3],
- mask = [ True True False False],
- fill_value=999999)
+ masked_array(data=[--, --, 2, 3],
+ mask=[ True, True, False, False],
+ fill_value=999999)
"""
return masked_where(less(x, value), x, copy=copy)
@@ -2042,9 +2036,9 @@ def masked_less_equal(x, value, copy=True):
>>> a
array([0, 1, 2, 3])
>>> ma.masked_less_equal(a, 2)
- masked_array(data = [-- -- -- 3],
- mask = [ True True True False],
- fill_value=999999)
+ masked_array(data=[--, --, --, 3],
+ mask=[ True, True, True, False],
+ fill_value=999999)
"""
return masked_where(less_equal(x, value), x, copy=copy)
@@ -2068,9 +2062,9 @@ def masked_not_equal(x, value, copy=True):
>>> a
array([0, 1, 2, 3])
>>> ma.masked_not_equal(a, 2)
- masked_array(data = [-- -- 2 --],
- mask = [ True True False True],
- fill_value=999999)
+ masked_array(data=[--, --, 2, --],
+ mask=[ True, True, False, True],
+ fill_value=999999)
"""
return masked_where(not_equal(x, value), x, copy=copy)
@@ -2096,9 +2090,9 @@ def masked_equal(x, value, copy=True):
>>> a
array([0, 1, 2, 3])
>>> ma.masked_equal(a, 2)
- masked_array(data = [0 1 -- 3],
- mask = [False False True False],
- fill_value=999999)
+ masked_array(data=[0, 1, --, 3],
+ mask=[False, False, True, False],
+ fill_value=2)
"""
output = masked_where(equal(x, value), x, copy=copy)
@@ -2127,16 +2121,16 @@ def masked_inside(x, v1, v2, copy=True):
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_inside(x, -0.3, 0.3)
- masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
- mask = [False False True True False False],
- fill_value=1e+20)
+ masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
+ mask=[False, False, True, True, False, False],
+ fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_inside(x, 0.3, -0.3)
- masked_array(data = [0.31 1.2 -- -- -0.4 -1.1],
- mask = [False False True True False False],
- fill_value=1e+20)
+ masked_array(data=[0.31, 1.2, --, --, -0.4, -1.1],
+ mask=[False, False, True, True, False, False],
+ fill_value=1e+20)
"""
if v2 < v1:
@@ -2167,16 +2161,16 @@ def masked_outside(x, v1, v2, copy=True):
>>> import numpy.ma as ma
>>> x = [0.31, 1.2, 0.01, 0.2, -0.4, -1.1]
>>> ma.masked_outside(x, -0.3, 0.3)
- masked_array(data = [-- -- 0.01 0.2 -- --],
- mask = [ True True False False True True],
- fill_value=1e+20)
+ masked_array(data=[--, --, 0.01, 0.2, --, --],
+ mask=[ True, True, False, False, True, True],
+ fill_value=1e+20)
The order of `v1` and `v2` doesn't matter.
>>> ma.masked_outside(x, 0.3, -0.3)
- masked_array(data = [-- -- 0.01 0.2 -- --],
- mask = [ True True False False True True],
- fill_value=1e+20)
+ masked_array(data=[--, --, 0.01, 0.2, --, --],
+ mask=[ True, True, False, False, True, True],
+ fill_value=1e+20)
"""
if v2 < v1:
@@ -2221,20 +2215,27 @@ def masked_object(x, value, copy=True, shrink=True):
>>> food = np.array(['green_eggs', 'ham'], dtype=object)
>>> # don't eat spoiled food
>>> eat = ma.masked_object(food, 'green_eggs')
- >>> print(eat)
- [-- ham]
+ >>> eat
+ masked_array(data=[--, 'ham'],
+ mask=[ True, False],
+ fill_value='green_eggs',
+ dtype=object)
>>> # plain ol` ham is boring
>>> fresh_food = np.array(['cheese', 'ham', 'pineapple'], dtype=object)
>>> eat = ma.masked_object(fresh_food, 'green_eggs')
- >>> print(eat)
- [cheese ham pineapple]
+ >>> eat
+ masked_array(data=['cheese', 'ham', 'pineapple'],
+ mask=False,
+ fill_value='green_eggs',
+ dtype=object)
Note that `mask` is set to ``nomask`` if possible.
>>> eat
- masked_array(data = [cheese ham pineapple],
- mask = False,
- fill_value=?)
+ masked_array(data=['cheese', 'ham', 'pineapple'],
+ mask=False,
+ fill_value='green_eggs',
+ dtype=object)
"""
if isMaskedArray(x):
@@ -2289,16 +2290,16 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
>>> import numpy.ma as ma
>>> x = np.array([1, 1.1, 2, 1.1, 3])
>>> ma.masked_values(x, 1.1)
- masked_array(data = [1.0 -- 2.0 -- 3.0],
- mask = [False True False True False],
- fill_value=1.1)
+ masked_array(data=[1.0, --, 2.0, --, 3.0],
+ mask=[False, True, False, True, False],
+ fill_value=1.1)
Note that `mask` is set to ``nomask`` if possible.
>>> ma.masked_values(x, 1.5)
- masked_array(data = [ 1. 1.1 2. 1.1 3. ],
- mask = False,
- fill_value=1.5)
+ masked_array(data=[1. , 1.1, 2. , 1.1, 3. ],
+ mask=False,
+ fill_value=1.5)
For integers, the fill value will be different in general to the
result of ``masked_equal``.
@@ -2307,13 +2308,13 @@ def masked_values(x, value, rtol=1e-5, atol=1e-8, copy=True, shrink=True):
>>> x
array([0, 1, 2, 3, 4])
>>> ma.masked_values(x, 2)
- masked_array(data = [0 1 -- 3 4],
- mask = [False False True False False],
- fill_value=2)
+ masked_array(data=[0, 1, --, 3, 4],
+ mask=[False, False, True, False, False],
+ fill_value=2)
>>> ma.masked_equal(x, 2)
- masked_array(data = [0 1 -- 3 4],
- mask = [False False True False False],
- fill_value=999999)
+ masked_array(data=[0, 1, --, 3, 4],
+ mask=[False, False, True, False, False],
+ fill_value=2)
"""
xnew = filled(x, value)
@@ -2347,11 +2348,11 @@ def masked_invalid(a, copy=True):
>>> a[2] = np.NaN
>>> a[3] = np.PINF
>>> a
- array([ 0., 1., NaN, Inf, 4.])
+ array([ 0., 1., nan, inf, 4.])
>>> ma.masked_invalid(a)
- masked_array(data = [0.0 1.0 -- -- 4.0],
- mask = [False False True True False],
- fill_value=1e+20)
+ masked_array(data=[0.0, 1.0, --, --, 4.0],
+ mask=[False, False, True, True, False],
+ fill_value=1e+20)
"""
a = np.array(a, copy=copy, subok=True)
@@ -2512,7 +2513,7 @@ def flatten_structured_array(a):
--------
>>> ndtype = [('a', int), ('b', float)]
>>> a = np.array([(1, 1), (2, 2)], dtype=ndtype)
- >>> flatten_structured_array(a)
+ >>> np.ma.flatten_structured_array(a)
array([[1., 1.],
[2., 2.]])
@@ -2680,17 +2681,13 @@ class MaskedIterator(object):
--------
>>> x = np.ma.array([3, 2], mask=[0, 1])
>>> fl = x.flat
- >>> fl.next()
+ >>> next(fl)
3
- >>> fl.next()
- masked_array(data = --,
- mask = True,
- fill_value = 1e+20)
- >>> fl.next()
+ >>> next(fl)
+ masked
+ >>> next(fl)
Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- File "/home/ralf/python/numpy/numpy/ma/core.py", line 2243, in next
- d = self.dataiter.next()
+ ...
StopIteration
"""
@@ -3012,11 +3009,13 @@ class MaskedArray(ndarray):
except (TypeError, AttributeError):
# When _mask.shape is not writable (because it's a void)
pass
- # Finalize the fill_value for structured arrays
- if self.dtype.names is not None:
- if self._fill_value is None:
- self._fill_value = _check_fill_value(None, self.dtype)
- return
+
+ # Finalize the fill_value
+ if self._fill_value is not None:
+ self._fill_value = _check_fill_value(self._fill_value, self.dtype)
+ elif self.dtype.names is not None:
+ # Finalize the default fill_value for structured arrays
+ self._fill_value = _check_fill_value(None, self.dtype)
def __array_wrap__(self, obj, context=None):
"""
@@ -3076,7 +3075,7 @@ class MaskedArray(ndarray):
def view(self, dtype=None, type=None, fill_value=None):
"""
- Return a view of the MaskedArray data
+ Return a view of the MaskedArray data.
Parameters
----------
@@ -3090,6 +3089,14 @@ class MaskedArray(ndarray):
type : Python type, optional
Type of the returned view, either ndarray or a subclass. The
default None results in type preservation.
+ fill_value : scalar, optional
+ The value to use for invalid entries (None by default).
+ If None, then this argument is inferred from the passed `dtype`, or
+ in its absence the original array, as discussed in the notes below.
+
+ See Also
+ --------
+ numpy.ndarray.view : Equivalent method on ndarray object.
Notes
-----
@@ -3142,7 +3149,7 @@ class MaskedArray(ndarray):
# also make the mask be a view (so attr changes to the view's
# mask do no affect original object's mask)
# (especially important to avoid affecting np.masked singleton)
- if (getmask(output) is not nomask):
+ if getmask(output) is not nomask:
output._mask = output._mask.view()
# Make sure to reset the _fill_value if needed
@@ -3155,7 +3162,6 @@ class MaskedArray(ndarray):
else:
output.fill_value = fill_value
return output
- view.__doc__ = ndarray.view.__doc__
def __getitem__(self, indx):
"""
@@ -3244,7 +3250,7 @@ class MaskedArray(ndarray):
# Inherit attributes from self
dout._update_from(self)
# Check the fill_value
- if isinstance(indx, basestring):
+ if is_string_or_list_of_strings(indx):
if self._fill_value is not None:
dout._fill_value = self._fill_value[indx]
@@ -3381,7 +3387,7 @@ class MaskedArray(ndarray):
if mask is masked:
mask = True
- if (current_mask is nomask):
+ if current_mask is nomask:
# Make sure the mask is set
# Just don't do anything if there's nothing to do.
if mask is nomask:
@@ -3442,39 +3448,43 @@ class MaskedArray(ndarray):
_set_mask = __setmask__
- def _get_mask(self):
- """Return the current mask.
+ @property
+ def mask(self):
+ """ Current mask. """
- """
# We could try to force a reshape, but that wouldn't work in some
# cases.
- return self._mask
+ # Return a view so that the dtype and shape cannot be changed in place
+ # This still preserves nomask by identity
+ return self._mask.view()
- mask = property(fget=_get_mask, fset=__setmask__, doc="Mask")
+ @mask.setter
+ def mask(self, value):
+ self.__setmask__(value)
- def _get_recordmask(self):
+ @property
+ def recordmask(self):
"""
- Return the mask of the records.
-
- A record is masked when all the fields are masked.
+ Get or set the mask of the array if it has no named fields. For
+ structured arrays, returns a ndarray of booleans where entries are
+ ``True`` if **all** the fields are masked, ``False`` otherwise:
+ >>> x = np.ma.array([(1, 1), (2, 2), (3, 3), (4, 4), (5, 5)],
+ ... mask=[(0, 0), (1, 0), (1, 1), (0, 1), (0, 0)],
+ ... dtype=[('a', int), ('b', int)])
+ >>> x.recordmask
+ array([False, False, True, False, False])
"""
+
_mask = self._mask.view(ndarray)
if _mask.dtype.names is None:
return _mask
return np.all(flatten_structured_array(_mask), axis=-1)
- def _set_recordmask(self):
- """
- Return the mask of the records.
-
- A record is masked when all the fields are masked.
-
- """
+ @recordmask.setter
+ def recordmask(self, mask):
raise NotImplementedError("Coming soon: setting the mask per records!")
- recordmask = property(fget=_get_recordmask)
-
def harden_mask(self):
"""
Force the mask to hard.
@@ -3505,8 +3515,10 @@ class MaskedArray(ndarray):
self._hardmask = False
return self
- hardmask = property(fget=lambda self: self._hardmask,
- doc="Hardness of the mask")
+ @property
+ def hardmask(self):
+ """ Hardness of the mask """
+ return self._hardmask
def unshare_mask(self):
"""
@@ -3526,8 +3538,10 @@ class MaskedArray(ndarray):
self._sharedmask = False
return self
- sharedmask = property(fget=lambda self: self._sharedmask,
- doc="Share status of the mask (read-only).")
+ @property
+ def sharedmask(self):
+ """ Share status of the mask (read-only). """
+ return self._sharedmask
def shrink_mask(self):
"""
@@ -3548,6 +3562,11 @@ class MaskedArray(ndarray):
array([[False, False],
[False, False]])
>>> x.shrink_mask()
+ masked_array(
+ data=[[1, 2],
+ [3, 4]],
+ mask=False,
+ fill_value=999999)
>>> x.mask
False
@@ -3555,39 +3574,46 @@ class MaskedArray(ndarray):
self._mask = _shrink_mask(self._mask)
return self
- baseclass = property(fget=lambda self: self._baseclass,
- doc="Class of the underlying data (read-only).")
+ @property
+ def baseclass(self):
+ """ Class of the underlying data (read-only). """
+ return self._baseclass
def _get_data(self):
- """Return the current data, as a view of the original
- underlying data.
+ """
+ Returns the underlying data, as a view of the masked array.
+
+ If the underlying data is a subclass of :class:`numpy.ndarray`, it is
+ returned as such.
+ >>> x = np.ma.array(np.matrix([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
+ >>> x.data
+ matrix([[1, 2],
+ [3, 4]])
+
+ The type of the data can be accessed through the :attr:`baseclass`
+ attribute.
"""
return ndarray.view(self, self._baseclass)
_data = property(fget=_get_data)
data = property(fget=_get_data)
- def _get_flat(self):
- "Return a flat iterator."
+ @property
+ def flat(self):
+ """ Return a flat iterator, or set a flattened version of self to value. """
return MaskedIterator(self)
- def _set_flat(self, value):
- "Set a flattened version of self to value."
+ @flat.setter
+ def flat(self, value):
y = self.ravel()
y[:] = value
- flat = property(fget=_get_flat, fset=_set_flat,
- doc="Flat version of the array.")
-
- def get_fill_value(self):
+ @property
+ def fill_value(self):
"""
- Return the filling value of the masked array.
-
- Returns
- -------
- fill_value : scalar
- The filling value.
+ The filling value of the masked array is a scalar. When setting, None
+ will set to a default based on the data type.
Examples
--------
@@ -3600,8 +3626,17 @@ class MaskedArray(ndarray):
(1e+20+0j)
>>> x = np.ma.array([0, 1.], fill_value=-np.inf)
- >>> x.get_fill_value()
+ >>> x.fill_value
-inf
+ >>> x.fill_value = np.pi
+ >>> x.fill_value
+ 3.1415926535897931 # may vary
+
+ Reset to default:
+
+ >>> x.fill_value = None
+ >>> x.fill_value
+ 1e+20
"""
if self._fill_value is None:
@@ -3615,36 +3650,8 @@ class MaskedArray(ndarray):
return self._fill_value[()]
return self._fill_value
- def set_fill_value(self, value=None):
- """
- Set the filling value of the masked array.
-
- Parameters
- ----------
- value : scalar, optional
- The new filling value. Default is None, in which case a default
- based on the data type is used.
-
- See Also
- --------
- ma.set_fill_value : Equivalent function.
-
- Examples
- --------
- >>> x = np.ma.array([0, 1.], fill_value=-np.inf)
- >>> x.fill_value
- -inf
- >>> x.set_fill_value(np.pi)
- >>> x.fill_value
- 3.1415926535897931
-
- Reset to default:
-
- >>> x.set_fill_value()
- >>> x.fill_value
- 1e+20
-
- """
+ @fill_value.setter
+ def fill_value(self, value=None):
target = _check_fill_value(value, self.dtype)
_fill_value = self._fill_value
if _fill_value is None:
@@ -3654,8 +3661,9 @@ class MaskedArray(ndarray):
# Don't overwrite the attribute, just fill it (for propagation)
_fill_value[()] = target
- fill_value = property(fget=get_fill_value, fset=set_fill_value,
- doc="Filling value.")
+ # kept for compatibility
+ get_fill_value = fill_value.fget
+ set_fill_value = fill_value.fset
def filled(self, fill_value=None):
"""
@@ -3685,9 +3693,9 @@ class MaskedArray(ndarray):
--------
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
- array([1, 2, -999, 4, -999])
+ array([ 1, 2, -999, 4, -999])
>>> type(x.filled())
- <type 'numpy.ndarray'>
+ <class 'numpy.ndarray'>
Subclassing is preserved. This means that if, e.g., the data part of
the masked array is a recarray, `filled` returns a recarray:
@@ -3752,7 +3760,7 @@ class MaskedArray(ndarray):
>>> x.compressed()
array([0, 1])
>>> type(x.compressed())
- <type 'numpy.ndarray'>
+ <class 'numpy.ndarray'>
"""
data = ndarray.ravel(self._data)
@@ -3794,25 +3802,29 @@ class MaskedArray(ndarray):
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print(x)
- [[1 -- 3]
- [-- 5 --]
- [7 -- 9]]
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
>>> x.compress([1, 0, 1])
- masked_array(data = [1 3],
- mask = [False False],
- fill_value=999999)
+ masked_array(data=[1, 3],
+ mask=[False, False],
+ fill_value=999999)
>>> x.compress([1, 0, 1], axis=1)
- masked_array(data =
- [[1 3]
- [-- --]
- [7 9]],
- mask =
- [[False False]
- [ True True]
- [False False]],
- fill_value=999999)
+ masked_array(
+ data=[[1, 3],
+ [--, --],
+ [7, 9]],
+ mask=[[False, False],
+ [ True, True],
+ [False, False]],
+ fill_value=999999)
"""
# Get the basic components
@@ -4016,6 +4028,16 @@ class MaskedArray(ndarray):
check = check.view(type(self))
check._update_from(self)
check._mask = mask
+
+ # Cast fill value to bool_ if needed. If it cannot be cast, the
+ # default boolean fill value is used.
+ if check._fill_value is not None:
+ try:
+ fill = _check_fill_value(check._fill_value, np.bool_)
+ except (TypeError, ValueError):
+ fill = _check_fill_value(None, np.bool_)
+ check._fill_value = fill
+
return check
def __eq__(self, other):
@@ -4310,75 +4332,59 @@ class MaskedArray(ndarray):
raise MaskError('Cannot convert masked element to a Python long.')
return long(self.item())
-
- def get_imag(self):
+ @property
+ def imag(self):
"""
- Return the imaginary part of the masked array.
+ The imaginary part of the masked array.
- The returned array is a view on the imaginary part of the `MaskedArray`
- whose `get_imag` method is called.
-
- Parameters
- ----------
- None
-
- Returns
- -------
- result : MaskedArray
- The imaginary part of the masked array.
+ This property is a view on the imaginary part of this `MaskedArray`.
See Also
--------
- get_real, real, imag
+ real
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
- >>> x.get_imag()
- masked_array(data = [1.0 -- 1.6],
- mask = [False True False],
- fill_value = 1e+20)
+ >>> x.imag
+ masked_array(data=[1.0, --, 1.6],
+ mask=[False, True, False],
+ fill_value=1e+20)
"""
result = self._data.imag.view(type(self))
result.__setmask__(self._mask)
return result
- imag = property(fget=get_imag, doc="Imaginary part.")
+ # kept for compatibility
+ get_imag = imag.fget
- def get_real(self):
+ @property
+ def real(self):
"""
- Return the real part of the masked array.
-
- The returned array is a view on the real part of the `MaskedArray`
- whose `get_real` method is called.
+ The real part of the masked array.
- Parameters
- ----------
- None
-
- Returns
- -------
- result : MaskedArray
- The real part of the masked array.
+ This property is a view on the real part of this `MaskedArray`.
See Also
--------
- get_imag, real, imag
+ imag
Examples
--------
>>> x = np.ma.array([1+1.j, -2j, 3.45+1.6j], mask=[False, True, False])
- >>> x.get_real()
- masked_array(data = [1.0 -- 3.45],
- mask = [False True False],
- fill_value = 1e+20)
+ >>> x.real
+ masked_array(data=[1.0, --, 3.45],
+ mask=[False, True, False],
+ fill_value=1e+20)
"""
result = self._data.real.view(type(self))
result.__setmask__(self._mask)
return result
- real = property(fget=get_real, doc="Real part")
+
+ # kept for compatibility
+ get_real = real.fget
def count(self, axis=None, keepdims=np._NoValue):
"""
@@ -4418,13 +4424,12 @@ class MaskedArray(ndarray):
>>> a = ma.arange(6).reshape((2, 3))
>>> a[1, :] = ma.masked
>>> a
- masked_array(data =
- [[0 1 2]
- [-- -- --]],
- mask =
- [[False False False]
- [ True True True]],
- fill_value = 999999)
+ masked_array(
+ data=[[0, 1, 2],
+ [--, --, --]],
+ mask=[[False, False, False],
+ [ True, True, True]],
+ fill_value=999999)
>>> a.count()
3
@@ -4450,7 +4455,7 @@ class MaskedArray(ndarray):
if m is nomask:
# compare to _count_reduce_items in _methods.py
- if self.shape is ():
+ if self.shape == ():
if axis not in (None, 0):
raise np.AxisError(axis=axis, ndim=self.ndim)
return 1
@@ -4509,12 +4514,20 @@ class MaskedArray(ndarray):
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print(x)
- [[1 -- 3]
- [-- 5 --]
- [7 -- 9]]
- >>> print(x.ravel())
- [1 -- 3 -- 5 -- 7 -- 9]
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.ravel()
+ masked_array(data=[1, --, 3, --, 5, --, 7, --, 9],
+ mask=[False, True, False, True, False, True, False, True,
+ False],
+ fill_value=999999)
"""
r = ndarray.ravel(self._data, order=order).view(type(self))
@@ -4563,15 +4576,25 @@ class MaskedArray(ndarray):
Examples
--------
>>> x = np.ma.array([[1,2],[3,4]], mask=[1,0,0,1])
- >>> print(x)
- [[-- 2]
- [3 --]]
+ >>> x
+ masked_array(
+ data=[[--, 2],
+ [3, --]],
+ mask=[[ True, False],
+ [False, True]],
+ fill_value=999999)
>>> x = x.reshape((4,1))
- >>> print(x)
- [[--]
- [2]
- [3]
- [--]]
+ >>> x
+ masked_array(
+ data=[[--],
+ [2],
+ [3],
+ [--]],
+ mask=[[ True],
+ [False],
+ [False],
+ [ True]],
+ fill_value=999999)
"""
kwargs.update(order=kwargs.get('order', 'C'))
@@ -4628,21 +4651,36 @@ class MaskedArray(ndarray):
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print(x)
- [[1 -- 3]
- [-- 5 --]
- [7 -- 9]]
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
>>> x.put([0,4,8],[10,20,30])
- >>> print(x)
- [[10 -- 3]
- [-- 20 --]
- [7 -- 30]]
+ >>> x
+ masked_array(
+ data=[[10, --, 3],
+ [--, 20, --],
+ [7, --, 30]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
>>> x.put(4,999)
- >>> print(x)
- [[10 -- 3]
- [-- 999 --]
- [7 -- 30]]
+ >>> x
+ masked_array(
+ data=[[10, --, 3],
+ [--, 999, --],
+ [7, --, 30]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
"""
# Hard mask: Get rid of the values/indices that fall on masked data
@@ -4682,14 +4720,14 @@ class MaskedArray(ndarray):
--------
>>> x = np.ma.array([1, 2, 3], mask=[0, 1, 1])
>>> x.ids()
- (166670640, 166659832)
+ (166670640, 166659832) # may vary
If the array has no mask, the address of `nomask` is returned. This address
is typically not close to the data in memory:
>>> x = np.ma.array([1, 2, 3])
>>> x.ids()
- (166691080, 3083169284L)
+ (166691080, 3083169284L) # may vary
"""
if self._mask is nomask:
@@ -4838,13 +4876,12 @@ class MaskedArray(ndarray):
>>> import numpy.ma as ma
>>> x = ma.array(np.eye(3))
>>> x
- masked_array(data =
- [[ 1. 0. 0.]
- [ 0. 1. 0.]
- [ 0. 0. 1.]],
- mask =
- False,
- fill_value=1e+20)
+ masked_array(
+ data=[[1., 0., 0.],
+ [0., 1., 0.],
+ [0., 0., 1.]],
+ mask=False,
+ fill_value=1e+20)
>>> x.nonzero()
(array([0, 1, 2]), array([0, 1, 2]))
@@ -4852,15 +4889,14 @@ class MaskedArray(ndarray):
>>> x[1, 1] = ma.masked
>>> x
- masked_array(data =
- [[1.0 0.0 0.0]
- [0.0 -- 0.0]
- [0.0 0.0 1.0]],
- mask =
- [[False False False]
- [False True False]
- [False False False]],
- fill_value=1e+20)
+ masked_array(
+ data=[[1.0, 0.0, 0.0],
+ [0.0, --, 0.0],
+ [0.0, 0.0, 1.0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1e+20)
>>> x.nonzero()
(array([0, 2]), array([0, 2]))
@@ -4877,13 +4913,12 @@ class MaskedArray(ndarray):
>>> a = ma.array([[1,2,3],[4,5,6],[7,8,9]])
>>> a > 3
- masked_array(data =
- [[False False False]
- [ True True True]
- [ True True True]],
- mask =
- False,
- fill_value=999999)
+ masked_array(
+ data=[[False, False, False],
+ [ True, True, True],
+ [ True, True, True]],
+ mask=False,
+ fill_value=True)
>>> ma.nonzero(a > 3)
(array([1, 1, 1, 2, 2, 2]), array([0, 1, 2, 0, 1, 2]))
@@ -4965,18 +5000,27 @@ class MaskedArray(ndarray):
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print(x)
- [[1 -- 3]
- [-- 5 --]
- [7 -- 9]]
- >>> print(x.sum())
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.sum()
25
- >>> print(x.sum(axis=1))
- [4 5 16]
- >>> print(x.sum(axis=0))
- [8 5 12]
+ >>> x.sum(axis=1)
+ masked_array(data=[4, 5, 16],
+ mask=[False, False, False],
+ fill_value=999999)
+ >>> x.sum(axis=0)
+ masked_array(data=[8, 5, 12],
+ mask=[False, False, False],
+ fill_value=999999)
>>> print(type(x.sum(axis=0, dtype=np.int64)[0]))
- <type 'numpy.int64'>
+ <class 'numpy.int64'>
"""
kwargs = {} if keepdims is np._NoValue else {'keepdims': keepdims}
@@ -4997,7 +5041,7 @@ class MaskedArray(ndarray):
result = self.filled(0).sum(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
@@ -5027,8 +5071,11 @@ class MaskedArray(ndarray):
Examples
--------
>>> marr = np.ma.array(np.arange(10), mask=[0,0,0,1,1,1,0,0,0,0])
- >>> print(marr.cumsum())
- [0 1 3 -- -- -- 9 16 24 33]
+ >>> marr.cumsum()
+ masked_array(data=[0, 1, 3, --, --, --, 9, 16, 24, 33],
+ mask=[False, False, False, True, True, True, False, False,
+ False, False],
+ fill_value=999999)
"""
result = self.filled(0).cumsum(axis=axis, dtype=dtype, out=out)
@@ -5076,7 +5123,7 @@ class MaskedArray(ndarray):
result = self.filled(1).prod(axis, dtype=dtype, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
return out
@@ -5132,9 +5179,9 @@ class MaskedArray(ndarray):
--------
>>> a = np.ma.array([1,2,3], mask=[False, False, True])
>>> a
- masked_array(data = [1 2 --],
- mask = [False False True],
- fill_value = 999999)
+ masked_array(data=[1, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
>>> a.mean()
1.5
@@ -5155,7 +5202,7 @@ class MaskedArray(ndarray):
out.flat = result
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = getmask(result)
return out
@@ -5187,9 +5234,9 @@ class MaskedArray(ndarray):
--------
>>> a = np.ma.array([1,2,3])
>>> a.anom()
- masked_array(data = [-1. 0. 1.],
- mask = False,
- fill_value = 1e+20)
+ masked_array(data=[-1., 0., 1.],
+ mask=False,
+ fill_value=1e+20)
"""
m = self.mean(axis, dtype)
@@ -5197,9 +5244,9 @@ class MaskedArray(ndarray):
return m
if not axis:
- return (self - m)
+ return self - m
else:
- return (self - expand_dims(m, axis))
+ return self - expand_dims(m, axis)
def var(self, axis=None, dtype=None, out=None, ddof=0,
keepdims=np._NoValue):
@@ -5314,7 +5361,7 @@ class MaskedArray(ndarray):
out.__setmask__(self._mask)
return out
- def argsort(self, axis=np._NoValue, kind='quicksort', order=None,
+ def argsort(self, axis=np._NoValue, kind=None, order=None,
endwith=True, fill_value=None):
"""
Return an ndarray of indices that sort the array along the
@@ -5334,7 +5381,7 @@ class MaskedArray(ndarray):
Until then, the axis should be given explicitly when
``arr.ndim > 1``, to avoid a FutureWarning.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm.
+ The sorting algorithm used.
order : list, optional
When `a` is an array with fields defined, this argument specifies
which fields to compare first, second, etc. Not all fields need be
@@ -5369,9 +5416,9 @@ class MaskedArray(ndarray):
--------
>>> a = np.ma.array([3,2,1], mask=[False, False, True])
>>> a
- masked_array(data = [3 2 --],
- mask = [False False True],
- fill_value = 999999)
+ masked_array(data=[3, 2, --],
+ mask=[False, False, True],
+ fill_value=999999)
>>> a.argsort()
array([1, 0, 2])
@@ -5419,15 +5466,19 @@ class MaskedArray(ndarray):
Examples
--------
- >>> x = np.ma.array(arange(4), mask=[1,1,0,0])
+ >>> x = np.ma.array(np.arange(4), mask=[1,1,0,0])
>>> x.shape = (2,2)
- >>> print(x)
- [[-- --]
- [2 3]]
- >>> print(x.argmin(axis=0, fill_value=-1))
- [0 0]
- >>> print(x.argmin(axis=0, fill_value=9))
- [1 1]
+ >>> x
+ masked_array(
+ data=[[--, --],
+ [2, 3]],
+ mask=[[ True, True],
+ [False, False]],
+ fill_value=999999)
+ >>> x.argmin(axis=0, fill_value=-1)
+ array([0, 0])
+ >>> x.argmin(axis=0, fill_value=9)
+ array([1, 1])
"""
if fill_value is None:
@@ -5472,7 +5523,7 @@ class MaskedArray(ndarray):
d = self.filled(fill_value).view(ndarray)
return d.argmax(axis, out=out)
- def sort(self, axis=-1, kind='quicksort', order=None,
+ def sort(self, axis=-1, kind=None, order=None,
endwith=True, fill_value=None):
"""
Sort the array, in-place
@@ -5485,7 +5536,7 @@ class MaskedArray(ndarray):
Axis along which to sort. If None, the array is flattened before
sorting. The default is -1, which sorts along the last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
- Sorting algorithm. Default is 'quicksort'.
+ The sorting algorithm used.
order : list, optional
When `a` is a structured array, this argument specifies which fields
to compare first, second, and so on. This list does not need to
@@ -5493,7 +5544,7 @@ class MaskedArray(ndarray):
endwith : {True, False}, optional
Whether missing values (if any) should be treated as the largest values
(True) or the smallest values (False)
- When the array contains unmasked values at the same extremes of the
+ When the array contains unmasked values which sort at the same extremes of the
datatype, the ordering of these values and the masked values is
undefined.
fill_value : {var}, optional
@@ -5518,23 +5569,29 @@ class MaskedArray(ndarray):
Examples
--------
- >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
+ >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Default
>>> a.sort()
- >>> print(a)
- [1 3 5 -- --]
+ >>> a
+ masked_array(data=[1, 3, 5, --, --],
+ mask=[False, False, False, True, True],
+ fill_value=999999)
- >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
+ >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # Put missing values in the front
>>> a.sort(endwith=False)
- >>> print(a)
- [-- -- 1 3 5]
+ >>> a
+ masked_array(data=[--, --, 1, 3, 5],
+ mask=[ True, True, False, False, False],
+ fill_value=999999)
- >>> a = ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
+ >>> a = np.ma.array([1, 2, 5, 4, 3],mask=[0, 1, 0, 1, 0])
>>> # fill_value takes over endwith
>>> a.sort(endwith=False, fill_value=3)
- >>> print(a)
- [1 -- -- 3 5]
+ >>> a
+ masked_array(data=[1, --, --, 3, 5],
+ mask=[False, True, True, False, False],
+ fill_value=999999)
"""
if self._mask is nomask:
@@ -5564,6 +5621,10 @@ class MaskedArray(ndarray):
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of `minimum_fill_value`.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
Returns
-------
@@ -5600,7 +5661,7 @@ class MaskedArray(ndarray):
result = self.filled(fill_value).min(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
@@ -5640,27 +5701,36 @@ class MaskedArray(ndarray):
Examples
--------
>>> x = np.ma.array(np.arange(6), mask=[0 ,1, 0, 0, 0 ,1]).reshape(3, 2)
- >>> print(x)
- [[0 --]
- [2 3]
- [4 --]]
+ >>> x
+ masked_array(
+ data=[[0, --],
+ [2, 3],
+ [4, --]],
+ mask=[[False, True],
+ [False, False],
+ [False, True]],
+ fill_value=999999)
>>> x.mini()
- 0
+ masked_array(data=0,
+ mask=False,
+ fill_value=999999)
>>> x.mini(axis=0)
- masked_array(data = [0 3],
- mask = [False False],
- fill_value = 999999)
- >>> print(x.mini(axis=1))
- [0 2 4]
+ masked_array(data=[0, 3],
+ mask=[False, False],
+ fill_value=999999)
+ >>> x.mini(axis=1)
+ masked_array(data=[0, 2, 4],
+ mask=[False, False, False],
+ fill_value=999999)
There is a small difference between `mini` and `min`:
>>> x[:,1].mini(axis=0)
- masked_array(data = --,
- mask = True,
- fill_value = 999999)
+ masked_array(data=3,
+ mask=False,
+ fill_value=999999)
>>> x[:,1].min(axis=0)
- masked
+ 3
"""
# 2016-04-13, 1.13.0, gh-8764
@@ -5685,6 +5755,10 @@ class MaskedArray(ndarray):
fill_value : {var}, optional
Value used to fill in the masked values.
If None, use the output of maximum_fill_value().
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
Returns
-------
@@ -5721,7 +5795,7 @@ class MaskedArray(ndarray):
result = self.filled(fill_value).max(axis=axis, out=out, **kwargs)
if isinstance(out, MaskedArray):
outmask = getmask(out)
- if (outmask is nomask):
+ if outmask is nomask:
outmask = out._mask = make_mask_none(out.shape)
outmask.flat = newmask
else:
@@ -5749,6 +5823,10 @@ class MaskedArray(ndarray):
but the type will be cast if necessary.
fill_value : {var}, optional
Value used to fill in the masked values.
+ keepdims : bool, optional
+ If this is set to True, the axes which are reduced are left
+ in the result as dimensions with size one. With this option,
+ the result will broadcast correctly against the array.
Returns
-------
@@ -5809,7 +5887,6 @@ class MaskedArray(ndarray):
return out[()]
# Array methods
- clip = _arraymethod('clip', onmask=False)
copy = _arraymethod('copy')
diagonal = _arraymethod('diagonal')
flatten = _arraymethod('flatten')
@@ -5876,7 +5953,7 @@ class MaskedArray(ndarray):
returns bytes not strings.
"""
- return self.tobytes(fill_value, order='C')
+ return self.tobytes(fill_value, order=order)
def tobytes(self, fill_value=None, order='C'):
"""
@@ -5913,7 +5990,7 @@ class MaskedArray(ndarray):
--------
>>> x = np.ma.array(np.array([[1, 2], [3, 4]]), mask=[[0, 1], [1, 0]])
>>> x.tobytes()
- '\\x01\\x00\\x00\\x00?B\\x0f\\x00?B\\x0f\\x00\\x04\\x00\\x00\\x00'
+ b'\\x01\\x00\\x00\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00?B\\x0f\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x00\\x00\\x00'
"""
return self.filled(fill_value).tobytes(order=order)
@@ -5961,14 +6038,20 @@ class MaskedArray(ndarray):
Examples
--------
>>> x = np.ma.array([[1,2,3],[4,5,6],[7,8,9]], mask=[0] + [1,0]*4)
- >>> print(x)
- [[1 -- 3]
- [-- 5 --]
- [7 -- 9]]
- >>> print(x.toflex())
- [[(1, False) (2, True) (3, False)]
- [(4, True) (5, False) (6, True)]
- [(7, False) (8, True) (9, False)]]
+ >>> x
+ masked_array(
+ data=[[1, --, 3],
+ [--, 5, --],
+ [7, --, 9]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
+ >>> x.toflex()
+ array([[(1, False), (2, True), (3, False)],
+ [(4, True), (5, False), (6, True)],
+ [(7, False), (8, True), (9, False)]],
+ dtype=[('_data', '<i8'), ('_mask', '?')])
"""
# Get the basic dtype.
@@ -6068,12 +6151,11 @@ class mvoid(MaskedArray):
_data.fill_value = fill_value
return _data
- def _get_data(self):
+ @property
+ def _data(self):
# Make sure that the _data part is a np.void
return super(mvoid, self)._data[()]
- _data = property(fget=_get_data)
-
def __getitem__(self, indx):
"""
Get the index.
@@ -6215,15 +6297,14 @@ def isMaskedArray(x):
[ 0., 0., 1.]])
>>> m = ma.masked_values(a, 0)
>>> m
- masked_array(data =
- [[1.0 -- --]
- [-- 1.0 --]
- [-- -- 1.0]],
- mask =
- [[False True True]
- [ True False True]
- [ True True False]],
- fill_value=0.0)
+ masked_array(
+ data=[[1.0, --, --],
+ [--, 1.0, --],
+ [--, --, 1.0]],
+ mask=[[False, True, True],
+ [ True, False, True],
+ [ True, True, False]],
+ fill_value=0.0)
>>> ma.isMaskedArray(a)
False
>>> ma.isMaskedArray(m)
@@ -6327,7 +6408,7 @@ class MaskedConstant(MaskedArray):
def __copy__(self):
return self
-
+
def __deepcopy__(self, memo):
return self
@@ -6387,16 +6468,16 @@ def is_masked(x):
>>> import numpy.ma as ma
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 0)
>>> x
- masked_array(data = [-- 1 -- 2 3],
- mask = [ True False True False False],
- fill_value=999999)
+ masked_array(data=[--, 1, --, 2, 3],
+ mask=[ True, False, True, False, False],
+ fill_value=0)
>>> ma.is_masked(x)
True
>>> x = ma.masked_equal([0, 1, 0, 2, 3], 42)
>>> x
- masked_array(data = [0 1 0 2 3],
- mask = False,
- fill_value=999999)
+ masked_array(data=[0, 1, 0, 2, 3],
+ mask=False,
+ fill_value=42)
>>> ma.is_masked(x)
False
@@ -6656,7 +6737,7 @@ def power(a, b, third=None):
invalid = np.logical_not(np.isfinite(result.view(ndarray)))
# Add the initial mask
if m is not nomask:
- if not (result.ndim):
+ if not result.ndim:
return masked
result._mask = np.logical_or(m, invalid)
# Fix the invalid parts
@@ -6671,7 +6752,7 @@ def power(a, b, third=None):
argmin = _frommethod('argmin')
argmax = _frommethod('argmax')
-def argsort(a, axis=np._NoValue, kind='quicksort', order=None, endwith=True, fill_value=None):
+def argsort(a, axis=np._NoValue, kind=None, order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = np.asanyarray(a)
@@ -6686,7 +6767,7 @@ def argsort(a, axis=np._NoValue, kind='quicksort', order=None, endwith=True, fil
return a.argsort(axis=axis, kind=kind, order=order)
argsort.__doc__ = MaskedArray.argsort.__doc__
-def sort(a, axis=-1, kind='quicksort', order=None, endwith=True, fill_value=None):
+def sort(a, axis=-1, kind=None, order=None, endwith=True, fill_value=None):
"Function version of the eponymous method."
a = np.array(a, copy=True, subok=True)
if axis is None:
@@ -6746,17 +6827,17 @@ def concatenate(arrays, axis=0):
>>> a[1] = ma.masked
>>> b = ma.arange(2, 5)
>>> a
- masked_array(data = [0 -- 2],
- mask = [False True False],
- fill_value = 999999)
+ masked_array(data=[0, --, 2],
+ mask=[False, True, False],
+ fill_value=999999)
>>> b
- masked_array(data = [2 3 4],
- mask = False,
- fill_value = 999999)
+ masked_array(data=[2, 3, 4],
+ mask=False,
+ fill_value=999999)
>>> ma.concatenate([a, b])
- masked_array(data = [0 -- 2 2 3 4],
- mask = [False True False False False False],
- fill_value = 999999)
+ masked_array(data=[0, --, 2, 2, 3, 4],
+ mask=[False, True, False, False, False, False],
+ fill_value=999999)
"""
d = np.concatenate([getdata(a) for a in arrays], axis)
@@ -6911,24 +6992,21 @@ def transpose(a, axes=None):
>>> import numpy.ma as ma
>>> x = ma.arange(4).reshape((2,2))
>>> x[1, 1] = ma.masked
- >>>> x
- masked_array(data =
- [[0 1]
- [2 --]],
- mask =
- [[False False]
- [False True]],
- fill_value = 999999)
+ >>> x
+ masked_array(
+ data=[[0, 1],
+ [2, --]],
+ mask=[[False, False],
+ [False, True]],
+ fill_value=999999)
>>> ma.transpose(x)
- masked_array(data =
- [[0 2]
- [1 --]],
- mask =
- [[False False]
- [False True]],
- fill_value = 999999)
-
+ masked_array(
+ data=[[0, 2],
+ [1, --]],
+ mask=[[False, False],
+ [False, True]],
+ fill_value=999999)
"""
# We can't use 'frommethod', as 'transpose' doesn't take keywords
try:
@@ -6975,39 +7053,39 @@ def resize(x, new_shape):
>>> a = ma.array([[1, 2] ,[3, 4]])
>>> a[0, 1] = ma.masked
>>> a
- masked_array(data =
- [[1 --]
- [3 4]],
- mask =
- [[False True]
- [False False]],
- fill_value = 999999)
+ masked_array(
+ data=[[1, --],
+ [3, 4]],
+ mask=[[False, True],
+ [False, False]],
+ fill_value=999999)
>>> np.resize(a, (3, 3))
- array([[1, 2, 3],
- [4, 1, 2],
- [3, 4, 1]])
+ masked_array(
+ data=[[1, 2, 3],
+ [4, 1, 2],
+ [3, 4, 1]],
+ mask=False,
+ fill_value=999999)
>>> ma.resize(a, (3, 3))
- masked_array(data =
- [[1 -- 3]
- [4 1 --]
- [3 4 1]],
- mask =
- [[False True False]
- [False False True]
- [False False False]],
- fill_value = 999999)
+ masked_array(
+ data=[[1, --, 3],
+ [4, 1, --],
+ [3, 4, 1]],
+ mask=[[False, True, False],
+ [False, False, True],
+ [False, False, False]],
+ fill_value=999999)
A MaskedArray is always returned, regardless of the input type.
>>> a = np.array([[1, 2] ,[3, 4]])
>>> ma.resize(a, (3, 3))
- masked_array(data =
- [[1 2 3]
- [4 1 2]
- [3 4 1]],
- mask =
- False,
- fill_value = 999999)
+ masked_array(
+ data=[[1, 2, 3],
+ [4, 1, 2],
+ [3, 4, 1]],
+ mask=False,
+ fill_value=999999)
"""
# We can't use _frommethods here, as N.resize is notoriously whiny.
@@ -7020,23 +7098,6 @@ def resize(x, new_shape):
return result
-def rank(obj):
- """
- maskedarray version of the numpy function.
-
- .. note::
- Deprecated since 1.10.0
-
- """
- # 2015-04-12, 1.10.0
- warnings.warn(
- "`rank` is deprecated; use the `ndim` function instead. ",
- np.VisibleDeprecationWarning, stacklevel=2)
- return np.ndim(getdata(obj))
-
-rank.__doc__ = np.rank.__doc__
-
-
def ndim(obj):
"""
maskedarray version of the numpy function.
@@ -7076,7 +7137,7 @@ def where(condition, x=_NoValue, y=_NoValue):
Parameters
----------
condition : array_like, bool
- Where True, yield `x`, otherwise yield `y`.
+ Where True, yield `x`, otherwise yield `y`.
x, y : array_like, optional
Values from which to choose. `x`, `y` and `condition` need to be
broadcastable to some shape.
@@ -7098,14 +7159,24 @@ def where(condition, x=_NoValue, y=_NoValue):
>>> x = np.ma.array(np.arange(9.).reshape(3, 3), mask=[[0, 1, 0],
... [1, 0, 1],
... [0, 1, 0]])
- >>> print(x)
- [[0.0 -- 2.0]
- [-- 4.0 --]
- [6.0 -- 8.0]]
- >>> print(np.ma.where(x > 5, x, -3.1416))
- [[-3.1416 -- -3.1416]
- [-- -3.1416 --]
- [6.0 -- 8.0]]
+ >>> x
+ masked_array(
+ data=[[0.0, --, 2.0],
+ [--, 4.0, --],
+ [6.0, --, 8.0]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=1e+20)
+ >>> np.ma.where(x > 5, x, -3.1416)
+ masked_array(
+ data=[[-3.1416, --, -3.1416],
+ [--, -3.1416, --],
+ [6.0, --, 8.0]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=1e+20)
"""
@@ -7185,9 +7256,9 @@ def choose(indices, choices, out=None, mode='raise'):
>>> choice = np.array([[1,1,1], [2,2,2], [3,3,3]])
>>> a = np.array([2, 1, 0])
>>> np.ma.choose(a, choice)
- masked_array(data = [3 2 1],
- mask = False,
- fill_value=999999)
+ masked_array(data=[3, 2, 1],
+ mask=False,
+ fill_value=999999)
"""
def fmask(x):
@@ -7209,7 +7280,7 @@ def choose(indices, choices, out=None, mode='raise'):
# Construct the mask
outputmask = np.choose(c, masks, mode=mode)
outputmask = make_mask(mask_or(outputmask, getmask(indices)),
- copy=0, shrink=True)
+ copy=False, shrink=True)
# Get the choices.
d = np.choose(c, data, mode=mode, out=out).view(MaskedArray)
if out is not None:
@@ -7310,25 +7381,23 @@ def mask_rowcols(a, axis=None):
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
- masked_array(data =
- [[0 0 0]
- [0 -- 0]
- [0 0 0]],
- mask =
- [[False False False]
- [False True False]
- [False False False]],
- fill_value=999999)
+ masked_array(
+ data=[[0, 0, 0],
+ [0, --, 0],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1)
>>> ma.mask_rowcols(a)
- masked_array(data =
- [[0 -- 0]
- [-- -- --]
- [0 -- 0]],
- mask =
- [[False True False]
- [ True True True]
- [False True False]],
- fill_value=999999)
+ masked_array(
+ data=[[0, --, 0],
+ [--, --, --],
+ [0, --, 0]],
+ mask=[[False, True, False],
+ [ True, True, True],
+ [False, True, False]],
+ fill_value=1)
"""
a = array(a, subok=False)
@@ -7389,24 +7458,22 @@ def dot(a, b, strict=False, out=None):
Examples
--------
- >>> a = ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
- >>> b = ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
+ >>> a = np.ma.array([[1, 2, 3], [4, 5, 6]], mask=[[1, 0, 0], [0, 0, 0]])
+ >>> b = np.ma.array([[1, 2], [3, 4], [5, 6]], mask=[[1, 0], [0, 0], [0, 0]])
>>> np.ma.dot(a, b)
- masked_array(data =
- [[21 26]
- [45 64]],
- mask =
- [[False False]
- [False False]],
- fill_value = 999999)
+ masked_array(
+ data=[[21, 26],
+ [45, 64]],
+ mask=[[False, False],
+ [False, False]],
+ fill_value=999999)
>>> np.ma.dot(a, b, strict=True)
- masked_array(data =
- [[-- --]
- [-- 64]],
- mask =
- [[ True True]
- [ True False]],
- fill_value = 999999)
+ masked_array(
+ data=[[--, --],
+ [--, 64]],
+ mask=[[ True, True],
+ [ True, False]],
+ fill_value=999999)
"""
# !!!: Works only with 2D arrays. There should be a way to get it to run
@@ -7465,7 +7532,7 @@ def outer(a, b):
return masked_array(d)
ma = getmaskarray(a)
mb = getmaskarray(b)
- m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=0)
+ m = make_mask(1 - np.outer(1 - ma, 1 - mb), copy=False)
return masked_array(d, mask=m)
outer.__doc__ = doc_note(np.outer.__doc__,
"Masked values are replaced by 0.")
@@ -7574,18 +7641,18 @@ def allequal(a, b, fill_value=True):
Examples
--------
- >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
+ >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
- masked_array(data = [10000000000.0 1e-07 --],
- mask = [False False True],
- fill_value=1e+20)
+ masked_array(data=[10000000000.0, 1e-07, --],
+ mask=[False, False, True],
+ fill_value=1e+20)
- >>> b = array([1e10, 1e-7, -42.0])
+ >>> b = np.array([1e10, 1e-7, -42.0])
>>> b
array([ 1.00000000e+10, 1.00000000e-07, -4.20000000e+01])
- >>> ma.allequal(a, b, fill_value=False)
+ >>> np.ma.allequal(a, b, fill_value=False)
False
- >>> ma.allequal(a, b)
+ >>> np.ma.allequal(a, b)
True
"""
@@ -7651,29 +7718,29 @@ def allclose(a, b, masked_equal=True, rtol=1e-5, atol=1e-8):
Examples
--------
- >>> a = ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
+ >>> a = np.ma.array([1e10, 1e-7, 42.0], mask=[0, 0, 1])
>>> a
- masked_array(data = [10000000000.0 1e-07 --],
- mask = [False False True],
- fill_value = 1e+20)
- >>> b = ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
- >>> ma.allclose(a, b)
+ masked_array(data=[10000000000.0, 1e-07, --],
+ mask=[False, False, True],
+ fill_value=1e+20)
+ >>> b = np.ma.array([1e10, 1e-8, -42.0], mask=[0, 0, 1])
+ >>> np.ma.allclose(a, b)
False
- >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
- >>> b = ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
- >>> ma.allclose(a, b)
+ >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+ >>> b = np.ma.array([1.00001e10, 1e-9, -42.0], mask=[0, 0, 1])
+ >>> np.ma.allclose(a, b)
True
- >>> ma.allclose(a, b, masked_equal=False)
+ >>> np.ma.allclose(a, b, masked_equal=False)
False
Masked values are not compared directly.
- >>> a = ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
- >>> b = ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
- >>> ma.allclose(a, b)
+ >>> a = np.ma.array([1e10, 1e-8, 42.0], mask=[0, 0, 1])
+ >>> b = np.ma.array([1.00001e10, 1e-9, 42.0], mask=[0, 0, 1])
+ >>> np.ma.allclose(a, b)
True
- >>> ma.allclose(a, b, masked_equal=False)
+ >>> np.ma.allclose(a, b, masked_equal=False)
False
"""
@@ -7740,15 +7807,14 @@ def asarray(a, dtype=None, order=None):
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
- array([[ 0., 1., 2., 3., 4.],
- [ 5., 6., 7., 8., 9.]])
+ array([[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]])
>>> np.ma.asarray(x)
- masked_array(data =
- [[ 0. 1. 2. 3. 4.]
- [ 5. 6. 7. 8. 9.]],
- mask =
- False,
- fill_value = 1e+20)
+ masked_array(
+ data=[[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]],
+ mask=False,
+ fill_value=1e+20)
>>> type(np.ma.asarray(x))
<class 'numpy.ma.core.MaskedArray'>
@@ -7788,15 +7854,14 @@ def asanyarray(a, dtype=None):
--------
>>> x = np.arange(10.).reshape(2, 5)
>>> x
- array([[ 0., 1., 2., 3., 4.],
- [ 5., 6., 7., 8., 9.]])
+ array([[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]])
>>> np.ma.asanyarray(x)
- masked_array(data =
- [[ 0. 1. 2. 3. 4.]
- [ 5. 6. 7. 8. 9.]],
- mask =
- False,
- fill_value = 1e+20)
+ masked_array(
+ data=[[0., 1., 2., 3., 4.],
+ [5., 6., 7., 8., 9.]],
+ mask=False,
+ fill_value=1e+20)
>>> type(np.ma.asanyarray(x))
<class 'numpy.ma.core.MaskedArray'>
@@ -7821,93 +7886,6 @@ def _pickle_warn(method):
stacklevel=3)
-def dump(a, F):
- """
- Pickle a masked array to a file.
-
- This is a wrapper around ``cPickle.dump``.
-
- Parameters
- ----------
- a : MaskedArray
- The array to be pickled.
- F : str or file-like object
- The file to pickle `a` to. If a string, the full path to the file.
-
- """
- _pickle_warn('dump')
- if not hasattr(F, 'readline'):
- with open(F, 'w') as F:
- pickle.dump(a, F)
- else:
- pickle.dump(a, F)
-
-
-def dumps(a):
- """
- Return a string corresponding to the pickling of a masked array.
-
- This is a wrapper around ``cPickle.dumps``.
-
- Parameters
- ----------
- a : MaskedArray
- The array for which the string representation of the pickle is
- returned.
-
- """
- _pickle_warn('dumps')
- return pickle.dumps(a)
-
-
-def load(F):
- """
- Wrapper around ``cPickle.load`` which accepts either a file-like object
- or a filename.
-
- Parameters
- ----------
- F : str or file
- The file or file name to load.
-
- See Also
- --------
- dump : Pickle an array
-
- Notes
- -----
- This is different from `numpy.load`, which does not use cPickle but loads
- the NumPy binary .npy format.
-
- """
- _pickle_warn('load')
- if not hasattr(F, 'readline'):
- with open(F, 'r') as F:
- return pickle.load(F)
- else:
- return pickle.load(F)
-
-
-def loads(strg):
- """
- Load a pickle from the current string.
-
- The result of ``cPickle.loads(strg)`` is returned.
-
- Parameters
- ----------
- strg : str
- The string to load.
-
- See Also
- --------
- dumps : Return a string corresponding to the pickling of a masked array.
-
- """
- _pickle_warn('loads')
- return pickle.loads(strg)
-
-
def fromfile(file, dtype=float, count=-1, sep=''):
raise NotImplementedError(
"fromfile() not yet implemented for a MaskedArray.")
@@ -7940,39 +7918,38 @@ def fromflex(fxarray):
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[0] + [1, 0] * 4)
>>> rec = x.toflex()
>>> rec
- array([[(0, False), (1, True), (2, False)],
- [(3, True), (4, False), (5, True)],
- [(6, False), (7, True), (8, False)]],
- dtype=[('_data', '<i4'), ('_mask', '|b1')])
+ array([[(0, False), (1, True), (2, False)],
+ [(3, True), (4, False), (5, True)],
+ [(6, False), (7, True), (8, False)]],
+ dtype=[('_data', '<i8'), ('_mask', '?')])
>>> x2 = np.ma.fromflex(rec)
>>> x2
- masked_array(data =
- [[0 -- 2]
- [-- 4 --]
- [6 -- 8]],
- mask =
- [[False True False]
- [ True False True]
- [False True False]],
- fill_value = 999999)
+ masked_array(
+ data=[[0, --, 2],
+ [--, 4, --],
+ [6, --, 8]],
+ mask=[[False, True, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
Extra fields can be present in the structured array but are discarded:
>>> dt = [('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')]
>>> rec2 = np.zeros((2, 2), dtype=dt)
>>> rec2
- array([[(0, False, 0.0), (0, False, 0.0)],
- [(0, False, 0.0), (0, False, 0.0)]],
- dtype=[('_data', '<i4'), ('_mask', '|b1'), ('field3', '<f4')])
+ array([[(0, False, 0.), (0, False, 0.)],
+ [(0, False, 0.), (0, False, 0.)]],
+ dtype=[('_data', '<i4'), ('_mask', '?'), ('field3', '<f4')])
>>> y = np.ma.fromflex(rec2)
>>> y
- masked_array(data =
- [[0 0]
- [0 0]],
- mask =
- [[False False]
- [False False]],
- fill_value = 999999)
+ masked_array(
+ data=[[0, 0],
+ [0, 0]],
+ mask=[[False, False],
+ [False, False]],
+ fill_value=999999,
+ dtype=int32)
"""
return masked_array(fxarray['_data'], mask=fxarray['_mask'])
@@ -8073,7 +8050,10 @@ def append(a, b, axis=None):
>>> import numpy.ma as ma
>>> a = ma.masked_values([1, 2, 3], 2)
>>> b = ma.masked_values([[4, 5, 6], [7, 8, 9]], 7)
- >>> print(ma.append(a, b))
- [1 -- 3 4 5 6 -- 8 9]
+ >>> ma.append(a, b)
+ masked_array(data=[1, --, 3, 4, 5, 6, --, 8, 9],
+ mask=[False, True, False, False, False, False, True, False,
+ False],
+ fill_value=999999)
"""
return concatenate([a, b], axis)
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 3be4d3625..de1aa3af8 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -81,15 +81,14 @@ def count_masked(arr, axis=None):
>>> a[1, 2] = ma.masked
>>> a[2, 1] = ma.masked
>>> a
- masked_array(data =
- [[0 1 2]
- [-- 4 --]
- [6 -- 8]],
- mask =
- [[False False False]
- [ True False True]
- [False True False]],
- fill_value=999999)
+ masked_array(
+ data=[[0, 1, 2],
+ [--, 4, --],
+ [6, --, 8]],
+ mask=[[False, False, False],
+ [ True, False, True],
+ [False, True, False]],
+ fill_value=999999)
>>> ma.count_masked(a)
3
@@ -132,15 +131,15 @@ def masked_all(shape, dtype=float):
--------
>>> import numpy.ma as ma
>>> ma.masked_all((3, 3))
- masked_array(data =
- [[-- -- --]
- [-- -- --]
- [-- -- --]],
- mask =
- [[ True True True]
- [ True True True]
- [ True True True]],
- fill_value=1e+20)
+ masked_array(
+ data=[[--, --, --],
+ [--, --, --],
+ [--, --, --]],
+ mask=[[ True, True, True],
+ [ True, True, True],
+ [ True, True, True]],
+ fill_value=1e+20,
+ dtype=float64)
The `dtype` parameter defines the underlying data type.
@@ -188,16 +187,16 @@ def masked_all_like(arr):
>>> import numpy.ma as ma
>>> arr = np.zeros((2, 3), dtype=np.float32)
>>> arr
- array([[ 0., 0., 0.],
- [ 0., 0., 0.]], dtype=float32)
+ array([[0., 0., 0.],
+ [0., 0., 0.]], dtype=float32)
>>> ma.masked_all_like(arr)
- masked_array(data =
- [[-- -- --]
- [-- -- --]],
- mask =
- [[ True True True]
- [ True True True]],
- fill_value=1e+20)
+ masked_array(
+ data=[[--, --, --],
+ [--, --, --]],
+ mask=[[ True, True, True],
+ [ True, True, True]],
+ fill_value=1e+20,
+ dtype=float32)
The dtype of the masked array matches the dtype of `arr`.
@@ -391,7 +390,6 @@ def apply_along_axis(func1d, axis, arr, *args, **kwargs):
i[axis] = slice(None, None)
outshape = np.asarray(arr.shape).take(indlist)
i.put(indlist, ind)
- j = i.copy()
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
asscalar = np.isscalar(res)
@@ -492,28 +490,45 @@ if apply_over_axes.__doc__ is not None:
Examples
--------
- >>> a = ma.arange(24).reshape(2,3,4)
- >>> a[:,0,1] = ma.masked
- >>> a[:,1,:] = ma.masked
- >>> print(a)
- [[[0 -- 2 3]
- [-- -- -- --]
- [8 9 10 11]]
-
- [[12 -- 14 15]
- [-- -- -- --]
- [20 21 22 23]]]
- >>> print(ma.apply_over_axes(ma.sum, a, [0,2]))
- [[[46]
- [--]
- [124]]]
+ >>> a = np.ma.arange(24).reshape(2,3,4)
+ >>> a[:,0,1] = np.ma.masked
+ >>> a[:,1,:] = np.ma.masked
+ >>> a
+ masked_array(
+ data=[[[0, --, 2, 3],
+ [--, --, --, --],
+ [8, 9, 10, 11]],
+ [[12, --, 14, 15],
+ [--, --, --, --],
+ [20, 21, 22, 23]]],
+ mask=[[[False, True, False, False],
+ [ True, True, True, True],
+ [False, False, False, False]],
+ [[False, True, False, False],
+ [ True, True, True, True],
+ [False, False, False, False]]],
+ fill_value=999999)
+ >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
+ masked_array(
+ data=[[[46],
+ [--],
+ [124]]],
+ mask=[[[False],
+ [ True],
+ [False]]],
+ fill_value=999999)
Tuple axis arguments to ufuncs are equivalent:
- >>> print(ma.sum(a, axis=(0,2)).reshape((1,-1,1)))
- [[[46]
- [--]
- [124]]]
+ >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
+ masked_array(
+ data=[[[46],
+ [--],
+ [124]]],
+ mask=[[[False],
+ [ True],
+ [False]]],
+ fill_value=999999)
"""
@@ -534,8 +549,11 @@ def average(a, axis=None, weights=None, returned=False):
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
- weight equal to one. If `weights` is complex, the imaginary parts
- are ignored.
+ weight equal to one. The 1-D calculation is::
+
+ avg = sum(a * weights) / sum(weights)
+
+ The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
@@ -558,14 +576,19 @@ def average(a, axis=None, weights=None, returned=False):
1.25
>>> x = np.ma.arange(6.).reshape(3, 2)
- >>> print(x)
- [[ 0. 1.]
- [ 2. 3.]
- [ 4. 5.]]
+ >>> x
+ masked_array(
+ data=[[0., 1.],
+ [2., 3.],
+ [4., 5.]],
+ mask=False,
+ fill_value=1e+20)
>>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
... returned=True)
- >>> print(avg)
- [2.66666666667 3.66666666667]
+ >>> avg
+ masked_array(data=[2.6666666666666665, 3.6666666666666665],
+ mask=[False, False],
+ fill_value=1e+20)
"""
a = asarray(a)
@@ -676,9 +699,9 @@ def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
>>> np.ma.median(x)
2.5
>>> np.ma.median(x, axis=-1, overwrite_input=True)
- masked_array(data = [ 2. 5.],
- mask = False,
- fill_value = 1e+20)
+ masked_array(data=[2.0, 5.0],
+ mask=[False, False],
+ fill_value=1e+20)
"""
if not hasattr(a, 'mask'):
@@ -856,15 +879,14 @@ def compress_rowcols(x, axis=None):
... [1, 0, 0],
... [0, 0, 0]])
>>> x
- masked_array(data =
- [[-- 1 2]
- [-- 4 5]
- [6 7 8]],
- mask =
- [[ True False False]
- [ True False False]
- [False False False]],
- fill_value = 999999)
+ masked_array(
+ data=[[--, 1, 2],
+ [--, 4, 5],
+ [6, 7, 8]],
+ mask=[[ True, False, False],
+ [ True, False, False],
+ [False, False, False]],
+ fill_value=999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
@@ -937,25 +959,24 @@ def mask_rows(a, axis=None):
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
- masked_array(data =
- [[0 0 0]
- [0 -- 0]
- [0 0 0]],
- mask =
- [[False False False]
- [False True False]
- [False False False]],
- fill_value=999999)
+ masked_array(
+ data=[[0, 0, 0],
+ [0, --, 0],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1)
+
>>> ma.mask_rows(a)
- masked_array(data =
- [[0 0 0]
- [-- -- --]
- [0 0 0]],
- mask =
- [[False False False]
- [ True True True]
- [False False False]],
- fill_value=999999)
+ masked_array(
+ data=[[0, 0, 0],
+ [--, --, --],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [ True, True, True],
+ [False, False, False]],
+ fill_value=1)
"""
return mask_rowcols(a, 0)
@@ -982,25 +1003,23 @@ def mask_cols(a, axis=None):
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
- masked_array(data =
- [[0 0 0]
- [0 -- 0]
- [0 0 0]],
- mask =
- [[False False False]
- [False True False]
- [False False False]],
- fill_value=999999)
+ masked_array(
+ data=[[0, 0, 0],
+ [0, --, 0],
+ [0, 0, 0]],
+ mask=[[False, False, False],
+ [False, True, False],
+ [False, False, False]],
+ fill_value=1)
>>> ma.mask_cols(a)
- masked_array(data =
- [[0 -- 0]
- [0 -- 0]
- [0 -- 0]],
- mask =
- [[False True False]
- [False True False]
- [False True False]],
- fill_value=999999)
+ masked_array(
+ data=[[0, --, 0],
+ [0, --, 0],
+ [0, --, 0]],
+ mask=[[False, True, False],
+ [False, True, False],
+ [False, True, False]],
+ fill_value=1)
"""
return mask_rowcols(a, 1)
@@ -1078,12 +1097,12 @@ def intersect1d(ar1, ar2, assume_unique=False):
Examples
--------
- >>> x = array([1, 3, 3, 3], mask=[0, 0, 0, 1])
- >>> y = array([3, 1, 1, 1], mask=[0, 0, 0, 1])
- >>> intersect1d(x, y)
- masked_array(data = [1 3 --],
- mask = [False False True],
- fill_value = 999999)
+ >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
+ >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
+ >>> np.ma.intersect1d(x, y)
+ masked_array(data=[1, 3, --],
+ mask=[False, False, True],
+ fill_value=999999)
"""
if assume_unique:
@@ -1216,9 +1235,9 @@ def setdiff1d(ar1, ar2, assume_unique=False):
--------
>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
>>> np.ma.setdiff1d(x, [1, 2])
- masked_array(data = [3 --],
- mask = [False True],
- fill_value = 999999)
+ masked_array(data=[3, --],
+ mask=[False, True],
+ fill_value=999999)
"""
if assume_unique:
@@ -1483,7 +1502,9 @@ class mr_class(MAxisConcatenator):
Examples
--------
>>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
- array([1, 2, 3, 0, 0, 4, 5, 6])
+ masked_array(data=[1, 2, 3, ..., 4, 5, 6],
+ mask=False,
+ fill_value=999999)
"""
def __init__(self):
@@ -1514,7 +1535,7 @@ def flatnotmasked_edges(a):
See Also
--------
- flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges,
+ flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
@@ -1524,19 +1545,19 @@ def flatnotmasked_edges(a):
Examples
--------
>>> a = np.ma.arange(10)
- >>> flatnotmasked_edges(a)
- [0,-1]
+ >>> np.ma.flatnotmasked_edges(a)
+ array([0, 9])
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
- >>> flatnotmasked_edges(a)
+ >>> np.ma.flatnotmasked_edges(a)
array([3, 8])
>>> a[:] = np.ma.masked
- >>> print(flatnotmasked_edges(ma))
+ >>> print(np.ma.flatnotmasked_edges(a))
None
"""
@@ -1575,7 +1596,7 @@ def notmasked_edges(a, axis=None):
See Also
--------
- flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous,
+ flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
clump_masked, clump_unmasked
Examples
@@ -1588,7 +1609,7 @@ def notmasked_edges(a, axis=None):
>>> np.array(am[~am.mask])
array([0, 1, 2, 3, 6])
- >>> np.ma.notmasked_edges(ma)
+ >>> np.ma.notmasked_edges(am)
array([0, 6])
"""
@@ -1620,7 +1641,7 @@ def flatnotmasked_contiguous(a):
See Also
--------
- flatnotmasked_edges, notmasked_contiguous, notmasked_edges,
+ flatnotmasked_edges, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
@@ -1680,7 +1701,7 @@ def notmasked_contiguous(a, axis=None):
See Also
--------
- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
@@ -1709,15 +1730,11 @@ def notmasked_contiguous(a, axis=None):
[slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
>>> np.ma.notmasked_contiguous(ma, axis=0)
- [[slice(0, 1, None), slice(2, 3, None)], # column broken into two segments
- [], # fully masked column
- [slice(0, 1, None)],
- [slice(0, 3, None)]]
+ [[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
>>> np.ma.notmasked_contiguous(ma, axis=1)
- [[slice(0, 1, None), slice(2, 4, None)], # row broken into two segments
- [slice(3, 4, None)],
- [slice(0, 1, None), slice(3, 4, None)]]
+ [[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
+
"""
a = asarray(a)
nd = a.ndim
@@ -1789,7 +1806,7 @@ def clump_unmasked(a):
See Also
--------
- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_masked
Examples
@@ -1828,7 +1845,7 @@ def clump_masked(a):
See Also
--------
- flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges,
+ flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_unmasked
Examples
diff --git a/numpy/ma/mrecords.py b/numpy/ma/mrecords.py
index daf2f8770..826fb0f64 100644
--- a/numpy/ma/mrecords.py
+++ b/numpy/ma/mrecords.py
@@ -19,7 +19,6 @@ import sys
import warnings
import numpy as np
-import numpy.core.numerictypes as ntypes
from numpy.compat import basestring
from numpy import (
bool_, dtype, ndarray, recarray, array as narray
@@ -167,24 +166,22 @@ class MaskedRecords(MaskedArray, object):
_dict['_baseclass'] = recarray
return
- def _getdata(self):
+ @property
+ def _data(self):
"""
Returns the data as a recarray.
"""
return ndarray.view(self, recarray)
- _data = property(fget=_getdata)
-
- def _getfieldmask(self):
+ @property
+ def _fieldmask(self):
"""
Alias to mask.
"""
return self._mask
- _fieldmask = property(fget=_getfieldmask)
-
def __len__(self):
"""
Returns the length
@@ -211,7 +208,7 @@ class MaskedRecords(MaskedArray, object):
_localdict = ndarray.__getattribute__(self, '__dict__')
_data = ndarray.view(self, _localdict['_baseclass'])
obj = _data.getfield(*res)
- if obj.dtype.fields:
+ if obj.dtype.names is not None:
raise NotImplementedError("MaskedRecords is currently limited to"
"simple records.")
# Get some special attributes
@@ -224,7 +221,8 @@ class MaskedRecords(MaskedArray, object):
except IndexError:
# Couldn't find a mask: use the default (nomask)
pass
- hasmasked = _mask.view((bool, (len(_mask.dtype) or 1))).any()
+ tp_len = len(_mask.dtype)
+ hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any()
if (obj.shape or hasmasked):
obj = obj.view(MaskedArray)
obj._baseclass = ndarray
diff --git a/numpy/ma/tests/test_core.py b/numpy/ma/tests/test_core.py
index 21e0fc41a..b72ce56aa 100644
--- a/numpy/ma/tests/test_core.py
+++ b/numpy/ma/tests/test_core.py
@@ -10,7 +10,6 @@ __author__ = "Pierre GF Gerard-Marchant"
import sys
import warnings
-import pickle
import operator
import itertools
import textwrap
@@ -27,7 +26,7 @@ from numpy.testing import (
assert_raises, assert_warns, suppress_warnings
)
from numpy import ndarray
-from numpy.compat import asbytes, asbytes_nested
+from numpy.compat import asbytes
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal,
assert_equal_records, fail_if_equal, assert_not_equal,
@@ -50,6 +49,7 @@ from numpy.ma.core import (
ravel, repeat, reshape, resize, shape, sin, sinh, sometrue, sort, sqrt,
subtract, sum, take, tan, tanh, transpose, where, zeros,
)
+from numpy.compat import pickle
pi = np.pi
@@ -60,6 +60,11 @@ suppress_copy_mask_on_assignment.filter(
"setting an item on a masked array which has a shared mask will not copy")
+# For parametrized numeric testing
+num_dts = [np.dtype(dt_) for dt_ in '?bhilqBHILQefdgFD']
+num_ids = [dt_.char for dt_ in num_dts]
+
+
class TestMaskedArray(object):
# Base test class for MaskedArrays.
@@ -228,7 +233,7 @@ class TestMaskedArray(object):
x = np.array([('A', 0)], dtype={'names':['f0','f1'],
'formats':['S4','i8'],
'offsets':[0,8]})
- data = array(x) # used to fail due to 'V' padding field in x.dtype.descr
+ array(x) # used to fail due to 'V' padding field in x.dtype.descr
def test_asarray(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf) = self.d
@@ -342,7 +347,7 @@ class TestMaskedArray(object):
m = make_mask(n)
m2 = make_mask(m)
assert_(m is m2)
- m3 = make_mask(m, copy=1)
+ m3 = make_mask(m, copy=True)
assert_(m is not m3)
x1 = np.arange(5)
@@ -369,12 +374,12 @@ class TestMaskedArray(object):
y2a = array(x1, mask=m, copy=1)
assert_(y2a._data.__array_interface__ != x1.__array_interface__)
- #assert_( y2a.mask is not m)
+ #assert_( y2a._mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(y2a[2] is masked)
y2a[2] = 9
assert_(y2a[2] is not masked)
- #assert_( y2a.mask is not m)
+ #assert_( y2a._mask is not m)
assert_(y2a._mask.__array_interface__ != m.__array_interface__)
assert_(allequal(y2a.mask, 0))
@@ -555,50 +560,55 @@ class TestMaskedArray(object):
True, # Fully masked
False) # Fully unmasked
- for mask in masks:
- a.mask = mask
- a_pickled = pickle.loads(a.dumps())
- assert_equal(a_pickled._mask, a._mask)
- assert_equal(a_pickled._data, a._data)
- if dtype in (object, int):
- assert_equal(a_pickled.fill_value, 999)
- else:
- assert_equal(a_pickled.fill_value, dtype(999))
- assert_array_equal(a_pickled.mask, mask)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ for mask in masks:
+ a.mask = mask
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled._data, a._data)
+ if dtype in (object, int):
+ assert_equal(a_pickled.fill_value, 999)
+ else:
+ assert_equal(a_pickled.fill_value, dtype(999))
+ assert_array_equal(a_pickled.mask, mask)
def test_pickling_subbaseclass(self):
# Test pickling w/ a subclass of ndarray
x = np.array([(1.0, 2), (3.0, 4)],
dtype=[('x', float), ('y', int)]).view(np.recarray)
a = masked_array(x, mask=[(True, False), (False, True)])
- a_pickled = pickle.loads(a.dumps())
- assert_equal(a_pickled._mask, a._mask)
- assert_equal(a_pickled, a)
- assert_(isinstance(a_pickled._data, np.recarray))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
+ assert_(isinstance(a_pickled._data, np.recarray))
def test_pickling_maskedconstant(self):
# Test pickling MaskedConstant
mc = np.ma.masked
- mc_pickled = pickle.loads(mc.dumps())
- assert_equal(mc_pickled._baseclass, mc._baseclass)
- assert_equal(mc_pickled._mask, mc._mask)
- assert_equal(mc_pickled._data, mc._data)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ mc_pickled = pickle.loads(pickle.dumps(mc, protocol=proto))
+ assert_equal(mc_pickled._baseclass, mc._baseclass)
+ assert_equal(mc_pickled._mask, mc._mask)
+ assert_equal(mc_pickled._data, mc._data)
def test_pickling_wstructured(self):
# Tests pickling w/ structured array
a = array([(1, 1.), (2, 2.)], mask=[(0, 0), (0, 1)],
dtype=[('a', int), ('b', float)])
- a_pickled = pickle.loads(a.dumps())
- assert_equal(a_pickled._mask, a._mask)
- assert_equal(a_pickled, a)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ a_pickled = pickle.loads(pickle.dumps(a, protocol=proto))
+ assert_equal(a_pickled._mask, a._mask)
+ assert_equal(a_pickled, a)
def test_pickling_keepalignment(self):
# Tests pickling w/ F_CONTIGUOUS arrays
a = arange(10)
a.shape = (-1, 2)
b = a.T
- test = pickle.loads(pickle.dumps(b))
- assert_equal(test, b)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ test = pickle.loads(pickle.dumps(b, protocol=proto))
+ assert_equal(test, b)
def test_single_element_subscript(self):
# Tests single element subscripts of Maskedarrays.
@@ -1410,23 +1420,34 @@ class TestMaskedArrayArithmetic(object):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+
test = (a == a)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
test = (a == a[0])
assert_equal(test.data, [True, False])
assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test.data, [False, True])
assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
test = (a[0] == b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a == b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
# complicated dtype, 2-dimensional array.
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([[(1, (1, 1)), (2, (2, 2))],
@@ -1436,28 +1457,40 @@ class TestMaskedArrayArithmetic(object):
test = (a[0, 0] == a)
assert_equal(test.data, [[True, False], [False, False]])
assert_equal(test.mask, [[False, False], [False, True]])
+ assert_(test.fill_value == True)
def test_ne_on_structured(self):
# Test the equality of structured arrays
ndtype = [('A', int), ('B', int)]
a = array([(1, 1), (2, 2)], mask=[(0, 1), (0, 0)], dtype=ndtype)
+
test = (a != a)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
test = (a != a[0])
assert_equal(test.data, [False, True])
assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
b = array([(1, 1), (2, 2)], mask=[(1, 0), (0, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test.data, [True, False])
assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
test = (a[0] != b)
assert_equal(test.data, [True, True])
assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
b = array([(1, 1), (2, 2)], mask=[(0, 1), (1, 0)], dtype=ndtype)
test = (a != b)
assert_equal(test.data, [False, False])
assert_equal(test.mask, [False, False])
+ assert_(test.fill_value == True)
+
# complicated dtype, 2-dimensional array.
ndtype = [('A', int), ('B', [('BA', int), ('BB', int)])]
a = array([[(1, (1, 1)), (2, (2, 2))],
@@ -1467,6 +1500,7 @@ class TestMaskedArrayArithmetic(object):
test = (a[0, 0] != a)
assert_equal(test.data, [[False, True], [True, True]])
assert_equal(test.mask, [[False, False], [False, True]])
+ assert_(test.fill_value == True)
def test_eq_ne_structured_extra(self):
# ensure simple examples are symmetric and make sense.
@@ -1502,6 +1536,120 @@ class TestMaskedArrayArithmetic(object):
el_by_el = [m1[name] != m2[name] for name in dt.names]
assert_equal(array(el_by_el, dtype=bool).any(), ne_expected)
+ @pytest.mark.parametrize('dt', ['S', 'U'])
+ @pytest.mark.parametrize('fill', [None, 'A'])
+ def test_eq_for_strings(self, dt, fill):
+ # Test the equality of structured arrays
+ a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+ test = (a == a)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a == a[0])
+ assert_equal(test.data, [True, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+ test = (a == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ # test = (a[0] == b) # doesn't work in Python2
+ test = (b == a[0])
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('dt', ['S', 'U'])
+ @pytest.mark.parametrize('fill', [None, 'A'])
+ def test_ne_for_strings(self, dt, fill):
+ # Test the equality of structured arrays
+ a = array(['a', 'b'], dtype=dt, mask=[0, 1], fill_value=fill)
+
+ test = (a != a)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a != a[0])
+ assert_equal(test.data, [False, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array(['a', 'b'], dtype=dt, mask=[1, 0], fill_value=fill)
+ test = (a != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ # test = (a[0] != b) # doesn't work in Python2
+ test = (b != a[0])
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('fill', [None, 1])
+ def test_eq_for_numeric(self, dt1, dt2, fill):
+ # Test the equality of structured arrays
+ a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill)
+
+ test = (a == a)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a == a[0])
+ assert_equal(test.data, [True, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill)
+ test = (a == b)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ # test = (a[0] == b) # doesn't work in Python2
+ test = (b == a[0])
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
+ @pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+ @pytest.mark.parametrize('fill', [None, 1])
+ def test_ne_for_numeric(self, dt1, dt2, fill):
+ # Test the equality of structured arrays
+ a = array([0, 1], dtype=dt1, mask=[0, 1], fill_value=fill)
+
+ test = (a != a)
+ assert_equal(test.data, [False, False])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ test = (a != a[0])
+ assert_equal(test.data, [False, True])
+ assert_equal(test.mask, [False, True])
+ assert_(test.fill_value == True)
+
+ b = array([0, 1], dtype=dt2, mask=[1, 0], fill_value=fill)
+ test = (a != b)
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, True])
+ assert_(test.fill_value == True)
+
+ # test = (a[0] != b) # doesn't work in Python2
+ test = (b != a[0])
+ assert_equal(test.data, [True, True])
+ assert_equal(test.mask, [True, False])
+ assert_(test.fill_value == True)
+
def test_eq_with_None(self):
# Really, comparisons with None should not be done, but check them
# anyway. Note that pep8 will flag these tests.
@@ -1881,6 +2029,17 @@ class TestFillingValues(object):
assert_equal(x.fill_value, 999.)
assert_equal(x._fill_value, np.array(999.))
+ def test_subarray_fillvalue(self):
+ # gh-10483 test multi-field index fill value
+ fields = array([(1, 1, 1)],
+ dtype=[('i', int), ('s', '|S8'), ('f', float)])
+ with suppress_warnings() as sup:
+ sup.filter(FutureWarning, "Numpy has detected")
+ subfields = fields[['i', 'f']]
+ assert_equal(tuple(subfields.fill_value), (999999, 1.e+20))
+ # test comparison does not raise:
+ subfields[1:] == subfields[:-1]
+
def test_fillvalue_exotic_dtype(self):
# Tests yet more exotic flexible dtypes
_check_fill_value = np.ma.core._check_fill_value
@@ -2242,9 +2401,9 @@ class TestMaskedArrayInPlaceArithmetics(object):
assert_equal(xm, y + 1)
(x, _, xm) = self.floatdata
- id1 = x.data.ctypes._data
+ id1 = x.data.ctypes.data
x += 1.
- assert_(id1 == x.data.ctypes._data)
+ assert_(id1 == x.data.ctypes.data)
assert_equal(x, y + 1.)
def test_inplace_addition_array(self):
@@ -2876,6 +3035,13 @@ class TestMaskedArrayMethods(object):
assert_equal(clipped._data, x.clip(2, 8))
assert_equal(clipped._data, mx._data.clip(2, 8))
+ def test_clip_out(self):
+ # gh-14140
+ a = np.arange(10)
+ m = np.ma.MaskedArray(a, mask=[0, 1] * 5)
+ m.clip(0, 5, out=m)
+ assert_equal(m.mask, [0, 1] * 5)
+
def test_compress(self):
# test compress
a = masked_array([1., 2., 3., 4., 5.], fill_value=9999)
@@ -4801,13 +4967,13 @@ class TestMaskedConstant(object):
def test_pickle(self):
from io import BytesIO
- import pickle
- with BytesIO() as f:
- pickle.dump(np.ma.masked, f)
- f.seek(0)
- res = pickle.load(f)
- assert_(res is np.ma.masked)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ with BytesIO() as f:
+ pickle.dump(np.ma.masked, f, protocol=proto)
+ f.seek(0)
+ res = pickle.load(f)
+ assert_(res is np.ma.masked)
def test_copy(self):
# gh-9328
@@ -4980,7 +5146,7 @@ def test_ufunc_with_out_varied():
assert_equal(res_pos.data, expected.data)
-def test_astype():
+def test_astype_mask_ordering():
descr = [('v', int, 3), ('x', [('y', float)])]
x = array([
[([1, 2, 3], (1.0,)), ([1, 2, 3], (2.0,))],
@@ -5012,6 +5178,25 @@ def test_astype():
assert_(x_f2.mask.flags.f_contiguous)
+@pytest.mark.parametrize('dt1', num_dts, ids=num_ids)
+@pytest.mark.parametrize('dt2', num_dts, ids=num_ids)
+@pytest.mark.filterwarnings('ignore::numpy.ComplexWarning')
+def test_astype_basic(dt1, dt2):
+ # See gh-12070
+ src = np.ma.array(ones(3, dt1), fill_value=1)
+ dst = src.astype(dt2)
+
+ assert_(src.fill_value == 1)
+ assert_(src.dtype == dt1)
+ assert_(src.fill_value.dtype == dt1)
+
+ assert_(dst.fill_value == 1)
+ assert_(dst.dtype == dt2)
+ assert_(dst.fill_value.dtype == dt2)
+
+ assert_equal(src, dst)
+
+
def test_fieldless_void():
dt = np.dtype([]) # a void dtype with no fields
x = np.empty(4, dt)
@@ -5025,3 +5210,10 @@ def test_fieldless_void():
mx = np.ma.array(x, mask=x)
assert_equal(mx.dtype, x.dtype)
assert_equal(mx.shape, x.shape)
+
+
+def test_mask_shape_assignment_does_not_break_masked():
+ a = np.ma.masked
+ b = np.ma.array(1, mask=a.mask)
+ b.shape = (1,)
+ assert_equal(a.mask.shape, ())
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index c29bec2bd..836770378 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -14,7 +14,7 @@ import itertools
import numpy as np
from numpy.testing import (
- assert_warns, suppress_warnings, assert_raises,
+ assert_warns, suppress_warnings
)
from numpy.ma.testutils import (
assert_, assert_array_equal, assert_equal, assert_almost_equal
@@ -29,9 +29,8 @@ from numpy.ma.extras import (
ediff1d, apply_over_axes, apply_along_axis, compress_nd, compress_rowcols,
mask_rowcols, clump_masked, clump_unmasked, flatnotmasked_contiguous,
notmasked_contiguous, notmasked_edges, masked_all, masked_all_like, isin,
- diagflat, stack, vstack, hstack
+ diagflat, stack, vstack
)
-import numpy.ma.extras as mae
class TestGeneric(object):
@@ -149,7 +148,7 @@ class TestAverage(object):
ott = array([0., 1., 2., 3.], mask=[True, False, False, False])
assert_equal(2.0, average(ott, axis=0))
assert_equal(2.0, average(ott, weights=[1., 1., 2., 1.]))
- result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
+ result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
assert_equal(2.0, result)
assert_(wts == 4.0)
ott[:] = masked
@@ -160,7 +159,7 @@ class TestAverage(object):
assert_equal(average(ott, axis=0), [2.0, 0.0])
assert_equal(average(ott, axis=1).mask[0], [True])
assert_equal([2., 0.], average(ott, axis=0))
- result, wts = average(ott, axis=0, returned=1)
+ result, wts = average(ott, axis=0, returned=True)
assert_equal(wts, [1., 0.])
def test_testAverage2(self):
@@ -201,14 +200,14 @@ class TestAverage(object):
# Yet more tests of average!
a = arange(6)
b = arange(6) * 3
- r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
+ r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
assert_equal(shape(r1), shape(w1))
assert_equal(r1.shape, w1.shape)
- r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
+ r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[False, False], [True, False]])
@@ -892,61 +891,51 @@ class TestMedian(object):
expected)
def test_nan(self):
- with suppress_warnings() as w:
- w.record(RuntimeWarning)
- for mask in (False, np.zeros(6, dtype=bool)):
- dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
- dm.mask = mask
-
- # scalar result
- r = np.ma.median(dm, axis=None)
- assert_(np.isscalar(r))
- assert_array_equal(r, np.nan)
- r = np.ma.median(dm.ravel(), axis=0)
- assert_(np.isscalar(r))
- assert_array_equal(r, np.nan)
-
- r = np.ma.median(dm, axis=0)
- assert_equal(type(r), MaskedArray)
- assert_array_equal(r, [1, np.nan, 3])
- r = np.ma.median(dm, axis=1)
- assert_equal(type(r), MaskedArray)
- assert_array_equal(r, [np.nan, 2])
- r = np.ma.median(dm, axis=-1)
- assert_equal(type(r), MaskedArray)
- assert_array_equal(r, [np.nan, 2])
-
+ for mask in (False, np.zeros(6, dtype=bool)):
dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
- dm[:, 2] = np.ma.masked
- assert_array_equal(np.ma.median(dm, axis=None), np.nan)
- assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3])
- assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5])
- assert_equal([x.category is RuntimeWarning for x in w.log],
- [True]*13)
+ dm.mask = mask
+
+ # scalar result
+ r = np.ma.median(dm, axis=None)
+ assert_(np.isscalar(r))
+ assert_array_equal(r, np.nan)
+ r = np.ma.median(dm.ravel(), axis=0)
+ assert_(np.isscalar(r))
+ assert_array_equal(r, np.nan)
+
+ r = np.ma.median(dm, axis=0)
+ assert_equal(type(r), MaskedArray)
+ assert_array_equal(r, [1, np.nan, 3])
+ r = np.ma.median(dm, axis=1)
+ assert_equal(type(r), MaskedArray)
+ assert_array_equal(r, [np.nan, 2])
+ r = np.ma.median(dm, axis=-1)
+ assert_equal(type(r), MaskedArray)
+ assert_array_equal(r, [np.nan, 2])
+
+ dm = np.ma.array([[1, np.nan, 3], [1, 2, 3]])
+ dm[:, 2] = np.ma.masked
+ assert_array_equal(np.ma.median(dm, axis=None), np.nan)
+ assert_array_equal(np.ma.median(dm, axis=0), [1, np.nan, 3])
+ assert_array_equal(np.ma.median(dm, axis=1), [np.nan, 1.5])
def test_out_nan(self):
- with warnings.catch_warnings(record=True):
- warnings.filterwarnings('always', '', RuntimeWarning)
- o = np.ma.masked_array(np.zeros((4,)))
- d = np.ma.masked_array(np.ones((3, 4)))
- d[2, 1] = np.nan
- d[2, 2] = np.ma.masked
- assert_equal(np.ma.median(d, 0, out=o), o)
- o = np.ma.masked_array(np.zeros((3,)))
- assert_equal(np.ma.median(d, 1, out=o), o)
- o = np.ma.masked_array(np.zeros(()))
- assert_equal(np.ma.median(d, out=o), o)
+ o = np.ma.masked_array(np.zeros((4,)))
+ d = np.ma.masked_array(np.ones((3, 4)))
+ d[2, 1] = np.nan
+ d[2, 2] = np.ma.masked
+ assert_equal(np.ma.median(d, 0, out=o), o)
+ o = np.ma.masked_array(np.zeros((3,)))
+ assert_equal(np.ma.median(d, 1, out=o), o)
+ o = np.ma.masked_array(np.zeros(()))
+ assert_equal(np.ma.median(d, out=o), o)
def test_nan_behavior(self):
a = np.ma.masked_array(np.arange(24, dtype=float))
a[::3] = np.ma.masked
a[2] = np.nan
- with suppress_warnings() as w:
- w.record(RuntimeWarning)
- assert_array_equal(np.ma.median(a), np.nan)
- assert_array_equal(np.ma.median(a, axis=0), np.nan)
- assert_(w.log[0].category is RuntimeWarning)
- assert_(w.log[1].category is RuntimeWarning)
+ assert_array_equal(np.ma.median(a), np.nan)
+ assert_array_equal(np.ma.median(a, axis=0), np.nan)
a = np.ma.masked_array(np.arange(24, dtype=float).reshape(2, 3, 4))
a.mask = np.arange(a.size) % 2 == 1
@@ -955,39 +944,26 @@ class TestMedian(object):
a[1, 1, 2] = np.nan
# no axis
- with suppress_warnings() as w:
- w.record(RuntimeWarning)
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_array_equal(np.ma.median(a), np.nan)
- assert_(np.isscalar(np.ma.median(a)))
- assert_(w.log[0].category is RuntimeWarning)
+ assert_array_equal(np.ma.median(a), np.nan)
+ assert_(np.isscalar(np.ma.median(a)))
# axis0
b = np.ma.median(aorig, axis=0)
b[2, 3] = np.nan
b[1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.ma.median(a, 0), b)
- assert_equal(len(w), 1)
+ assert_equal(np.ma.median(a, 0), b)
# axis1
b = np.ma.median(aorig, axis=1)
b[1, 3] = np.nan
b[1, 2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.ma.median(a, 1), b)
- assert_equal(len(w), 1)
+ assert_equal(np.ma.median(a, 1), b)
# axis02
b = np.ma.median(aorig, axis=(0, 2))
b[1] = np.nan
b[2] = np.nan
- with warnings.catch_warnings(record=True) as w:
- warnings.filterwarnings('always', '', RuntimeWarning)
- assert_equal(np.ma.median(a, (0, 2)), b)
- assert_equal(len(w), 1)
+ assert_equal(np.ma.median(a, (0, 2)), b)
def test_ambigous_fill(self):
# 255 is max value, used as filler for sort
diff --git a/numpy/ma/tests/test_mrecords.py b/numpy/ma/tests/test_mrecords.py
index e08dc1326..94e772d55 100644
--- a/numpy/ma/tests/test_mrecords.py
+++ b/numpy/ma/tests/test_mrecords.py
@@ -7,9 +7,6 @@
"""
from __future__ import division, absolute_import, print_function
-import warnings
-import pickle
-
import numpy as np
import numpy.ma as ma
from numpy import recarray
@@ -26,6 +23,7 @@ from numpy.ma.testutils import (
assert_, assert_equal,
assert_equal_records,
)
+from numpy.compat import pickle
class TestMRecords(object):
@@ -288,12 +286,13 @@ class TestMRecords(object):
# Test pickling
base = self.base.copy()
mrec = base.view(mrecarray)
- _ = pickle.dumps(mrec)
- mrec_ = pickle.loads(_)
- assert_equal(mrec_.dtype, mrec.dtype)
- assert_equal_records(mrec_._data, mrec._data)
- assert_equal(mrec_._mask, mrec._mask)
- assert_equal_records(mrec_._mask, mrec._mask)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ _ = pickle.dumps(mrec, protocol=proto)
+ mrec_ = pickle.loads(_)
+ assert_equal(mrec_.dtype, mrec.dtype)
+ assert_equal_records(mrec_._data, mrec._data)
+ assert_equal(mrec_._mask, mrec._mask)
+ assert_equal_records(mrec_._mask, mrec._mask)
def test_filled(self):
# Test filling the array
diff --git a/numpy/ma/tests/test_old_ma.py b/numpy/ma/tests/test_old_ma.py
index d7b1e3c18..7100eccbb 100644
--- a/numpy/ma/tests/test_old_ma.py
+++ b/numpy/ma/tests/test_old_ma.py
@@ -8,7 +8,6 @@ import numpy.core.fromnumeric as fromnumeric
from numpy.testing import (
assert_, assert_raises, assert_equal,
)
-from numpy.ma.testutils import assert_array_equal
from numpy.ma import (
MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
arange, arccos, arcsin, arctan, arctan2, array, average, choose,
@@ -22,6 +21,7 @@ from numpy.ma import (
repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
take, tan, tanh, transpose, where, zeros,
)
+from numpy.compat import pickle
pi = np.pi
@@ -263,14 +263,14 @@ class TestMa(object):
m = make_mask(n)
m2 = make_mask(m)
assert_(m is m2)
- m3 = make_mask(m, copy=1)
+ m3 = make_mask(m, copy=True)
assert_(m is not m3)
x1 = np.arange(5)
y1 = array(x1, mask=m)
assert_(y1._data is not x1)
assert_(allequal(x1, y1._data))
- assert_(y1.mask is m)
+ assert_(y1._mask is m)
y1a = array(y1, copy=0)
# For copy=False, one might expect that the array would just
@@ -280,19 +280,19 @@ class TestMa(object):
y1._mask.__array_interface__)
y2 = array(x1, mask=m3, copy=0)
- assert_(y2.mask is m3)
+ assert_(y2._mask is m3)
assert_(y2[2] is masked)
y2[2] = 9
assert_(y2[2] is not masked)
- assert_(y2.mask is m3)
+ assert_(y2._mask is m3)
assert_(allequal(y2.mask, 0))
y2a = array(x1, mask=m, copy=1)
- assert_(y2a.mask is not m)
+ assert_(y2a._mask is not m)
assert_(y2a[2] is masked)
y2a[2] = 9
assert_(y2a[2] is not masked)
- assert_(y2a.mask is not m)
+ assert_(y2a._mask is not m)
assert_(allequal(y2a.mask, 0))
y3 = array(x1 * 1.0, mask=m)
@@ -318,14 +318,14 @@ class TestMa(object):
assert_(x[3] is masked)
assert_(x[4] is masked)
x[[1, 4]] = [10, 40]
- assert_(x.mask is m)
+ assert_(x._mask is m)
assert_(x[3] is masked)
assert_(x[4] is not masked)
assert_(eq(x, [0, 10, 2, -1, 40]))
x = array(d, mask=m2, copy=True)
x.put([0, 1, 2], [-1, 100, 200])
- assert_(x.mask is not m2)
+ assert_(x._mask is not m2)
assert_(x[3] is masked)
assert_(x[4] is masked)
assert_(eq(x, [-1, 100, 200, 0, 0]))
@@ -549,13 +549,13 @@ class TestMa(object):
def test_testPickle(self):
# Test of pickling
- import pickle
x = arange(12)
x[4:10:2] = masked
x = x.reshape(4, 3)
- s = pickle.dumps(x)
- y = pickle.loads(s)
- assert_(eq(x, y))
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ s = pickle.dumps(x, protocol=proto)
+ y = pickle.loads(s)
+ assert_(eq(x, y))
def test_testMasked(self):
# Test of masked element
@@ -570,7 +570,7 @@ class TestMa(object):
ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
assert_(eq(2.0, average(ott, axis=0)))
assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
- result, wts = average(ott, weights=[1., 1., 2., 1.], returned=1)
+ result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
assert_(eq(2.0, result))
assert_(wts == 4.0)
ott[:] = masked
@@ -581,7 +581,7 @@ class TestMa(object):
assert_(eq(average(ott, axis=0), [2.0, 0.0]))
assert_(average(ott, axis=1)[0] is masked)
assert_(eq([2., 0.], average(ott, axis=0)))
- result, wts = average(ott, axis=0, returned=1)
+ result, wts = average(ott, axis=0, returned=True)
assert_(eq(wts, [1., 0.]))
def test_testAverage2(self):
@@ -622,14 +622,14 @@ class TestMa(object):
a = arange(6)
b = arange(6) * 3
- r1, w1 = average([[a, b], [b, a]], axis=1, returned=1)
+ r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
assert_equal(shape(r1), shape(w1))
assert_equal(r1.shape, w1.shape)
- r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=1)
+ r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), returned=True)
assert_equal(shape(w2), shape(r2))
- r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=1)
+ r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
assert_(shape(w2) == shape(r2))
a2d = array([[1, 2], [0, 4]], float)
a2dm = masked_array(a2d, [[0, 0], [1, 0]])
diff --git a/numpy/ma/tests/test_regression.py b/numpy/ma/tests/test_regression.py
index 96c418a51..b83873a5a 100644
--- a/numpy/ma/tests/test_regression.py
+++ b/numpy/ma/tests/test_regression.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import warnings
-
import numpy as np
from numpy.testing import (
assert_, assert_array_equal, assert_allclose, suppress_warnings
@@ -84,3 +82,12 @@ class TestRegression(object):
assert_(a.mask.shape == (2,))
assert_(b.shape == (2, 2))
assert_(b.mask.shape == (2, 2))
+
+ def test_empty_list_on_structured(self):
+ # See gh-12464. Indexing with empty list should give empty result.
+ ma = np.ma.MaskedArray([(1, 1.), (2, 2.), (3, 3.)], dtype='i4,f4')
+ assert_array_equal(ma[[]], ma[:0])
+
+ def test_masked_array_tostring_fortran(self):
+ ma = np.ma.arange(4).reshape((2,2))
+ assert_array_equal(ma.tostring(order='F'), ma.T.tostring())
diff --git a/numpy/ma/tests/test_subclassing.py b/numpy/ma/tests/test_subclassing.py
index f8ab52bb9..440b36722 100644
--- a/numpy/ma/tests/test_subclassing.py
+++ b/numpy/ma/tests/test_subclassing.py
@@ -66,11 +66,11 @@ class MSubArray(SubArray, MaskedArray):
_data.info = subarr.info
return _data
- def _get_series(self):
+ @property
+ def _series(self):
_view = self.view(MaskedArray)
_view._sharedmask = False
return _view
- _series = property(fget=_get_series)
msubarray = MSubArray
diff --git a/numpy/ma/timer_comparison.py b/numpy/ma/timer_comparison.py
index 68104ed0a..4ad635e38 100644
--- a/numpy/ma/timer_comparison.py
+++ b/numpy/ma/timer_comparison.py
@@ -430,11 +430,10 @@ if __name__ == '__main__':
setup_cur = "import numpy.ma.core as module\n" + setup_base
(nrepeat, nloop) = (10, 10)
- if 1:
- for i in range(1, 8):
- func = 'tester.test_%i()' % i
- cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
- cur = np.sort(cur)
- print("#%i" % i + 50*'.')
- print(eval("ModuleTester.test_%i.__doc__" % i))
- print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
+ for i in range(1, 8):
+ func = 'tester.test_%i()' % i
+ cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
+ cur = np.sort(cur)
+ print("#%i" % i + 50*'.')
+ print(eval("ModuleTester.test_%i.__doc__" % i))
+ print("core_current : %.3f - %.3f" % (cur[0], cur[1]))
diff --git a/numpy/ma/version.py b/numpy/ma/version.py
deleted file mode 100644
index a2c5c42a8..000000000
--- a/numpy/ma/version.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""Version number
-
-"""
-from __future__ import division, absolute_import, print_function
-
-version = '1.00'
-release = False
-
-if not release:
- from . import core
- from . import extras
- revision = [core.__revision__.split(':')[-1][:-1].strip(),
- extras.__revision__.split(':')[-1][:-1].strip(),]
- version += '.dev%04i' % max([int(rev) for rev in revision])
diff --git a/numpy/matlib.py b/numpy/matlib.py
index 004e5f0c8..604ef470b 100644
--- a/numpy/matlib.py
+++ b/numpy/matlib.py
@@ -2,7 +2,7 @@ from __future__ import division, absolute_import, print_function
import numpy as np
from numpy.matrixlib.defmatrix import matrix, asmatrix
-# need * as we're copying the numpy namespace
+# need * as we're copying the numpy namespace (FIXME: this makes little sense)
from numpy import *
__version__ = np.__version__
@@ -39,11 +39,11 @@ def empty(shape, dtype=None, order='C'):
--------
>>> import numpy.matlib
>>> np.matlib.empty((2, 2)) # filled with random data
- matrix([[ 6.76425276e-320, 9.79033856e-307],
- [ 7.39337286e-309, 3.22135945e-309]]) #random
+ matrix([[ 6.76425276e-320, 9.79033856e-307], # random
+ [ 7.39337286e-309, 3.22135945e-309]])
>>> np.matlib.empty((2, 2), dtype=int)
- matrix([[ 6600475, 0],
- [ 6586976, 22740995]]) #random
+ matrix([[ 6600475, 0], # random
+ [ 6586976, 22740995]])
"""
return ndarray.__new__(matrix, shape, dtype, order=order)
@@ -82,11 +82,11 @@ def ones(shape, dtype=None, order='C'):
Examples
--------
>>> np.matlib.ones((2,3))
- matrix([[ 1., 1., 1.],
- [ 1., 1., 1.]])
+ matrix([[1., 1., 1.],
+ [1., 1., 1.]])
>>> np.matlib.ones(2)
- matrix([[ 1., 1.]])
+ matrix([[1., 1.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
@@ -126,11 +126,11 @@ def zeros(shape, dtype=None, order='C'):
--------
>>> import numpy.matlib
>>> np.matlib.zeros((2, 3))
- matrix([[ 0., 0., 0.],
- [ 0., 0., 0.]])
+ matrix([[0., 0., 0.],
+ [0., 0., 0.]])
>>> np.matlib.zeros(2)
- matrix([[ 0., 0.]])
+ matrix([[0., 0.]])
"""
a = ndarray.__new__(matrix, shape, dtype, order=order)
@@ -210,9 +210,9 @@ def eye(n,M=None, k=0, dtype=float, order='C'):
--------
>>> import numpy.matlib
>>> np.matlib.eye(3, k=1, dtype=float)
- matrix([[ 0., 1., 0.],
- [ 0., 0., 1.],
- [ 0., 0., 0.]])
+ matrix([[0., 1., 0.],
+ [0., 0., 1.],
+ [0., 0., 0.]])
"""
return asmatrix(np.eye(n, M=M, k=k, dtype=dtype, order=order))
@@ -243,19 +243,20 @@ def rand(*args):
Examples
--------
+ >>> np.random.seed(123)
>>> import numpy.matlib
>>> np.matlib.rand(2, 3)
- matrix([[ 0.68340382, 0.67926887, 0.83271405],
- [ 0.00793551, 0.20468222, 0.95253525]]) #random
+ matrix([[0.69646919, 0.28613933, 0.22685145],
+ [0.55131477, 0.71946897, 0.42310646]])
>>> np.matlib.rand((2, 3))
- matrix([[ 0.84682055, 0.73626594, 0.11308016],
- [ 0.85429008, 0.3294825 , 0.89139555]]) #random
+ matrix([[0.9807642 , 0.68482974, 0.4809319 ],
+ [0.39211752, 0.34317802, 0.72904971]])
If the first argument is a tuple, other arguments are ignored:
>>> np.matlib.rand((2, 3), 4)
- matrix([[ 0.46898646, 0.15163588, 0.95188261],
- [ 0.59208621, 0.09561818, 0.00583606]]) #random
+ matrix([[0.43857224, 0.0596779 , 0.39804426],
+ [0.73799541, 0.18249173, 0.17545176]])
"""
if isinstance(args[0], tuple):
@@ -294,18 +295,19 @@ def randn(*args):
Examples
--------
+ >>> np.random.seed(123)
>>> import numpy.matlib
>>> np.matlib.randn(1)
- matrix([[-0.09542833]]) #random
+ matrix([[-1.0856306]])
>>> np.matlib.randn(1, 2, 3)
- matrix([[ 0.16198284, 0.0194571 , 0.18312985],
- [-0.7509172 , 1.61055 , 0.45298599]]) #random
+ matrix([[ 0.99734545, 0.2829785 , -1.50629471],
+ [-0.57860025, 1.65143654, -2.42667924]])
Two-by-four matrix of samples from :math:`N(3, 6.25)`:
>>> 2.5 * np.matlib.randn((2, 4)) + 3
- matrix([[ 4.74085004, 8.89381862, 4.09042411, 4.83721922],
- [ 7.52373709, 5.07933944, -2.64043543, 0.45610557]]) #random
+ matrix([[1.92771843, 6.16484065, 0.83314899, 1.30278462],
+ [2.76322758, 6.72847407, 1.40274501, 1.8900451 ]])
"""
if isinstance(args[0], tuple):
diff --git a/numpy/matrixlib/defmatrix.py b/numpy/matrixlib/defmatrix.py
index 7baa401a8..3c7e8ffc2 100644
--- a/numpy/matrixlib/defmatrix.py
+++ b/numpy/matrixlib/defmatrix.py
@@ -7,6 +7,7 @@ import warnings
import ast
import numpy.core.numeric as N
from numpy.core.numeric import concatenate, isscalar
+from numpy.core.overrides import set_module
# While not in __all__, matrix_power used to be defined here, so we import
# it for backward compatibility.
from numpy.linalg import matrix_power
@@ -33,6 +34,8 @@ def _convert_from_string(data):
newdata.append(newrow)
return newdata
+
+@set_module('numpy')
def asmatrix(data, dtype=None):
"""
Interpret the input as a matrix.
@@ -67,6 +70,8 @@ def asmatrix(data, dtype=None):
"""
return matrix(data, dtype=dtype, copy=False)
+
+@set_module('numpy')
class matrix(N.ndarray):
"""
matrix(data, dtype=None, copy=True)
@@ -99,9 +104,9 @@ class matrix(N.ndarray):
Examples
--------
>>> a = np.matrix('1 2; 3 4')
- >>> print(a)
- [[1 2]
- [3 4]]
+ >>> a
+ matrix([[1, 2],
+ [3, 4]])
>>> np.matrix([[1, 2], [3, 4]])
matrix([[1, 2],
@@ -305,12 +310,12 @@ class matrix(N.ndarray):
matrix([[3],
[7]])
>>> x.sum(axis=1, dtype='float')
- matrix([[ 3.],
- [ 7.]])
- >>> out = np.zeros((1, 2), dtype='float')
- >>> x.sum(axis=1, dtype='float', out=out)
- matrix([[ 3.],
- [ 7.]])
+ matrix([[3.],
+ [7.]])
+ >>> out = np.zeros((2, 1), dtype='float')
+ >>> x.sum(axis=1, dtype='float', out=np.asmatrix(out))
+ matrix([[3.],
+ [7.]])
"""
return N.ndarray.sum(self, axis, dtype, out, keepdims=True)._collapse(axis)
@@ -432,7 +437,7 @@ class matrix(N.ndarray):
>>> x.mean()
5.5
>>> x.mean(0)
- matrix([[ 4., 5., 6., 7.]])
+ matrix([[4., 5., 6., 7.]])
>>> x.mean(1)
matrix([[ 1.5],
[ 5.5],
@@ -464,9 +469,9 @@ class matrix(N.ndarray):
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.std()
- 3.4520525295346629
+ 3.4520525295346629 # may vary
>>> x.std(0)
- matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]])
+ matrix([[ 3.26598632, 3.26598632, 3.26598632, 3.26598632]]) # may vary
>>> x.std(1)
matrix([[ 1.11803399],
[ 1.11803399],
@@ -500,11 +505,11 @@ class matrix(N.ndarray):
>>> x.var()
11.916666666666666
>>> x.var(0)
- matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]])
+ matrix([[ 10.66666667, 10.66666667, 10.66666667, 10.66666667]]) # may vary
>>> x.var(1)
- matrix([[ 1.25],
- [ 1.25],
- [ 1.25]])
+ matrix([[1.25],
+ [1.25],
+ [1.25]])
"""
return N.ndarray.var(self, axis, dtype, out, ddof, keepdims=True)._collapse(axis)
@@ -786,7 +791,8 @@ class matrix(N.ndarray):
"""
return N.ndarray.ptp(self, axis, out)._align(axis)
- def getI(self):
+ @property
+ def I(self):
"""
Returns the (multiplicative) inverse of invertible `self`.
@@ -819,7 +825,7 @@ class matrix(N.ndarray):
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
>>> m.getI() * m
- matrix([[ 1., 0.],
+ matrix([[ 1., 0.], # may vary
[ 0., 1.]])
"""
@@ -830,7 +836,8 @@ class matrix(N.ndarray):
from numpy.dual import pinv as func
return asmatrix(func(self))
- def getA(self):
+ @property
+ def A(self):
"""
Return `self` as an `ndarray` object.
@@ -859,7 +866,8 @@ class matrix(N.ndarray):
"""
return self.__array__()
- def getA1(self):
+ @property
+ def A1(self):
"""
Return `self` as a flattened `ndarray`.
@@ -881,7 +889,8 @@ class matrix(N.ndarray):
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]])
>>> x.getA1()
- array([ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
+ array([ 0, 1, 2, ..., 9, 10, 11])
+
"""
return self.__array__().ravel()
@@ -925,8 +934,8 @@ class matrix(N.ndarray):
"""
return N.ndarray.ravel(self, order=order)
-
- def getT(self):
+ @property
+ def T(self):
"""
Returns the transpose of the matrix.
@@ -958,7 +967,8 @@ class matrix(N.ndarray):
"""
return self.transpose()
- def getH(self):
+ @property
+ def H(self):
"""
Returns the (complex) conjugate transpose of `self`.
@@ -981,10 +991,10 @@ class matrix(N.ndarray):
[ 4. -4.j, 5. -5.j, 6. -6.j, 7. -7.j],
[ 8. -8.j, 9. -9.j, 10.-10.j, 11.-11.j]])
>>> z.getH()
- matrix([[ 0. +0.j, 4. +4.j, 8. +8.j],
- [ 1. +1.j, 5. +5.j, 9. +9.j],
- [ 2. +2.j, 6. +6.j, 10.+10.j],
- [ 3. +3.j, 7. +7.j, 11.+11.j]])
+ matrix([[ 0. -0.j, 4. +4.j, 8. +8.j],
+ [ 1. +1.j, 5. +5.j, 9. +9.j],
+ [ 2. +2.j, 6. +6.j, 10.+10.j],
+ [ 3. +3.j, 7. +7.j, 11.+11.j]])
"""
if issubclass(self.dtype.type, N.complexfloating):
@@ -992,11 +1002,12 @@ class matrix(N.ndarray):
else:
return self.transpose()
- T = property(getT, None)
- A = property(getA, None)
- A1 = property(getA1, None)
- H = property(getH, None)
- I = property(getI, None)
+ # kept for compatibility
+ getT = T.fget
+ getA = A.fget
+ getA1 = A1.fget
+ getH = H.fget
+ getI = I.fget
def _from_string(str, gdict, ldict):
rows = str.split(';')
@@ -1023,6 +1034,7 @@ def _from_string(str, gdict, ldict):
return concatenate(rowtup, axis=0)
+@set_module('numpy')
def bmat(obj, ldict=None, gdict=None):
"""
Build a matrix object from a string, nested sequence, or array.
diff --git a/numpy/matrixlib/tests/test_defmatrix.py b/numpy/matrixlib/tests/test_defmatrix.py
index f8a8ad511..aa6e08d64 100644
--- a/numpy/matrixlib/tests/test_defmatrix.py
+++ b/numpy/matrixlib/tests/test_defmatrix.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
diff --git a/numpy/matrixlib/tests/test_masked_matrix.py b/numpy/matrixlib/tests/test_masked_matrix.py
index 7f84bb2c9..d3911d2e1 100644
--- a/numpy/matrixlib/tests/test_masked_matrix.py
+++ b/numpy/matrixlib/tests/test_masked_matrix.py
@@ -1,8 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pickle
-import pytest
-
import numpy as np
from numpy.ma.testutils import (assert_, assert_equal, assert_raises,
assert_array_equal)
@@ -10,6 +7,7 @@ from numpy.ma.core import (masked_array, masked_values, masked, allequal,
MaskType, getmask, MaskedArray, nomask,
log, add, hypot, divide)
from numpy.ma.extras import mr_
+from numpy.compat import pickle
class MMatrix(MaskedArray, np.matrix,):
@@ -24,11 +22,11 @@ class MMatrix(MaskedArray, np.matrix,):
MaskedArray.__array_finalize__(self, obj)
return
- def _get_series(self):
+ @property
+ def _series(self):
_view = self.view(MaskedArray)
_view._sharedmask = False
return _view
- _series = property(fget=_get_series)
class TestMaskedMatrix(object):
diff --git a/numpy/matrixlib/tests/test_matrix_linalg.py b/numpy/matrixlib/tests/test_matrix_linalg.py
index 8d31ec5b0..6fc733c2e 100644
--- a/numpy/matrixlib/tests/test_matrix_linalg.py
+++ b/numpy/matrixlib/tests/test_matrix_linalg.py
@@ -1,8 +1,6 @@
""" Test functions for linalg module using the matrix class."""
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.linalg.tests.test_linalg import (
diff --git a/numpy/matrixlib/tests/test_multiarray.py b/numpy/matrixlib/tests/test_multiarray.py
index 8de0a7c6a..6d84bd477 100644
--- a/numpy/matrixlib/tests/test_multiarray.py
+++ b/numpy/matrixlib/tests/test_multiarray.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.testing import assert_, assert_equal, assert_array_equal
diff --git a/numpy/matrixlib/tests/test_numeric.py b/numpy/matrixlib/tests/test_numeric.py
index e9f44e747..95e1c8001 100644
--- a/numpy/matrixlib/tests/test_numeric.py
+++ b/numpy/matrixlib/tests/test_numeric.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.testing import assert_equal
diff --git a/numpy/matrixlib/tests/test_regression.py b/numpy/matrixlib/tests/test_regression.py
index 88654c76a..70e147279 100644
--- a/numpy/matrixlib/tests/test_regression.py
+++ b/numpy/matrixlib/tests/test_regression.py
@@ -1,7 +1,5 @@
from __future__ import division, absolute_import, print_function
-import pytest
-
import numpy as np
from numpy.testing import assert_, assert_equal, assert_raises
diff --git a/numpy/polynomial/_polybase.py b/numpy/polynomial/_polybase.py
index c28e77e69..bfa030714 100644
--- a/numpy/polynomial/_polybase.py
+++ b/numpy/polynomial/_polybase.py
@@ -8,7 +8,7 @@ abc module from the stdlib, hence it is only available for Python >= 2.6.
"""
from __future__ import division, absolute_import, print_function
-from abc import ABCMeta, abstractmethod, abstractproperty
+import abc
import numbers
import numpy as np
@@ -16,7 +16,7 @@ from . import polyutils as pu
__all__ = ['ABCPolyBase']
-class ABCPolyBase(object):
+class ABCPolyBase(abc.ABC):
"""An abstract base class for immutable series classes.
ABCPolyBase provides the standard Python numerical methods
@@ -59,7 +59,6 @@ class ABCPolyBase(object):
Default window of the class.
"""
- __metaclass__ = ABCMeta
# Not hashable
__hash__ = None
@@ -70,68 +69,84 @@ class ABCPolyBase(object):
# Limit runaway size. T_n^m has degree n*m
maxpower = 100
- @abstractproperty
+ @property
+ @abc.abstractmethod
def domain(self):
pass
- @abstractproperty
+ @property
+ @abc.abstractmethod
def window(self):
pass
- @abstractproperty
+ @property
+ @abc.abstractmethod
def nickname(self):
pass
- @abstractproperty
+ @property
+ @abc.abstractmethod
def basis_name(self):
pass
- @abstractmethod
- def _add(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _add(c1, c2):
pass
- @abstractmethod
- def _sub(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _sub(c1, c2):
pass
- @abstractmethod
- def _mul(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _mul(c1, c2):
pass
- @abstractmethod
- def _div(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _div(c1, c2):
pass
- @abstractmethod
- def _pow(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _pow(c, pow, maxpower=None):
pass
- @abstractmethod
- def _val(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _val(x, c):
pass
- @abstractmethod
- def _int(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _int(c, m, k, lbnd, scl):
pass
- @abstractmethod
- def _der(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _der(c, m, scl):
pass
- @abstractmethod
- def _fit(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _fit(x, y, deg, rcond, full):
pass
- @abstractmethod
- def _line(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _line(off, scl):
pass
- @abstractmethod
- def _roots(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _roots(c):
pass
- @abstractmethod
- def _fromroots(self):
+ @staticmethod
+ @abc.abstractmethod
+ def _fromroots(r):
pass
def has_samecoef(self, other):
diff --git a/numpy/polynomial/chebyshev.py b/numpy/polynomial/chebyshev.py
index f1ddc9b06..093eb0048 100644
--- a/numpy/polynomial/chebyshev.py
+++ b/numpy/polynomial/chebyshev.py
@@ -225,15 +225,15 @@ def _zseries_div(z1, z2):
"""
z1 = z1.copy()
z2 = z2.copy()
- len1 = len(z1)
- len2 = len(z2)
- if len2 == 1:
+ lc1 = len(z1)
+ lc2 = len(z2)
+ if lc2 == 1:
z1 /= z2
return z1, z1[:1]*0
- elif len1 < len2:
+ elif lc1 < lc2:
return z1[:1]*0, z1
else:
- dlen = len1 - len2
+ dlen = lc1 - lc2
scl = z2[0]
z2 /= scl
quo = np.empty(dlen + 1, dtype=z1.dtype)
@@ -244,16 +244,16 @@ def _zseries_div(z1, z2):
quo[i] = z1[i]
quo[dlen - i] = r
tmp = r*z2
- z1[i:i+len2] -= tmp
- z1[j:j+len2] -= tmp
+ z1[i:i+lc2] -= tmp
+ z1[j:j+lc2] -= tmp
i += 1
j -= 1
r = z1[i]
quo[i] = r
tmp = r*z2
- z1[i:i+len2] -= tmp
+ z1[i:i+lc2] -= tmp
quo /= scl
- rem = z1[i+1:i-1+len2].copy()
+ rem = z1[i+1:i-1+lc2].copy()
return quo, rem
@@ -361,12 +361,12 @@ def poly2cheb(pol):
>>> from numpy import polynomial as P
>>> p = P.Polynomial(range(4))
>>> p
- Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> c = p.convert(kind=P.Chebyshev)
>>> c
- Chebyshev([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
+ Chebyshev([1. , 3.25, 1. , 0.75], domain=[-1., 1.], window=[-1., 1.])
>>> P.chebyshev.poly2cheb(range(4))
- array([ 1. , 3.25, 1. , 0.75])
+ array([1. , 3.25, 1. , 0.75])
"""
[pol] = pu.as_series([pol])
@@ -413,12 +413,12 @@ def cheb2poly(c):
>>> from numpy import polynomial as P
>>> c = P.Chebyshev(range(4))
>>> c
- Chebyshev([ 0., 1., 2., 3.], [-1., 1.])
+ Chebyshev([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> p = c.convert(kind=P.Polynomial)
>>> p
- Polynomial([ -2., -8., 4., 12.], [-1., 1.])
+ Polynomial([-2., -8., 4., 12.], domain=[-1., 1.], window=[-1., 1.])
>>> P.chebyshev.cheb2poly(range(4))
- array([ -2., -8., 4., 12.])
+ array([-2., -8., 4., 12.])
"""
from .polynomial import polyadd, polysub, polymulx
@@ -528,8 +528,7 @@ def chebfromroots(roots):
See Also
--------
- polyfromroots, legfromroots, lagfromroots, hermfromroots,
- hermefromroots.
+ polyfromroots, legfromroots, lagfromroots, hermfromroots, hermefromroots
Examples
--------
@@ -538,24 +537,10 @@ def chebfromroots(roots):
array([ 0. , -0.25, 0. , 0.25])
>>> j = complex(0,1)
>>> C.chebfromroots((-j,j)) # x^2 + 1 relative to the standard basis
- array([ 1.5+0.j, 0.0+0.j, 0.5+0.j])
+ array([1.5+0.j, 0. +0.j, 0.5+0.j])
"""
- if len(roots) == 0:
- return np.ones(1)
- else:
- [roots] = pu.as_series([roots], trim=False)
- roots.sort()
- p = [chebline(-r, 1) for r in roots]
- n = len(p)
- while n > 1:
- m, r = divmod(n, 2)
- tmp = [chebmul(p[i], p[i+m]) for i in range(m)]
- if r:
- tmp[0] = chebmul(tmp[0], p[-1])
- p = tmp
- n = m
- return p[0]
+ return pu._fromroots(chebline, chebmul, roots)
def chebadd(c1, c2):
@@ -594,18 +579,10 @@ def chebadd(c1, c2):
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebadd(c1,c2)
- array([ 4., 4., 4.])
+ array([4., 4., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] += c2
- ret = c1
- else:
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._add(c1, c2)
def chebsub(c1, c2):
@@ -649,16 +626,7 @@ def chebsub(c1, c2):
array([ 2., 0., -2.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] -= c2
- ret = c1
- else:
- c2 = -c2
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._sub(c1, c2)
def chebmulx(c):
@@ -688,7 +656,7 @@ def chebmulx(c):
--------
>>> from numpy.polynomial import chebyshev as C
>>> C.chebmulx([1,2,3])
- array([ 1., 2.5, 3., 1.5, 2.])
+ array([1. , 2.5, 1. , 1.5])
"""
# c is a trimmed copy
@@ -796,10 +764,10 @@ def chebdiv(c1, c2):
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> C.chebdiv(c1,c2) # quotient "intuitive," remainder not
- (array([ 3.]), array([-8., -4.]))
+ (array([3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> C.chebdiv(c2,c1) # neither "intuitive"
- (array([ 0., 2.]), array([-2., -4.]))
+ (array([0., 2.]), array([-2., -4.]))
"""
# c1, c2 are trimmed copies
@@ -807,6 +775,7 @@ def chebdiv(c1, c2):
if c2[-1] == 0:
raise ZeroDivisionError()
+ # note: this is more efficient than `pu._div(chebmul, c1, c2)`
lc1 = len(c1)
lc2 = len(c2)
if lc1 < lc2:
@@ -853,9 +822,12 @@ def chebpow(c, pow, maxpower=16):
--------
>>> from numpy.polynomial import chebyshev as C
>>> C.chebpow([1, 2, 3, 4], 2)
- array([15.5, 22. , 16. , 14. , 12.5, 12. , 8. ])
+ array([15.5, 22. , 16. , ..., 12.5, 12. , 8. ])
"""
+ # note: this is more efficient than `pu._pow(chebmul, c1, c2)`, as it
+ # avoids converting between z and c series repeatedly
+
# c is a trimmed copy
[c] = pu.as_series([c])
power = int(pow)
@@ -928,26 +900,22 @@ def chebder(c, m=1, scl=1, axis=0):
>>> from numpy.polynomial import chebyshev as C
>>> c = (1,2,3,4)
>>> C.chebder(c)
- array([ 14., 12., 24.])
+ array([14., 12., 24.])
>>> C.chebder(c,3)
- array([ 96.])
+ array([96.])
>>> C.chebder(c,scl=-1)
array([-14., -12., -24.])
>>> C.chebder(c,2,-1)
- array([ 12., 96.])
+ array([12., 96.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of derivation must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -1048,8 +1016,8 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
>>> C.chebint(c)
array([ 0.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,3)
- array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667,
- 0.00625 ])
+ array([ 0.03125 , -0.1875 , 0.04166667, -0.05208333, 0.01041667, # may vary
+ 0.00625 ])
>>> C.chebint(c, k=3)
array([ 3.5, -0.5, 0.5, 0.5])
>>> C.chebint(c,lbnd=-2)
@@ -1058,15 +1026,13 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
array([-1., 1., -1., -1.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of integration must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
@@ -1075,8 +1041,6 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -1096,7 +1060,7 @@ def chebint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
if n > 1:
tmp[2] = c[1]/4
for j in range(2, n):
- t = c[j]/(2*j + 1)
+ t = c[j]/(2*j + 1) # FIXME: t never used
tmp[j + 1] = c[j]/(2*(j + 1))
tmp[j - 1] -= c[j]/(2*(j - 1))
tmp[0] += k[i] - chebval(lbnd, tmp)
@@ -1167,7 +1131,7 @@ def chebval(x, c, tensor=True):
--------
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
@@ -1238,14 +1202,7 @@ def chebval2d(x, y, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y = np.array((x, y), copy=0)
- except Exception:
- raise ValueError('x, y are incompatible')
-
- c = chebval(x, c)
- c = chebval(y, c, tensor=False)
- return c
+ return pu._valnd(chebval, c, x, y)
def chebgrid2d(x, y, c):
@@ -1298,9 +1255,7 @@ def chebgrid2d(x, y, c):
.. versionadded:: 1.7.0
"""
- c = chebval(x, c)
- c = chebval(y, c)
- return c
+ return pu._gridnd(chebval, c, x, y)
def chebval3d(x, y, z, c):
@@ -1351,15 +1306,7 @@ def chebval3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y, z = np.array((x, y, z), copy=0)
- except Exception:
- raise ValueError('x, y, z are incompatible')
-
- c = chebval(x, c)
- c = chebval(y, c, tensor=False)
- c = chebval(z, c, tensor=False)
- return c
+ return pu._valnd(chebval, c, x, y, z)
def chebgrid3d(x, y, z, c):
@@ -1415,10 +1362,7 @@ def chebgrid3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- c = chebval(x, c)
- c = chebval(y, c)
- c = chebval(z, c)
- return c
+ return pu._gridnd(chebval, c, x, y, z)
def chebvander(x, deg):
@@ -1456,13 +1400,11 @@ def chebvander(x, deg):
the converted `x`.
"""
- ideg = int(deg)
- if ideg != deg:
- raise ValueError("deg must be integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
@@ -1518,7 +1460,7 @@ def chebvander2d(x, y, deg):
See Also
--------
- chebvander, chebvander3d. chebval2d, chebval3d
+ chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
@@ -1526,17 +1468,7 @@ def chebvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy = ideg
- x, y = np.array((x, y), copy=0) + 0.0
-
- vx = chebvander(x, degx)
- vy = chebvander(y, degy)
- v = vx[..., None]*vy[..., None,:]
- return v.reshape(v.shape[:-2] + (-1,))
+ return pu._vander2d(chebvander, x, y, deg)
def chebvander3d(x, y, z, deg):
@@ -1582,7 +1514,7 @@ def chebvander3d(x, y, z, deg):
See Also
--------
- chebvander, chebvander3d. chebval2d, chebval3d
+ chebvander, chebvander3d, chebval2d, chebval3d
Notes
-----
@@ -1590,18 +1522,7 @@ def chebvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy, degz = ideg
- x, y, z = np.array((x, y, z), copy=0) + 0.0
-
- vx = chebvander(x, degx)
- vy = chebvander(y, degy)
- vz = chebvander(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- return v.reshape(v.shape[:-3] + (-1,))
+ return pu._vander3d(chebvander, x, y, z, deg)
def chebfit(x, y, deg, rcond=None, full=False, w=None):
@@ -1674,7 +1595,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
warnings can be turned off by
>>> import warnings
- >>> warnings.simplefilter('ignore', RankWarning)
+ >>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
@@ -1723,81 +1644,7 @@ def chebfit(x, y, deg, rcond=None, full=False, w=None):
--------
"""
- x = np.asarray(x) + 0.0
- y = np.asarray(y) + 0.0
- deg = np.asarray(deg)
-
- # check arguments.
- if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
- raise TypeError("deg must be an int or non-empty 1-D array of int")
- if deg.min() < 0:
- raise ValueError("expected deg >= 0")
- if x.ndim != 1:
- raise TypeError("expected 1D vector for x")
- if x.size == 0:
- raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2:
- raise TypeError("expected 1D or 2D array for y")
- if len(x) != len(y):
- raise TypeError("expected x and y to have same length")
-
- if deg.ndim == 0:
- lmax = deg
- order = lmax + 1
- van = chebvander(x, lmax)
- else:
- deg = np.sort(deg)
- lmax = deg[-1]
- order = len(deg)
- van = chebvander(x, lmax)[:, deg]
-
- # set up the least squares matrices in transposed form
- lhs = van.T
- rhs = y.T
- if w is not None:
- w = np.asarray(w) + 0.0
- if w.ndim != 1:
- raise TypeError("expected 1D vector for w")
- if len(x) != len(w):
- raise TypeError("expected x and w to have same length")
- # apply weights. Don't use inplace operations as they
- # can cause problems with NA.
- lhs = lhs * w
- rhs = rhs * w
-
- # set rcond
- if rcond is None:
- rcond = len(x)*np.finfo(x.dtype).eps
-
- # Determine the norms of the design matrix columns.
- if issubclass(lhs.dtype.type, np.complexfloating):
- scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
- else:
- scl = np.sqrt(np.square(lhs).sum(1))
- scl[scl == 0] = 1
-
- # Solve the least squares problem.
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
- c = (c.T/scl).T
-
- # Expand c to include non-fitted coefficients which are set to zero
- if deg.ndim > 0:
- if c.ndim == 2:
- cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
- else:
- cc = np.zeros(lmax + 1, dtype=c.dtype)
- cc[deg] = c
- c = cc
-
- # warn on rank reduction
- if rank != order and not full:
- msg = "The fit may be poorly conditioned"
- warnings.warn(msg, pu.RankWarning, stacklevel=2)
-
- if full:
- return c, [resids, rank, s, rcond]
- else:
- return c
+ return pu._fit(chebvander, x, y, deg, rcond, full, w)
def chebcompanion(c):
@@ -1885,7 +1732,7 @@ def chebroots(c):
--------
>>> import numpy.polynomial.chebyshev as cheb
>>> cheb.chebroots((-1, 1,-1, 1)) # T3 - T2 + T1 - T0 has real roots
- array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00])
+ array([ -5.00000000e-01, 2.60860684e-17, 1.00000000e+00]) # may vary
"""
# c is a trimmed copy
@@ -1895,7 +1742,8 @@ def chebroots(c):
if len(c) == 2:
return np.array([-c[0]/c[1]])
- m = chebcompanion(c)
+ # rotated companion matrix reduces error
+ m = chebcompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
@@ -2003,9 +1851,9 @@ def chebgauss(deg):
.. math:: w_i = \\pi / n
"""
- ideg = int(deg)
- if ideg != deg or ideg < 1:
- raise ValueError("deg must be a non-negative integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
x = np.cos(np.pi * np.arange(1, 2*ideg, 2) / (2.0*ideg))
w = np.ones(ideg)*(np.pi/ideg)
diff --git a/numpy/polynomial/hermite.py b/numpy/polynomial/hermite.py
index 2aed4b34f..0011fa3b7 100644
--- a/numpy/polynomial/hermite.py
+++ b/numpy/polynomial/hermite.py
@@ -114,7 +114,7 @@ def poly2herm(pol):
--------
>>> from numpy.polynomial.hermite import poly2herm
>>> poly2herm(np.arange(4))
- array([ 1. , 2.75 , 0.5 , 0.375])
+ array([1. , 2.75 , 0.5 , 0.375])
"""
[pol] = pu.as_series([pol])
@@ -160,7 +160,7 @@ def herm2poly(c):
--------
>>> from numpy.polynomial.hermite import herm2poly
>>> herm2poly([ 1. , 2.75 , 0.5 , 0.375])
- array([ 0., 1., 2., 3.])
+ array([0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
@@ -272,35 +272,20 @@ def hermfromroots(roots):
See Also
--------
- polyfromroots, legfromroots, lagfromroots, chebfromroots,
- hermefromroots.
+ polyfromroots, legfromroots, lagfromroots, chebfromroots, hermefromroots
Examples
--------
>>> from numpy.polynomial.hermite import hermfromroots, hermval
>>> coef = hermfromroots((-1, 0, 1))
>>> hermval((-1, 0, 1), coef)
- array([ 0., 0., 0.])
+ array([0., 0., 0.])
>>> coef = hermfromroots((-1j, 1j))
>>> hermval((-1j, 1j), coef)
- array([ 0.+0.j, 0.+0.j])
+ array([0.+0.j, 0.+0.j])
"""
- if len(roots) == 0:
- return np.ones(1)
- else:
- [roots] = pu.as_series([roots], trim=False)
- roots.sort()
- p = [hermline(-r, 1) for r in roots]
- n = len(p)
- while n > 1:
- m, r = divmod(n, 2)
- tmp = [hermmul(p[i], p[i+m]) for i in range(m)]
- if r:
- tmp[0] = hermmul(tmp[0], p[-1])
- p = tmp
- n = m
- return p[0]
+ return pu._fromroots(hermline, hermmul, roots)
def hermadd(c1, c2):
@@ -337,18 +322,10 @@ def hermadd(c1, c2):
--------
>>> from numpy.polynomial.hermite import hermadd
>>> hermadd([1, 2, 3], [1, 2, 3, 4])
- array([ 2., 4., 6., 4.])
+ array([2., 4., 6., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] += c2
- ret = c1
- else:
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._add(c1, c2)
def hermsub(c1, c2):
@@ -385,19 +362,10 @@ def hermsub(c1, c2):
--------
>>> from numpy.polynomial.hermite import hermsub
>>> hermsub([1, 2, 3, 4], [1, 2, 3])
- array([ 0., 0., 0., 4.])
+ array([0., 0., 0., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] -= c2
- ret = c1
- else:
- c2 = -c2
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._sub(c1, c2)
def hermmulx(c):
@@ -435,7 +403,7 @@ def hermmulx(c):
--------
>>> from numpy.polynomial.hermite import hermmulx
>>> hermmulx([1, 2, 3])
- array([ 2. , 6.5, 1. , 1.5])
+ array([2. , 6.5, 1. , 1.5])
"""
# c is a trimmed copy
@@ -488,7 +456,7 @@ def hermmul(c1, c2):
--------
>>> from numpy.polynomial.hermite import hermmul
>>> hermmul([1, 2, 3], [0, 1, 2])
- array([ 52., 29., 52., 7., 6.])
+ array([52., 29., 52., 7., 6.])
"""
# s1, s2 are trimmed copies
@@ -557,33 +525,14 @@ def hermdiv(c1, c2):
--------
>>> from numpy.polynomial.hermite import hermdiv
>>> hermdiv([ 52., 29., 52., 7., 6.], [0, 1, 2])
- (array([ 1., 2., 3.]), array([ 0.]))
+ (array([1., 2., 3.]), array([0.]))
>>> hermdiv([ 54., 31., 52., 7., 6.], [0, 1, 2])
- (array([ 1., 2., 3.]), array([ 2., 2.]))
+ (array([1., 2., 3.]), array([2., 2.]))
>>> hermdiv([ 53., 30., 52., 7., 6.], [0, 1, 2])
- (array([ 1., 2., 3.]), array([ 1., 1.]))
+ (array([1., 2., 3.]), array([1., 1.]))
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if c2[-1] == 0:
- raise ZeroDivisionError()
-
- lc1 = len(c1)
- lc2 = len(c2)
- if lc1 < lc2:
- return c1[:1]*0, c1
- elif lc2 == 1:
- return c1/c2[-1], c1[:1]*0
- else:
- quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
- rem = c1
- for i in range(lc1 - lc2, - 1, -1):
- p = hermmul([0]*i + [1], c2)
- q = rem[-1]/p[-1]
- rem = rem[:-1] - q*p[:-1]
- quo[i] = q
- return quo, pu.trimseq(rem)
+ return pu._div(hermmul, c1, c2)
def hermpow(c, pow, maxpower=16):
@@ -617,27 +566,10 @@ def hermpow(c, pow, maxpower=16):
--------
>>> from numpy.polynomial.hermite import hermpow
>>> hermpow([1, 2, 3], 2)
- array([ 81., 52., 82., 12., 9.])
+ array([81., 52., 82., 12., 9.])
"""
- # c is a trimmed copy
- [c] = pu.as_series([c])
- power = int(pow)
- if power != pow or power < 0:
- raise ValueError("Power must be a non-negative integer.")
- elif maxpower is not None and power > maxpower:
- raise ValueError("Power is too large")
- elif power == 0:
- return np.array([1], dtype=c.dtype)
- elif power == 1:
- return c
- else:
- # This can be made more efficient by using powers of two
- # in the usual way.
- prd = c
- for i in range(2, power + 1):
- prd = hermmul(prd, c)
- return prd
+ return pu._pow(hermmul, c, pow, maxpower)
def hermder(c, m=1, scl=1, axis=0):
@@ -690,22 +622,18 @@ def hermder(c, m=1, scl=1, axis=0):
--------
>>> from numpy.polynomial.hermite import hermder
>>> hermder([ 1. , 0.5, 0.5, 0.5])
- array([ 1., 2., 3.])
+ array([1., 2., 3.])
>>> hermder([-0.5, 1./2., 1./8., 1./12., 1./16.], m=2)
- array([ 1., 2., 3.])
+ array([1., 2., 3.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of derivation must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -799,26 +727,24 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
--------
>>> from numpy.polynomial.hermite import hermint
>>> hermint([1,2,3]) # integrate once, value 0 at 0.
- array([ 1. , 0.5, 0.5, 0.5])
+ array([1. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2) # integrate twice, value & deriv 0 at 0
- array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ])
+ array([-0.5 , 0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary
>>> hermint([1,2,3], k=1) # integrate once, value 1 at 0.
- array([ 2. , 0.5, 0.5, 0.5])
+ array([2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], lbnd=-1) # integrate once, value 0 at -1
array([-2. , 0.5, 0.5, 0.5])
>>> hermint([1,2,3], m=2, k=[1,2], lbnd=-1)
- array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ])
+ array([ 1.66666667, -0.5 , 0.125 , 0.08333333, 0.0625 ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of integration must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
@@ -827,8 +753,6 @@ def hermint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -918,11 +842,11 @@ def hermval(x, c, tensor=True):
>>> hermval(1, coef)
11.0
>>> hermval([[1,2],[3,4]], coef)
- array([[ 11., 51.],
- [ 115., 203.]])
+ array([[ 11., 51.],
+ [115., 203.]])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
@@ -995,14 +919,7 @@ def hermval2d(x, y, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y = np.array((x, y), copy=0)
- except Exception:
- raise ValueError('x, y are incompatible')
-
- c = hermval(x, c)
- c = hermval(y, c, tensor=False)
- return c
+ return pu._valnd(hermval, c, x, y)
def hermgrid2d(x, y, c):
@@ -1055,9 +972,7 @@ def hermgrid2d(x, y, c):
.. versionadded:: 1.7.0
"""
- c = hermval(x, c)
- c = hermval(y, c)
- return c
+ return pu._gridnd(hermval, c, x, y)
def hermval3d(x, y, z, c):
@@ -1108,15 +1023,7 @@ def hermval3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y, z = np.array((x, y, z), copy=0)
- except Exception:
- raise ValueError('x, y, z are incompatible')
-
- c = hermval(x, c)
- c = hermval(y, c, tensor=False)
- c = hermval(z, c, tensor=False)
- return c
+ return pu._valnd(hermval, c, x, y, z)
def hermgrid3d(x, y, z, c):
@@ -1172,10 +1079,7 @@ def hermgrid3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- c = hermval(x, c)
- c = hermval(y, c)
- c = hermval(z, c)
- return c
+ return pu._gridnd(hermval, c, x, y, z)
def hermvander(x, deg):
@@ -1222,13 +1126,11 @@ def hermvander(x, deg):
[ 1., 2., 2., -4.]])
"""
- ideg = int(deg)
- if ideg != deg:
- raise ValueError("deg must be integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
@@ -1283,7 +1185,7 @@ def hermvander2d(x, y, deg):
See Also
--------
- hermvander, hermvander3d. hermval2d, hermval3d
+ hermvander, hermvander3d, hermval2d, hermval3d
Notes
-----
@@ -1291,17 +1193,7 @@ def hermvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy = ideg
- x, y = np.array((x, y), copy=0) + 0.0
-
- vx = hermvander(x, degx)
- vy = hermvander(y, degy)
- v = vx[..., None]*vy[..., None,:]
- return v.reshape(v.shape[:-2] + (-1,))
+ return pu._vander2d(hermvander, x, y, deg)
def hermvander3d(x, y, z, deg):
@@ -1347,7 +1239,7 @@ def hermvander3d(x, y, z, deg):
See Also
--------
- hermvander, hermvander3d. hermval2d, hermval3d
+ hermvander, hermvander3d, hermval2d, hermval3d
Notes
-----
@@ -1355,18 +1247,7 @@ def hermvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy, degz = ideg
- x, y, z = np.array((x, y, z), copy=0) + 0.0
-
- vx = hermvander(x, degx)
- vy = hermvander(y, degy)
- vz = hermvander(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- return v.reshape(v.shape[:-3] + (-1,))
+ return pu._vander3d(hermvander, x, y, z, deg)
def hermfit(x, y, deg, rcond=None, full=False, w=None):
@@ -1437,7 +1318,7 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
warnings can be turned off by
>>> import warnings
- >>> warnings.simplefilter('ignore', RankWarning)
+ >>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
@@ -1490,84 +1371,10 @@ def hermfit(x, y, deg, rcond=None, full=False, w=None):
>>> err = np.random.randn(len(x))/10
>>> y = hermval(x, [1, 2, 3]) + err
>>> hermfit(x, y, 2)
- array([ 0.97902637, 1.99849131, 3.00006 ])
+ array([1.0218, 1.9986, 2.9999]) # may vary
"""
- x = np.asarray(x) + 0.0
- y = np.asarray(y) + 0.0
- deg = np.asarray(deg)
-
- # check arguments.
- if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
- raise TypeError("deg must be an int or non-empty 1-D array of int")
- if deg.min() < 0:
- raise ValueError("expected deg >= 0")
- if x.ndim != 1:
- raise TypeError("expected 1D vector for x")
- if x.size == 0:
- raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2:
- raise TypeError("expected 1D or 2D array for y")
- if len(x) != len(y):
- raise TypeError("expected x and y to have same length")
-
- if deg.ndim == 0:
- lmax = deg
- order = lmax + 1
- van = hermvander(x, lmax)
- else:
- deg = np.sort(deg)
- lmax = deg[-1]
- order = len(deg)
- van = hermvander(x, lmax)[:, deg]
-
- # set up the least squares matrices in transposed form
- lhs = van.T
- rhs = y.T
- if w is not None:
- w = np.asarray(w) + 0.0
- if w.ndim != 1:
- raise TypeError("expected 1D vector for w")
- if len(x) != len(w):
- raise TypeError("expected x and w to have same length")
- # apply weights. Don't use inplace operations as they
- # can cause problems with NA.
- lhs = lhs * w
- rhs = rhs * w
-
- # set rcond
- if rcond is None:
- rcond = len(x)*np.finfo(x.dtype).eps
-
- # Determine the norms of the design matrix columns.
- if issubclass(lhs.dtype.type, np.complexfloating):
- scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
- else:
- scl = np.sqrt(np.square(lhs).sum(1))
- scl[scl == 0] = 1
-
- # Solve the least squares problem.
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
- c = (c.T/scl).T
-
- # Expand c to include non-fitted coefficients which are set to zero
- if deg.ndim > 0:
- if c.ndim == 2:
- cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
- else:
- cc = np.zeros(lmax+1, dtype=c.dtype)
- cc[deg] = c
- c = cc
-
- # warn on rank reduction
- if rank != order and not full:
- msg = "The fit may be poorly conditioned"
- warnings.warn(msg, pu.RankWarning, stacklevel=2)
-
- if full:
- return c, [resids, rank, s, rcond]
- else:
- return c
+ return pu._fit(hermvander, x, y, deg, rcond, full, w)
def hermcompanion(c):
@@ -1656,9 +1463,9 @@ def hermroots(c):
>>> from numpy.polynomial.hermite import hermroots, hermfromroots
>>> coef = hermfromroots([-1, 0, 1])
>>> coef
- array([ 0. , 0.25 , 0. , 0.125])
+ array([0. , 0.25 , 0. , 0.125])
>>> hermroots(coef)
- array([ -1.00000000e+00, -1.38777878e-17, 1.00000000e+00])
+ array([-1.00000000e+00, -1.38777878e-17, 1.00000000e+00])
"""
# c is a trimmed copy
@@ -1668,7 +1475,8 @@ def hermroots(c):
if len(c) == 2:
return np.array([-.5*c[0]/c[1]])
- m = hermcompanion(c)
+ # rotated companion matrix reduces error
+ m = hermcompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
@@ -1704,7 +1512,7 @@ def _normed_hermite_n(x, n):
"""
if n == 0:
- return np.ones(x.shape)/np.sqrt(np.sqrt(np.pi))
+ return np.full(x.shape, 1/np.sqrt(np.sqrt(np.pi)))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(np.pi))
@@ -1753,9 +1561,9 @@ def hermgauss(deg):
the right value when integrating 1.
"""
- ideg = int(deg)
- if ideg != deg or ideg < 1:
- raise ValueError("deg must be a non-negative integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
diff --git a/numpy/polynomial/hermite_e.py b/numpy/polynomial/hermite_e.py
index d4520ad6c..b1cc2d3ab 100644
--- a/numpy/polynomial/hermite_e.py
+++ b/numpy/polynomial/hermite_e.py
@@ -161,7 +161,7 @@ def herme2poly(c):
--------
>>> from numpy.polynomial.hermite_e import herme2poly
>>> herme2poly([ 2., 10., 2., 3.])
- array([ 0., 1., 2., 3.])
+ array([0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
@@ -273,35 +273,20 @@ def hermefromroots(roots):
See Also
--------
- polyfromroots, legfromroots, lagfromroots, hermfromroots,
- chebfromroots.
+ polyfromroots, legfromroots, lagfromroots, hermfromroots, chebfromroots
Examples
--------
>>> from numpy.polynomial.hermite_e import hermefromroots, hermeval
>>> coef = hermefromroots((-1, 0, 1))
>>> hermeval((-1, 0, 1), coef)
- array([ 0., 0., 0.])
+ array([0., 0., 0.])
>>> coef = hermefromroots((-1j, 1j))
>>> hermeval((-1j, 1j), coef)
- array([ 0.+0.j, 0.+0.j])
+ array([0.+0.j, 0.+0.j])
"""
- if len(roots) == 0:
- return np.ones(1)
- else:
- [roots] = pu.as_series([roots], trim=False)
- roots.sort()
- p = [hermeline(-r, 1) for r in roots]
- n = len(p)
- while n > 1:
- m, r = divmod(n, 2)
- tmp = [hermemul(p[i], p[i+m]) for i in range(m)]
- if r:
- tmp[0] = hermemul(tmp[0], p[-1])
- p = tmp
- n = m
- return p[0]
+ return pu._fromroots(hermeline, hermemul, roots)
def hermeadd(c1, c2):
@@ -338,18 +323,10 @@ def hermeadd(c1, c2):
--------
>>> from numpy.polynomial.hermite_e import hermeadd
>>> hermeadd([1, 2, 3], [1, 2, 3, 4])
- array([ 2., 4., 6., 4.])
+ array([2., 4., 6., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] += c2
- ret = c1
- else:
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._add(c1, c2)
def hermesub(c1, c2):
@@ -386,19 +363,10 @@ def hermesub(c1, c2):
--------
>>> from numpy.polynomial.hermite_e import hermesub
>>> hermesub([1, 2, 3, 4], [1, 2, 3])
- array([ 0., 0., 0., 4.])
+ array([0., 0., 0., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] -= c2
- ret = c1
- else:
- c2 = -c2
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._sub(c1, c2)
def hermemulx(c):
@@ -432,7 +400,7 @@ def hermemulx(c):
--------
>>> from numpy.polynomial.hermite_e import hermemulx
>>> hermemulx([1, 2, 3])
- array([ 2., 7., 2., 3.])
+ array([2., 7., 2., 3.])
"""
# c is a trimmed copy
@@ -485,7 +453,7 @@ def hermemul(c1, c2):
--------
>>> from numpy.polynomial.hermite_e import hermemul
>>> hermemul([1, 2, 3], [0, 1, 2])
- array([ 14., 15., 28., 7., 6.])
+ array([14., 15., 28., 7., 6.])
"""
# s1, s2 are trimmed copies
@@ -554,31 +522,12 @@ def hermediv(c1, c2):
--------
>>> from numpy.polynomial.hermite_e import hermediv
>>> hermediv([ 14., 15., 28., 7., 6.], [0, 1, 2])
- (array([ 1., 2., 3.]), array([ 0.]))
+ (array([1., 2., 3.]), array([0.]))
>>> hermediv([ 15., 17., 28., 7., 6.], [0, 1, 2])
- (array([ 1., 2., 3.]), array([ 1., 2.]))
+ (array([1., 2., 3.]), array([1., 2.]))
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if c2[-1] == 0:
- raise ZeroDivisionError()
-
- lc1 = len(c1)
- lc2 = len(c2)
- if lc1 < lc2:
- return c1[:1]*0, c1
- elif lc2 == 1:
- return c1/c2[-1], c1[:1]*0
- else:
- quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
- rem = c1
- for i in range(lc1 - lc2, - 1, -1):
- p = hermemul([0]*i + [1], c2)
- q = rem[-1]/p[-1]
- rem = rem[:-1] - q*p[:-1]
- quo[i] = q
- return quo, pu.trimseq(rem)
+ return pu._div(hermemul, c1, c2)
def hermepow(c, pow, maxpower=16):
@@ -612,27 +561,10 @@ def hermepow(c, pow, maxpower=16):
--------
>>> from numpy.polynomial.hermite_e import hermepow
>>> hermepow([1, 2, 3], 2)
- array([ 23., 28., 46., 12., 9.])
+ array([23., 28., 46., 12., 9.])
"""
- # c is a trimmed copy
- [c] = pu.as_series([c])
- power = int(pow)
- if power != pow or power < 0:
- raise ValueError("Power must be a non-negative integer.")
- elif maxpower is not None and power > maxpower:
- raise ValueError("Power is too large")
- elif power == 0:
- return np.array([1], dtype=c.dtype)
- elif power == 1:
- return c
- else:
- # This can be made more efficient by using powers of two
- # in the usual way.
- prd = c
- for i in range(2, power + 1):
- prd = hermemul(prd, c)
- return prd
+ return pu._pow(hermemul, c, pow, maxpower)
def hermeder(c, m=1, scl=1, axis=0):
@@ -685,22 +617,18 @@ def hermeder(c, m=1, scl=1, axis=0):
--------
>>> from numpy.polynomial.hermite_e import hermeder
>>> hermeder([ 1., 1., 1., 1.])
- array([ 1., 2., 3.])
+ array([1., 2., 3.])
>>> hermeder([-0.25, 1., 1./2., 1./3., 1./4 ], m=2)
- array([ 1., 2., 3.])
+ array([1., 2., 3.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of derivation must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -794,26 +722,24 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
--------
>>> from numpy.polynomial.hermite_e import hermeint
>>> hermeint([1, 2, 3]) # integrate once, value 0 at 0.
- array([ 1., 1., 1., 1.])
+ array([1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2) # integrate twice, value & deriv 0 at 0
- array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ])
+ array([-0.25 , 1. , 0.5 , 0.33333333, 0.25 ]) # may vary
>>> hermeint([1, 2, 3], k=1) # integrate once, value 1 at 0.
- array([ 2., 1., 1., 1.])
+ array([2., 1., 1., 1.])
>>> hermeint([1, 2, 3], lbnd=-1) # integrate once, value 0 at -1
array([-1., 1., 1., 1.])
>>> hermeint([1, 2, 3], m=2, k=[1, 2], lbnd=-1)
- array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ])
+ array([ 1.83333333, 0. , 0.5 , 0.33333333, 0.25 ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of integration must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
@@ -822,8 +748,6 @@ def hermeint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -913,11 +837,11 @@ def hermeval(x, c, tensor=True):
>>> hermeval(1, coef)
3.0
>>> hermeval([[1,2],[3,4]], coef)
- array([[ 3., 14.],
- [ 31., 54.]])
+ array([[ 3., 14.],
+ [31., 54.]])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
@@ -989,14 +913,7 @@ def hermeval2d(x, y, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y = np.array((x, y), copy=0)
- except Exception:
- raise ValueError('x, y are incompatible')
-
- c = hermeval(x, c)
- c = hermeval(y, c, tensor=False)
- return c
+ return pu._valnd(hermeval, c, x, y)
def hermegrid2d(x, y, c):
@@ -1049,9 +966,7 @@ def hermegrid2d(x, y, c):
.. versionadded:: 1.7.0
"""
- c = hermeval(x, c)
- c = hermeval(y, c)
- return c
+ return pu._gridnd(hermeval, c, x, y)
def hermeval3d(x, y, z, c):
@@ -1102,15 +1017,7 @@ def hermeval3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y, z = np.array((x, y, z), copy=0)
- except Exception:
- raise ValueError('x, y, z are incompatible')
-
- c = hermeval(x, c)
- c = hermeval(y, c, tensor=False)
- c = hermeval(z, c, tensor=False)
- return c
+ return pu._valnd(hermeval, c, x, y, z)
def hermegrid3d(x, y, z, c):
@@ -1166,10 +1073,7 @@ def hermegrid3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- c = hermeval(x, c)
- c = hermeval(y, c)
- c = hermeval(z, c)
- return c
+ return pu._gridnd(hermeval, c, x, y, z)
def hermevander(x, deg):
@@ -1216,13 +1120,11 @@ def hermevander(x, deg):
[ 1., 1., 0., -2.]])
"""
- ideg = int(deg)
- if ideg != deg:
- raise ValueError("deg must be integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
@@ -1276,7 +1178,7 @@ def hermevander2d(x, y, deg):
See Also
--------
- hermevander, hermevander3d. hermeval2d, hermeval3d
+ hermevander, hermevander3d, hermeval2d, hermeval3d
Notes
-----
@@ -1284,17 +1186,7 @@ def hermevander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy = ideg
- x, y = np.array((x, y), copy=0) + 0.0
-
- vx = hermevander(x, degx)
- vy = hermevander(y, degy)
- v = vx[..., None]*vy[..., None,:]
- return v.reshape(v.shape[:-2] + (-1,))
+ return pu._vander2d(hermevander, x, y, deg)
def hermevander3d(x, y, z, deg):
@@ -1340,7 +1232,7 @@ def hermevander3d(x, y, z, deg):
See Also
--------
- hermevander, hermevander3d. hermeval2d, hermeval3d
+ hermevander, hermevander3d, hermeval2d, hermeval3d
Notes
-----
@@ -1348,18 +1240,7 @@ def hermevander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy, degz = ideg
- x, y, z = np.array((x, y, z), copy=0) + 0.0
-
- vx = hermevander(x, degx)
- vy = hermevander(y, degy)
- vz = hermevander(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- return v.reshape(v.shape[:-3] + (-1,))
+ return pu._vander3d(hermevander, x, y, z, deg)
def hermefit(x, y, deg, rcond=None, full=False, w=None):
@@ -1430,7 +1311,7 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
warnings can be turned off by
>>> import warnings
- >>> warnings.simplefilter('ignore', RankWarning)
+ >>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
@@ -1480,87 +1361,14 @@ def hermefit(x, y, deg, rcond=None, full=False, w=None):
--------
>>> from numpy.polynomial.hermite_e import hermefit, hermeval
>>> x = np.linspace(-10, 10)
+ >>> np.random.seed(123)
>>> err = np.random.randn(len(x))/10
>>> y = hermeval(x, [1, 2, 3]) + err
>>> hermefit(x, y, 2)
- array([ 1.01690445, 1.99951418, 2.99948696])
+ array([ 1.01690445, 1.99951418, 2.99948696]) # may vary
"""
- x = np.asarray(x) + 0.0
- y = np.asarray(y) + 0.0
- deg = np.asarray(deg)
-
- # check arguments.
- if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
- raise TypeError("deg must be an int or non-empty 1-D array of int")
- if deg.min() < 0:
- raise ValueError("expected deg >= 0")
- if x.ndim != 1:
- raise TypeError("expected 1D vector for x")
- if x.size == 0:
- raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2:
- raise TypeError("expected 1D or 2D array for y")
- if len(x) != len(y):
- raise TypeError("expected x and y to have same length")
-
- if deg.ndim == 0:
- lmax = deg
- order = lmax + 1
- van = hermevander(x, lmax)
- else:
- deg = np.sort(deg)
- lmax = deg[-1]
- order = len(deg)
- van = hermevander(x, lmax)[:, deg]
-
- # set up the least squares matrices in transposed form
- lhs = van.T
- rhs = y.T
- if w is not None:
- w = np.asarray(w) + 0.0
- if w.ndim != 1:
- raise TypeError("expected 1D vector for w")
- if len(x) != len(w):
- raise TypeError("expected x and w to have same length")
- # apply weights. Don't use inplace operations as they
- # can cause problems with NA.
- lhs = lhs * w
- rhs = rhs * w
-
- # set rcond
- if rcond is None:
- rcond = len(x)*np.finfo(x.dtype).eps
-
- # Determine the norms of the design matrix columns.
- if issubclass(lhs.dtype.type, np.complexfloating):
- scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
- else:
- scl = np.sqrt(np.square(lhs).sum(1))
- scl[scl == 0] = 1
-
- # Solve the least squares problem.
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
- c = (c.T/scl).T
-
- # Expand c to include non-fitted coefficients which are set to zero
- if deg.ndim > 0:
- if c.ndim == 2:
- cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
- else:
- cc = np.zeros(lmax+1, dtype=c.dtype)
- cc[deg] = c
- c = cc
-
- # warn on rank reduction
- if rank != order and not full:
- msg = "The fit may be poorly conditioned"
- warnings.warn(msg, pu.RankWarning, stacklevel=2)
-
- if full:
- return c, [resids, rank, s, rcond]
- else:
- return c
+ return pu._fit(hermevander, x, y, deg, rcond, full, w)
def hermecompanion(c):
@@ -1650,9 +1458,9 @@ def hermeroots(c):
>>> from numpy.polynomial.hermite_e import hermeroots, hermefromroots
>>> coef = hermefromroots([-1, 0, 1])
>>> coef
- array([ 0., 2., 0., 1.])
+ array([0., 2., 0., 1.])
>>> hermeroots(coef)
- array([-1., 0., 1.])
+ array([-1., 0., 1.]) # may vary
"""
# c is a trimmed copy
@@ -1662,7 +1470,8 @@ def hermeroots(c):
if len(c) == 2:
return np.array([-c[0]/c[1]])
- m = hermecompanion(c)
+ # rotated companion matrix reduces error
+ m = hermecompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
@@ -1698,7 +1507,7 @@ def _normed_hermite_e_n(x, n):
"""
if n == 0:
- return np.ones(x.shape)/np.sqrt(np.sqrt(2*np.pi))
+ return np.full(x.shape, 1/np.sqrt(np.sqrt(2*np.pi)))
c0 = 0.
c1 = 1./np.sqrt(np.sqrt(2*np.pi))
@@ -1747,9 +1556,9 @@ def hermegauss(deg):
the right value when integrating 1.
"""
- ideg = int(deg)
- if ideg != deg or ideg < 1:
- raise ValueError("deg must be a non-negative integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
diff --git a/numpy/polynomial/laguerre.py b/numpy/polynomial/laguerre.py
index a116d20a7..7e7e45ca1 100644
--- a/numpy/polynomial/laguerre.py
+++ b/numpy/polynomial/laguerre.py
@@ -160,7 +160,7 @@ def lag2poly(c):
--------
>>> from numpy.polynomial.laguerre import lag2poly
>>> lag2poly([ 23., -63., 58., -18.])
- array([ 0., 1., 2., 3.])
+ array([0., 1., 2., 3.])
"""
from .polynomial import polyadd, polysub, polymulx
@@ -269,35 +269,20 @@ def lagfromroots(roots):
See Also
--------
- polyfromroots, legfromroots, chebfromroots, hermfromroots,
- hermefromroots.
+ polyfromroots, legfromroots, chebfromroots, hermfromroots, hermefromroots
Examples
--------
>>> from numpy.polynomial.laguerre import lagfromroots, lagval
>>> coef = lagfromroots((-1, 0, 1))
>>> lagval((-1, 0, 1), coef)
- array([ 0., 0., 0.])
+ array([0., 0., 0.])
>>> coef = lagfromroots((-1j, 1j))
>>> lagval((-1j, 1j), coef)
- array([ 0.+0.j, 0.+0.j])
+ array([0.+0.j, 0.+0.j])
"""
- if len(roots) == 0:
- return np.ones(1)
- else:
- [roots] = pu.as_series([roots], trim=False)
- roots.sort()
- p = [lagline(-r, 1) for r in roots]
- n = len(p)
- while n > 1:
- m, r = divmod(n, 2)
- tmp = [lagmul(p[i], p[i+m]) for i in range(m)]
- if r:
- tmp[0] = lagmul(tmp[0], p[-1])
- p = tmp
- n = m
- return p[0]
+ return pu._fromroots(lagline, lagmul, roots)
def lagadd(c1, c2):
@@ -334,19 +319,11 @@ def lagadd(c1, c2):
--------
>>> from numpy.polynomial.laguerre import lagadd
>>> lagadd([1, 2, 3], [1, 2, 3, 4])
- array([ 2., 4., 6., 4.])
+ array([2., 4., 6., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] += c2
- ret = c1
- else:
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._add(c1, c2)
def lagsub(c1, c2):
@@ -383,19 +360,10 @@ def lagsub(c1, c2):
--------
>>> from numpy.polynomial.laguerre import lagsub
>>> lagsub([1, 2, 3, 4], [1, 2, 3])
- array([ 0., 0., 0., 4.])
+ array([0., 0., 0., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] -= c2
- ret = c1
- else:
- c2 = -c2
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._sub(c1, c2)
def lagmulx(c):
@@ -433,7 +401,7 @@ def lagmulx(c):
--------
>>> from numpy.polynomial.laguerre import lagmulx
>>> lagmulx([1, 2, 3])
- array([ -1., -1., 11., -9.])
+ array([-1., -1., 11., -9.])
"""
# c is a trimmed copy
@@ -556,31 +524,12 @@ def lagdiv(c1, c2):
--------
>>> from numpy.polynomial.laguerre import lagdiv
>>> lagdiv([ 8., -13., 38., -51., 36.], [0, 1, 2])
- (array([ 1., 2., 3.]), array([ 0.]))
+ (array([1., 2., 3.]), array([0.]))
>>> lagdiv([ 9., -12., 38., -51., 36.], [0, 1, 2])
- (array([ 1., 2., 3.]), array([ 1., 1.]))
+ (array([1., 2., 3.]), array([1., 1.]))
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if c2[-1] == 0:
- raise ZeroDivisionError()
-
- lc1 = len(c1)
- lc2 = len(c2)
- if lc1 < lc2:
- return c1[:1]*0, c1
- elif lc2 == 1:
- return c1/c2[-1], c1[:1]*0
- else:
- quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
- rem = c1
- for i in range(lc1 - lc2, - 1, -1):
- p = lagmul([0]*i + [1], c2)
- q = rem[-1]/p[-1]
- rem = rem[:-1] - q*p[:-1]
- quo[i] = q
- return quo, pu.trimseq(rem)
+ return pu._div(lagmul, c1, c2)
def lagpow(c, pow, maxpower=16):
@@ -617,24 +566,7 @@ def lagpow(c, pow, maxpower=16):
array([ 14., -16., 56., -72., 54.])
"""
- # c is a trimmed copy
- [c] = pu.as_series([c])
- power = int(pow)
- if power != pow or power < 0:
- raise ValueError("Power must be a non-negative integer.")
- elif maxpower is not None and power > maxpower:
- raise ValueError("Power is too large")
- elif power == 0:
- return np.array([1], dtype=c.dtype)
- elif power == 1:
- return c
- else:
- # This can be made more efficient by using powers of two
- # in the usual way.
- prd = c
- for i in range(2, power + 1):
- prd = lagmul(prd, c)
- return prd
+ return pu._pow(lagmul, c, pow, maxpower)
def lagder(c, m=1, scl=1, axis=0):
@@ -687,22 +619,19 @@ def lagder(c, m=1, scl=1, axis=0):
--------
>>> from numpy.polynomial.laguerre import lagder
>>> lagder([ 1., 1., 1., -3.])
- array([ 1., 2., 3.])
+ array([1., 2., 3.])
>>> lagder([ 1., 0., 0., -4., 3.], m=2)
- array([ 1., 2., 3.])
+ array([1., 2., 3.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
- cnt, iaxis = [int(t) for t in [m, axis]]
- if cnt != m:
- raise ValueError("The order of derivation must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -805,20 +734,18 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
>>> lagint([1,2,3], k=1)
array([ 2., 1., 1., -3.])
>>> lagint([1,2,3], lbnd=-1)
- array([ 11.5, 1. , 1. , -3. ])
+ array([11.5, 1. , 1. , -3. ])
>>> lagint([1,2], m=2, k=[1,2], lbnd=-1)
- array([ 11.16666667, -5. , -3. , 2. ])
+ array([ 11.16666667, -5. , -3. , 2. ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of integration must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
@@ -827,8 +754,6 @@ def lagint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -923,7 +848,7 @@ def lagval(x, c, tensor=True):
[-4.5, -2. ]])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
@@ -995,14 +920,7 @@ def lagval2d(x, y, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y = np.array((x, y), copy=0)
- except Exception:
- raise ValueError('x, y are incompatible')
-
- c = lagval(x, c)
- c = lagval(y, c, tensor=False)
- return c
+ return pu._valnd(lagval, c, x, y)
def laggrid2d(x, y, c):
@@ -1055,9 +973,7 @@ def laggrid2d(x, y, c):
.. versionadded:: 1.7.0
"""
- c = lagval(x, c)
- c = lagval(y, c)
- return c
+ return pu._gridnd(lagval, c, x, y)
def lagval3d(x, y, z, c):
@@ -1108,15 +1024,7 @@ def lagval3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y, z = np.array((x, y, z), copy=0)
- except Exception:
- raise ValueError('x, y, z are incompatible')
-
- c = lagval(x, c)
- c = lagval(y, c, tensor=False)
- c = lagval(z, c, tensor=False)
- return c
+ return pu._valnd(lagval, c, x, y, z)
def laggrid3d(x, y, z, c):
@@ -1172,10 +1080,7 @@ def laggrid3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- c = lagval(x, c)
- c = lagval(y, c)
- c = lagval(z, c)
- return c
+ return pu._gridnd(lagval, c, x, y, z)
def lagvander(x, deg):
@@ -1222,13 +1127,11 @@ def lagvander(x, deg):
[ 1. , -1. , -1. , -0.33333333]])
"""
- ideg = int(deg)
- if ideg != deg:
- raise ValueError("deg must be integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
@@ -1282,7 +1185,7 @@ def lagvander2d(x, y, deg):
See Also
--------
- lagvander, lagvander3d. lagval2d, lagval3d
+ lagvander, lagvander3d, lagval2d, lagval3d
Notes
-----
@@ -1290,17 +1193,7 @@ def lagvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy = ideg
- x, y = np.array((x, y), copy=0) + 0.0
-
- vx = lagvander(x, degx)
- vy = lagvander(y, degy)
- v = vx[..., None]*vy[..., None,:]
- return v.reshape(v.shape[:-2] + (-1,))
+ return pu._vander2d(lagvander, x, y, deg)
def lagvander3d(x, y, z, deg):
@@ -1346,7 +1239,7 @@ def lagvander3d(x, y, z, deg):
See Also
--------
- lagvander, lagvander3d. lagval2d, lagval3d
+ lagvander, lagvander3d, lagval2d, lagval3d
Notes
-----
@@ -1354,18 +1247,7 @@ def lagvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy, degz = ideg
- x, y, z = np.array((x, y, z), copy=0) + 0.0
-
- vx = lagvander(x, degx)
- vy = lagvander(y, degy)
- vz = lagvander(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- return v.reshape(v.shape[:-3] + (-1,))
+ return pu._vander3d(lagvander, x, y, z, deg)
def lagfit(x, y, deg, rcond=None, full=False, w=None):
@@ -1436,7 +1318,7 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
warnings can be turned off by
>>> import warnings
- >>> warnings.simplefilter('ignore', RankWarning)
+ >>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
@@ -1489,84 +1371,10 @@ def lagfit(x, y, deg, rcond=None, full=False, w=None):
>>> err = np.random.randn(len(x))/10
>>> y = lagval(x, [1, 2, 3]) + err
>>> lagfit(x, y, 2)
- array([ 0.96971004, 2.00193749, 3.00288744])
+ array([ 0.96971004, 2.00193749, 3.00288744]) # may vary
"""
- x = np.asarray(x) + 0.0
- y = np.asarray(y) + 0.0
- deg = np.asarray(deg)
-
- # check arguments.
- if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
- raise TypeError("deg must be an int or non-empty 1-D array of int")
- if deg.min() < 0:
- raise ValueError("expected deg >= 0")
- if x.ndim != 1:
- raise TypeError("expected 1D vector for x")
- if x.size == 0:
- raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2:
- raise TypeError("expected 1D or 2D array for y")
- if len(x) != len(y):
- raise TypeError("expected x and y to have same length")
-
- if deg.ndim == 0:
- lmax = deg
- order = lmax + 1
- van = lagvander(x, lmax)
- else:
- deg = np.sort(deg)
- lmax = deg[-1]
- order = len(deg)
- van = lagvander(x, lmax)[:, deg]
-
- # set up the least squares matrices in transposed form
- lhs = van.T
- rhs = y.T
- if w is not None:
- w = np.asarray(w) + 0.0
- if w.ndim != 1:
- raise TypeError("expected 1D vector for w")
- if len(x) != len(w):
- raise TypeError("expected x and w to have same length")
- # apply weights. Don't use inplace operations as they
- # can cause problems with NA.
- lhs = lhs * w
- rhs = rhs * w
-
- # set rcond
- if rcond is None:
- rcond = len(x)*np.finfo(x.dtype).eps
-
- # Determine the norms of the design matrix columns.
- if issubclass(lhs.dtype.type, np.complexfloating):
- scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
- else:
- scl = np.sqrt(np.square(lhs).sum(1))
- scl[scl == 0] = 1
-
- # Solve the least squares problem.
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
- c = (c.T/scl).T
-
- # Expand c to include non-fitted coefficients which are set to zero
- if deg.ndim > 0:
- if c.ndim == 2:
- cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
- else:
- cc = np.zeros(lmax+1, dtype=c.dtype)
- cc[deg] = c
- c = cc
-
- # warn on rank reduction
- if rank != order and not full:
- msg = "The fit may be poorly conditioned"
- warnings.warn(msg, pu.RankWarning, stacklevel=2)
-
- if full:
- return c, [resids, rank, s, rcond]
- else:
- return c
+ return pu._fit(lagvander, x, y, deg, rcond, full, w)
def lagcompanion(c):
@@ -1656,7 +1464,7 @@ def lagroots(c):
>>> coef
array([ 2., -8., 12., -6.])
>>> lagroots(coef)
- array([ -4.44089210e-16, 1.00000000e+00, 2.00000000e+00])
+ array([-4.4408921e-16, 1.0000000e+00, 2.0000000e+00])
"""
# c is a trimmed copy
@@ -1666,7 +1474,8 @@ def lagroots(c):
if len(c) == 2:
return np.array([1 + c[0]/c[1]])
- m = lagcompanion(c)
+ # rotated companion matrix reduces error
+ m = lagcompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
@@ -1708,9 +1517,9 @@ def laggauss(deg):
the right value when integrating 1.
"""
- ideg = int(deg)
- if ideg != deg or ideg < 1:
- raise ValueError("deg must be a non-negative integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
diff --git a/numpy/polynomial/legendre.py b/numpy/polynomial/legendre.py
index e9c24594b..281982d0b 100644
--- a/numpy/polynomial/legendre.py
+++ b/numpy/polynomial/legendre.py
@@ -136,10 +136,10 @@ def poly2leg(pol):
>>> from numpy import polynomial as P
>>> p = P.Polynomial(np.arange(4))
>>> p
- Polynomial([ 0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
+ Polynomial([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> c = P.Legendre(P.legendre.poly2leg(p.coef))
>>> c
- Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1])
+ Legendre([ 1. , 3.25, 1. , 0.75], domain=[-1, 1], window=[-1, 1]) # may vary
"""
[pol] = pu.as_series([pol])
@@ -183,12 +183,13 @@ def leg2poly(c):
Examples
--------
+ >>> from numpy import polynomial as P
>>> c = P.Legendre(range(4))
>>> c
- Legendre([ 0., 1., 2., 3.], [-1., 1.])
+ Legendre([0., 1., 2., 3.], domain=[-1, 1], window=[-1, 1])
>>> p = c.convert(kind=P.Polynomial)
>>> p
- Polynomial([-1. , -3.5, 3. , 7.5], [-1., 1.])
+ Polynomial([-1. , -3.5, 3. , 7.5], domain=[-1., 1.], window=[-1., 1.])
>>> P.leg2poly(range(4))
array([-1. , -3.5, 3. , 7.5])
@@ -300,8 +301,7 @@ def legfromroots(roots):
See Also
--------
- polyfromroots, chebfromroots, lagfromroots, hermfromroots,
- hermefromroots.
+ polyfromroots, chebfromroots, lagfromroots, hermfromroots, hermefromroots
Examples
--------
@@ -310,24 +310,10 @@ def legfromroots(roots):
array([ 0. , -0.4, 0. , 0.4])
>>> j = complex(0,1)
>>> L.legfromroots((-j,j)) # x^2 + 1 relative to the standard basis
- array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j])
+ array([ 1.33333333+0.j, 0.00000000+0.j, 0.66666667+0.j]) # may vary
"""
- if len(roots) == 0:
- return np.ones(1)
- else:
- [roots] = pu.as_series([roots], trim=False)
- roots.sort()
- p = [legline(-r, 1) for r in roots]
- n = len(p)
- while n > 1:
- m, r = divmod(n, 2)
- tmp = [legmul(p[i], p[i+m]) for i in range(m)]
- if r:
- tmp[0] = legmul(tmp[0], p[-1])
- p = tmp
- n = m
- return p[0]
+ return pu._fromroots(legline, legmul, roots)
def legadd(c1, c2):
@@ -366,18 +352,10 @@ def legadd(c1, c2):
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legadd(c1,c2)
- array([ 4., 4., 4.])
+ array([4., 4., 4.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] += c2
- ret = c1
- else:
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._add(c1, c2)
def legsub(c1, c2):
@@ -421,16 +399,7 @@ def legsub(c1, c2):
array([ 2., 0., -2.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] -= c2
- ret = c1
- else:
- c2 = -c2
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._sub(c1, c2)
def legmulx(c):
@@ -468,7 +437,7 @@ def legmulx(c):
--------
>>> from numpy.polynomial import legendre as L
>>> L.legmulx([1,2,3])
- array([ 0.66666667, 2.2, 1.33333333, 1.8])
+ array([ 0.66666667, 2.2, 1.33333333, 1.8]) # may vary
"""
# c is a trimmed copy
@@ -525,8 +494,8 @@ def legmul(c1, c2):
>>> from numpy.polynomial import legendre as L
>>> c1 = (1,2,3)
>>> c2 = (3,2)
- >>> P.legmul(c1,c2) # multiplication requires "reprojection"
- array([ 4.33333333, 10.4 , 11.66666667, 3.6 ])
+ >>> L.legmul(c1,c2) # multiplication requires "reprojection"
+ array([ 4.33333333, 10.4 , 11.66666667, 3.6 ]) # may vary
"""
# s1, s2 are trimmed copies
@@ -597,32 +566,13 @@ def legdiv(c1, c2):
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> L.legdiv(c1,c2) # quotient "intuitive," remainder not
- (array([ 3.]), array([-8., -4.]))
+ (array([3.]), array([-8., -4.]))
>>> c2 = (0,1,2,3)
>>> L.legdiv(c2,c1) # neither "intuitive"
- (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852]))
+ (array([-0.07407407, 1.66666667]), array([-1.03703704, -2.51851852])) # may vary
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if c2[-1] == 0:
- raise ZeroDivisionError()
-
- lc1 = len(c1)
- lc2 = len(c2)
- if lc1 < lc2:
- return c1[:1]*0, c1
- elif lc2 == 1:
- return c1/c2[-1], c1[:1]*0
- else:
- quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
- rem = c1
- for i in range(lc1 - lc2, - 1, -1):
- p = legmul([0]*i + [1], c2)
- q = rem[-1]/p[-1]
- rem = rem[:-1] - q*p[:-1]
- quo[i] = q
- return quo, pu.trimseq(rem)
+ return pu._div(legmul, c1, c2)
def legpow(c, pow, maxpower=16):
@@ -656,24 +606,7 @@ def legpow(c, pow, maxpower=16):
--------
"""
- # c is a trimmed copy
- [c] = pu.as_series([c])
- power = int(pow)
- if power != pow or power < 0:
- raise ValueError("Power must be a non-negative integer.")
- elif maxpower is not None and power > maxpower:
- raise ValueError("Power is too large")
- elif power == 0:
- return np.array([1], dtype=c.dtype)
- elif power == 1:
- return c
- else:
- # This can be made more efficient by using powers of two
- # in the usual way.
- prd = c
- for i in range(2, power + 1):
- prd = legmul(prd, c)
- return prd
+ return pu._pow(legmul, c, pow, maxpower)
def legder(c, m=1, scl=1, axis=0):
@@ -729,24 +662,20 @@ def legder(c, m=1, scl=1, axis=0):
>>> L.legder(c)
array([ 6., 9., 20.])
>>> L.legder(c, 3)
- array([ 60.])
+ array([60.])
>>> L.legder(c, scl=-1)
array([ -6., -9., -20.])
>>> L.legder(c, 2,-1)
array([ 9., 60.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of derivation must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -845,27 +774,25 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
>>> from numpy.polynomial import legendre as L
>>> c = (1,2,3)
>>> L.legint(c)
- array([ 0.33333333, 0.4 , 0.66666667, 0.6 ])
+ array([ 0.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary
>>> L.legint(c, 3)
- array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02,
- -1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
+ array([ 1.66666667e-02, -1.78571429e-02, 4.76190476e-02, # may vary
+ -1.73472348e-18, 1.90476190e-02, 9.52380952e-03])
>>> L.legint(c, k=3)
- array([ 3.33333333, 0.4 , 0.66666667, 0.6 ])
+ array([ 3.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary
>>> L.legint(c, lbnd=-2)
- array([ 7.33333333, 0.4 , 0.66666667, 0.6 ])
+ array([ 7.33333333, 0.4 , 0.66666667, 0.6 ]) # may vary
>>> L.legint(c, scl=2)
- array([ 0.66666667, 0.8 , 1.33333333, 1.2 ])
+ array([ 0.66666667, 0.8 , 1.33333333, 1.2 ]) # may vary
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if not np.iterable(k):
k = [k]
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of integration must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
@@ -874,8 +801,6 @@ def legint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -966,7 +891,7 @@ def legval(x, c, tensor=True):
--------
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
c = c.astype(np.double)
if isinstance(x, (tuple, list)):
@@ -1038,14 +963,7 @@ def legval2d(x, y, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y = np.array((x, y), copy=0)
- except Exception:
- raise ValueError('x, y are incompatible')
-
- c = legval(x, c)
- c = legval(y, c, tensor=False)
- return c
+ return pu._valnd(legval, c, x, y)
def leggrid2d(x, y, c):
@@ -1098,9 +1016,7 @@ def leggrid2d(x, y, c):
.. versionadded:: 1.7.0
"""
- c = legval(x, c)
- c = legval(y, c)
- return c
+ return pu._gridnd(legval, c, x, y)
def legval3d(x, y, z, c):
@@ -1151,15 +1067,7 @@ def legval3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y, z = np.array((x, y, z), copy=0)
- except Exception:
- raise ValueError('x, y, z are incompatible')
-
- c = legval(x, c)
- c = legval(y, c, tensor=False)
- c = legval(z, c, tensor=False)
- return c
+ return pu._valnd(legval, c, x, y, z)
def leggrid3d(x, y, z, c):
@@ -1215,10 +1123,7 @@ def leggrid3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- c = legval(x, c)
- c = legval(y, c)
- c = legval(z, c)
- return c
+ return pu._gridnd(legval, c, x, y, z)
def legvander(x, deg):
@@ -1256,13 +1161,11 @@ def legvander(x, deg):
the converted `x`.
"""
- ideg = int(deg)
- if ideg != deg:
- raise ValueError("deg must be integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
@@ -1318,7 +1221,7 @@ def legvander2d(x, y, deg):
See Also
--------
- legvander, legvander3d. legval2d, legval3d
+ legvander, legvander3d, legval2d, legval3d
Notes
-----
@@ -1326,17 +1229,7 @@ def legvander2d(x, y, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy = ideg
- x, y = np.array((x, y), copy=0) + 0.0
-
- vx = legvander(x, degx)
- vy = legvander(y, degy)
- v = vx[..., None]*vy[..., None,:]
- return v.reshape(v.shape[:-2] + (-1,))
+ return pu._vander2d(legvander, x, y, deg)
def legvander3d(x, y, z, deg):
@@ -1382,7 +1275,7 @@ def legvander3d(x, y, z, deg):
See Also
--------
- legvander, legvander3d. legval2d, legval3d
+ legvander, legvander3d, legval2d, legval3d
Notes
-----
@@ -1390,18 +1283,7 @@ def legvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy, degz = ideg
- x, y, z = np.array((x, y, z), copy=0) + 0.0
-
- vx = legvander(x, degx)
- vy = legvander(y, degy)
- vz = legvander(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- return v.reshape(v.shape[:-3] + (-1,))
+ return pu._vander3d(legvander, x, y, z, deg)
def legfit(x, y, deg, rcond=None, full=False, w=None):
@@ -1476,7 +1358,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
warnings can be turned off by
>>> import warnings
- >>> warnings.simplefilter('ignore', RankWarning)
+ >>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
@@ -1525,81 +1407,7 @@ def legfit(x, y, deg, rcond=None, full=False, w=None):
--------
"""
- x = np.asarray(x) + 0.0
- y = np.asarray(y) + 0.0
- deg = np.asarray(deg)
-
- # check arguments.
- if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
- raise TypeError("deg must be an int or non-empty 1-D array of int")
- if deg.min() < 0:
- raise ValueError("expected deg >= 0")
- if x.ndim != 1:
- raise TypeError("expected 1D vector for x")
- if x.size == 0:
- raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2:
- raise TypeError("expected 1D or 2D array for y")
- if len(x) != len(y):
- raise TypeError("expected x and y to have same length")
-
- if deg.ndim == 0:
- lmax = deg
- order = lmax + 1
- van = legvander(x, lmax)
- else:
- deg = np.sort(deg)
- lmax = deg[-1]
- order = len(deg)
- van = legvander(x, lmax)[:, deg]
-
- # set up the least squares matrices in transposed form
- lhs = van.T
- rhs = y.T
- if w is not None:
- w = np.asarray(w) + 0.0
- if w.ndim != 1:
- raise TypeError("expected 1D vector for w")
- if len(x) != len(w):
- raise TypeError("expected x and w to have same length")
- # apply weights. Don't use inplace operations as they
- # can cause problems with NA.
- lhs = lhs * w
- rhs = rhs * w
-
- # set rcond
- if rcond is None:
- rcond = len(x)*np.finfo(x.dtype).eps
-
- # Determine the norms of the design matrix columns.
- if issubclass(lhs.dtype.type, np.complexfloating):
- scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
- else:
- scl = np.sqrt(np.square(lhs).sum(1))
- scl[scl == 0] = 1
-
- # Solve the least squares problem.
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
- c = (c.T/scl).T
-
- # Expand c to include non-fitted coefficients which are set to zero
- if deg.ndim > 0:
- if c.ndim == 2:
- cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
- else:
- cc = np.zeros(lmax+1, dtype=c.dtype)
- cc[deg] = c
- c = cc
-
- # warn on rank reduction
- if rank != order and not full:
- msg = "The fit may be poorly conditioned"
- warnings.warn(msg, pu.RankWarning, stacklevel=2)
-
- if full:
- return c, [resids, rank, s, rcond]
- else:
- return c
+ return pu._fit(legvander, x, y, deg, rcond, full, w)
def legcompanion(c):
@@ -1686,7 +1494,7 @@ def legroots(c):
--------
>>> import numpy.polynomial.legendre as leg
>>> leg.legroots((1, 2, 3, 4)) # 4L_3 + 3L_2 + 2L_1 + 1L_0, all real roots
- array([-0.85099543, -0.11407192, 0.51506735])
+ array([-0.85099543, -0.11407192, 0.51506735]) # may vary
"""
# c is a trimmed copy
@@ -1696,7 +1504,8 @@ def legroots(c):
if len(c) == 2:
return np.array([-c[0]/c[1]])
- m = legcompanion(c)
+ # rotated companion matrix reduces error
+ m = legcompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
@@ -1738,9 +1547,9 @@ def leggauss(deg):
the right value when integrating 1.
"""
- ideg = int(deg)
- if ideg != deg or ideg < 1:
- raise ValueError("deg must be a non-negative integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
+ if ideg <= 0:
+ raise ValueError("deg must be a positive integer")
# first approximation of roots. We use the fact that the companion
# matrix is symmetric in this case in order to obtain better zeros.
diff --git a/numpy/polynomial/polynomial.py b/numpy/polynomial/polynomial.py
index 259cd31f5..3f0a902cf 100644
--- a/numpy/polynomial/polynomial.py
+++ b/numpy/polynomial/polynomial.py
@@ -185,24 +185,10 @@ def polyfromroots(roots):
array([ 0., -1., 0., 1.])
>>> j = complex(0,1)
>>> P.polyfromroots((-j,j)) # complex returned, though values are real
- array([ 1.+0.j, 0.+0.j, 1.+0.j])
+ array([1.+0.j, 0.+0.j, 1.+0.j])
"""
- if len(roots) == 0:
- return np.ones(1)
- else:
- [roots] = pu.as_series([roots], trim=False)
- roots.sort()
- p = [polyline(-r, 1) for r in roots]
- n = len(p)
- while n > 1:
- m, r = divmod(n, 2)
- tmp = [polymul(p[i], p[i+m]) for i in range(m)]
- if r:
- tmp[0] = polymul(tmp[0], p[-1])
- p = tmp
- n = m
- return p[0]
+ return pu._fromroots(polyline, polymul, roots)
def polyadd(c1, c2):
@@ -233,20 +219,12 @@ def polyadd(c1, c2):
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> sum = P.polyadd(c1,c2); sum
- array([ 4., 4., 4.])
+ array([4., 4., 4.])
>>> P.polyval(2, sum) # 4 + 4(2) + 4(2**2)
28.0
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] += c2
- ret = c1
- else:
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._add(c1, c2)
def polysub(c1, c2):
@@ -283,16 +261,7 @@ def polysub(c1, c2):
array([ 2., 0., -2.])
"""
- # c1, c2 are trimmed copies
- [c1, c2] = pu.as_series([c1, c2])
- if len(c1) > len(c2):
- c1[:c2.size] -= c2
- ret = c1
- else:
- c2 = -c2
- c2[:c1.size] += c1
- ret = c2
- return pu.trimseq(ret)
+ return pu._sub(c1, c2)
def polymulx(c):
@@ -401,9 +370,9 @@ def polydiv(c1, c2):
>>> c1 = (1,2,3)
>>> c2 = (3,2,1)
>>> P.polydiv(c1,c2)
- (array([ 3.]), array([-8., -4.]))
+ (array([3.]), array([-8., -4.]))
>>> P.polydiv(c2,c1)
- (array([ 0.33333333]), array([ 2.66666667, 1.33333333]))
+ (array([ 0.33333333]), array([ 2.66666667, 1.33333333])) # may vary
"""
# c1, c2 are trimmed copies
@@ -411,18 +380,19 @@ def polydiv(c1, c2):
if c2[-1] == 0:
raise ZeroDivisionError()
- len1 = len(c1)
- len2 = len(c2)
- if len2 == 1:
- return c1/c2[-1], c1[:1]*0
- elif len1 < len2:
+ # note: this is more efficient than `pu._div(polymul, c1, c2)`
+ lc1 = len(c1)
+ lc2 = len(c2)
+ if lc1 < lc2:
return c1[:1]*0, c1
+ elif lc2 == 1:
+ return c1/c2[-1], c1[:1]*0
else:
- dlen = len1 - len2
+ dlen = lc1 - lc2
scl = c2[-1]
c2 = c2[:-1]/scl
i = dlen
- j = len1 - 1
+ j = lc1 - 1
while i >= 0:
c1[i:j] -= c2*c1[j]
i -= 1
@@ -464,24 +434,9 @@ def polypow(c, pow, maxpower=None):
array([ 1., 4., 10., 12., 9.])
"""
- # c is a trimmed copy
- [c] = pu.as_series([c])
- power = int(pow)
- if power != pow or power < 0:
- raise ValueError("Power must be a non-negative integer.")
- elif maxpower is not None and power > maxpower:
- raise ValueError("Power is too large")
- elif power == 0:
- return np.array([1], dtype=c.dtype)
- elif power == 1:
- return c
- else:
- # This can be made more efficient by using powers of two
- # in the usual way.
- prd = c
- for i in range(2, power + 1):
- prd = np.convolve(prd, c)
- return prd
+ # note: this is more efficient than `pu._pow(polymul, c1, c2)`, as it
+ # avoids calling `as_series` repeatedly
+ return pu._pow(np.convolve, c, pow, maxpower)
def polyder(c, m=1, scl=1, axis=0):
@@ -529,26 +484,22 @@ def polyder(c, m=1, scl=1, axis=0):
>>> P.polyder(c) # (d/dx)(c) = 2 + 6x + 12x**2
array([ 2., 6., 12.])
>>> P.polyder(c,3) # (d**3/dx**3)(c) = 24
- array([ 24.])
+ array([24.])
>>> P.polyder(c,scl=-1) # (d/d(-x))(c) = -2 - 6x - 12x**2
array([ -2., -6., -12.])
>>> P.polyder(c,2,-1) # (d**2/d(-x)**2)(c) = 6 + 24x
array([ 6., 24.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
cdt = c.dtype
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of derivation must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of derivation")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of derivation must be non-negative")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -636,29 +587,27 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
>>> from numpy.polynomial import polynomial as P
>>> c = (1,2,3)
>>> P.polyint(c) # should return array([0, 1, 1, 1])
- array([ 0., 1., 1., 1.])
+ array([0., 1., 1., 1.])
>>> P.polyint(c,3) # should return array([0, 0, 0, 1/6, 1/12, 1/20])
- array([ 0. , 0. , 0. , 0.16666667, 0.08333333,
- 0.05 ])
+ array([ 0. , 0. , 0. , 0.16666667, 0.08333333, # may vary
+ 0.05 ])
>>> P.polyint(c,k=3) # should return array([3, 1, 1, 1])
- array([ 3., 1., 1., 1.])
+ array([3., 1., 1., 1.])
>>> P.polyint(c,lbnd=-2) # should return array([6, 1, 1, 1])
- array([ 6., 1., 1., 1.])
+ array([6., 1., 1., 1.])
>>> P.polyint(c,scl=-2) # should return array([0, -2, -2, -2])
array([ 0., -2., -2., -2.])
"""
- c = np.array(c, ndmin=1, copy=1)
+ c = np.array(c, ndmin=1, copy=True)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype doesn't preserve mask attribute.
c = c + 0.0
cdt = c.dtype
if not np.iterable(k):
k = [k]
- cnt, iaxis = [int(t) for t in [m, axis]]
-
- if cnt != m:
- raise ValueError("The order of integration must be integer")
+ cnt = pu._deprecate_as_int(m, "the order of integration")
+ iaxis = pu._deprecate_as_int(axis, "the axis")
if cnt < 0:
raise ValueError("The order of integration must be non-negative")
if len(k) > cnt:
@@ -667,8 +616,6 @@ def polyint(c, m=1, k=[], lbnd=0, scl=1, axis=0):
raise ValueError("lbnd must be a scalar.")
if np.ndim(scl) != 0:
raise ValueError("scl must be a scalar.")
- if iaxis != axis:
- raise ValueError("The axis must be integer")
iaxis = normalize_axis_index(iaxis, c.ndim)
if cnt == 0:
@@ -761,20 +708,20 @@ def polyval(x, c, tensor=True):
array([[0, 1],
[2, 3]])
>>> polyval(a, [1,2,3])
- array([[ 1., 6.],
- [ 17., 34.]])
+ array([[ 1., 6.],
+ [17., 34.]])
>>> coef = np.arange(4).reshape(2,2) # multidimensional coefficients
>>> coef
array([[0, 1],
[2, 3]])
>>> polyval([1,2], coef, tensor=True)
- array([[ 2., 4.],
- [ 4., 7.]])
+ array([[2., 4.],
+ [4., 7.]])
>>> polyval([1,2], coef, tensor=False)
- array([ 2., 7.])
+ array([2., 7.])
"""
- c = np.array(c, ndmin=1, copy=0)
+ c = np.array(c, ndmin=1, copy=False)
if c.dtype.char in '?bBhHiIlLqQpP':
# astype fails with NA
c = c + 0.0
@@ -851,8 +798,8 @@ def polyvalfromroots(x, r, tensor=True):
array([[0, 1],
[2, 3]])
>>> polyvalfromroots(a, [-1, 0, 1])
- array([[ -0., 0.],
- [ 6., 24.]])
+ array([[-0., 0.],
+ [ 6., 24.]])
>>> r = np.arange(-2, 2).reshape(2,2) # multidimensional coefficients
>>> r # each column of r defines one polynomial
array([[-2, -1],
@@ -864,7 +811,7 @@ def polyvalfromroots(x, r, tensor=True):
>>> polyvalfromroots(b, r, tensor=False)
array([-0., 0.])
"""
- r = np.array(r, ndmin=1, copy=0)
+ r = np.array(r, ndmin=1, copy=False)
if r.dtype.char in '?bBhHiIlLqQpP':
r = r.astype(np.double)
if isinstance(x, (tuple, list)):
@@ -924,14 +871,7 @@ def polyval2d(x, y, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y = np.array((x, y), copy=0)
- except Exception:
- raise ValueError('x, y are incompatible')
-
- c = polyval(x, c)
- c = polyval(y, c, tensor=False)
- return c
+ return pu._valnd(polyval, c, x, y)
def polygrid2d(x, y, c):
@@ -984,9 +924,7 @@ def polygrid2d(x, y, c):
.. versionadded:: 1.7.0
"""
- c = polyval(x, c)
- c = polyval(y, c)
- return c
+ return pu._gridnd(polyval, c, x, y)
def polyval3d(x, y, z, c):
@@ -1037,15 +975,7 @@ def polyval3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- try:
- x, y, z = np.array((x, y, z), copy=0)
- except Exception:
- raise ValueError('x, y, z are incompatible')
-
- c = polyval(x, c)
- c = polyval(y, c, tensor=False)
- c = polyval(z, c, tensor=False)
- return c
+ return pu._valnd(polyval, c, x, y, z)
def polygrid3d(x, y, z, c):
@@ -1101,10 +1031,7 @@ def polygrid3d(x, y, z, c):
.. versionadded:: 1.7.0
"""
- c = polyval(x, c)
- c = polyval(y, c)
- c = polyval(z, c)
- return c
+ return pu._gridnd(polyval, c, x, y, z)
def polyvander(x, deg):
@@ -1145,13 +1072,11 @@ def polyvander(x, deg):
polyvander2d, polyvander3d
"""
- ideg = int(deg)
- if ideg != deg:
- raise ValueError("deg must be integer")
+ ideg = pu._deprecate_as_int(deg, "deg")
if ideg < 0:
raise ValueError("deg must be non-negative")
- x = np.array(x, copy=0, ndmin=1) + 0.0
+ x = np.array(x, copy=False, ndmin=1) + 0.0
dims = (ideg + 1,) + x.shape
dtyp = x.dtype
v = np.empty(dims, dtype=dtyp)
@@ -1205,22 +1130,10 @@ def polyvander2d(x, y, deg):
See Also
--------
- polyvander, polyvander3d. polyval2d, polyval3d
+ polyvander, polyvander3d, polyval2d, polyval3d
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy = ideg
- x, y = np.array((x, y), copy=0) + 0.0
-
- vx = polyvander(x, degx)
- vy = polyvander(y, degy)
- v = vx[..., None]*vy[..., None,:]
- # einsum bug
- #v = np.einsum("...i,...j->...ij", vx, vy)
- return v.reshape(v.shape[:-2] + (-1,))
+ return pu._vander2d(polyvander, x, y, deg)
def polyvander3d(x, y, z, deg):
@@ -1266,7 +1179,7 @@ def polyvander3d(x, y, z, deg):
See Also
--------
- polyvander, polyvander3d. polyval2d, polyval3d
+ polyvander, polyvander3d, polyval2d, polyval3d
Notes
-----
@@ -1274,20 +1187,7 @@ def polyvander3d(x, y, z, deg):
.. versionadded:: 1.7.0
"""
- ideg = [int(d) for d in deg]
- is_valid = [id == d and id >= 0 for id, d in zip(ideg, deg)]
- if is_valid != [1, 1, 1]:
- raise ValueError("degrees must be non-negative integers")
- degx, degy, degz = ideg
- x, y, z = np.array((x, y, z), copy=0) + 0.0
-
- vx = polyvander(x, degx)
- vy = polyvander(y, degy)
- vz = polyvander(z, degz)
- v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
- # einsum bug
- #v = np.einsum("...i, ...j, ...k->...ijk", vx, vy, vz)
- return v.reshape(v.shape[:-3] + (-1,))
+ return pu._vander3d(polyvander, x, y, z, deg)
def polyfit(x, y, deg, rcond=None, full=False, w=None):
@@ -1363,7 +1263,7 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
be turned off by:
>>> import warnings
- >>> warnings.simplefilter('ignore', RankWarning)
+ >>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
@@ -1410,103 +1310,30 @@ def polyfit(x, y, deg, rcond=None, full=False, w=None):
Examples
--------
+ >>> np.random.seed(123)
>>> from numpy.polynomial import polynomial as P
>>> x = np.linspace(-1,1,51) # x "data": [-1, -0.96, ..., 0.96, 1]
>>> y = x**3 - x + np.random.randn(len(x)) # x^3 - x + N(0,1) "noise"
>>> c, stats = P.polyfit(x,y,3,full=True)
+ >>> np.random.seed(123)
>>> c # c[0], c[2] should be approx. 0, c[1] approx. -1, c[3] approx. 1
- array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286])
+ array([ 0.01909725, -1.30598256, -0.00577963, 1.02644286]) # may vary
>>> stats # note the large SSR, explaining the rather poor results
- [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316,
- 0.28853036]), 1.1324274851176597e-014]
+ [array([ 38.06116253]), 4, array([ 1.38446749, 1.32119158, 0.50443316, # may vary
+ 0.28853036]), 1.1324274851176597e-014]
Same thing without the added noise
>>> y = x**3 - x
>>> c, stats = P.polyfit(x,y,3,full=True)
>>> c # c[0], c[2] should be "very close to 0", c[1] ~= -1, c[3] ~= 1
- array([ -1.73362882e-17, -1.00000000e+00, -2.67471909e-16,
- 1.00000000e+00])
+ array([-6.36925336e-18, -1.00000000e+00, -4.08053781e-16, 1.00000000e+00])
>>> stats # note the minuscule SSR
- [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158,
- 0.50443316, 0.28853036]), 1.1324274851176597e-014]
+ [array([ 7.46346754e-31]), 4, array([ 1.38446749, 1.32119158, # may vary
+ 0.50443316, 0.28853036]), 1.1324274851176597e-014]
"""
- x = np.asarray(x) + 0.0
- y = np.asarray(y) + 0.0
- deg = np.asarray(deg)
-
- # check arguments.
- if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
- raise TypeError("deg must be an int or non-empty 1-D array of int")
- if deg.min() < 0:
- raise ValueError("expected deg >= 0")
- if x.ndim != 1:
- raise TypeError("expected 1D vector for x")
- if x.size == 0:
- raise TypeError("expected non-empty vector for x")
- if y.ndim < 1 or y.ndim > 2:
- raise TypeError("expected 1D or 2D array for y")
- if len(x) != len(y):
- raise TypeError("expected x and y to have same length")
-
- if deg.ndim == 0:
- lmax = deg
- order = lmax + 1
- van = polyvander(x, lmax)
- else:
- deg = np.sort(deg)
- lmax = deg[-1]
- order = len(deg)
- van = polyvander(x, lmax)[:, deg]
-
- # set up the least squares matrices in transposed form
- lhs = van.T
- rhs = y.T
- if w is not None:
- w = np.asarray(w) + 0.0
- if w.ndim != 1:
- raise TypeError("expected 1D vector for w")
- if len(x) != len(w):
- raise TypeError("expected x and w to have same length")
- # apply weights. Don't use inplace operations as they
- # can cause problems with NA.
- lhs = lhs * w
- rhs = rhs * w
-
- # set rcond
- if rcond is None:
- rcond = len(x)*np.finfo(x.dtype).eps
-
- # Determine the norms of the design matrix columns.
- if issubclass(lhs.dtype.type, np.complexfloating):
- scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
- else:
- scl = np.sqrt(np.square(lhs).sum(1))
- scl[scl == 0] = 1
-
- # Solve the least squares problem.
- c, resids, rank, s = la.lstsq(lhs.T/scl, rhs.T, rcond)
- c = (c.T/scl).T
-
- # Expand c to include non-fitted coefficients which are set to zero
- if deg.ndim == 1:
- if c.ndim == 2:
- cc = np.zeros((lmax + 1, c.shape[1]), dtype=c.dtype)
- else:
- cc = np.zeros(lmax + 1, dtype=c.dtype)
- cc[deg] = c
- c = cc
-
- # warn on rank reduction
- if rank != order and not full:
- msg = "The fit may be poorly conditioned"
- warnings.warn(msg, pu.RankWarning, stacklevel=2)
-
- if full:
- return c, [resids, rank, s, rcond]
- else:
- return c
+ return pu._fit(polyvander, x, y, deg, rcond, full, w)
def polycompanion(c):
@@ -1591,7 +1418,7 @@ def polyroots(c):
dtype('float64')
>>> j = complex(0,1)
>>> poly.polyroots(poly.polyfromroots((-j,0,j)))
- array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j])
+ array([ 0.00000000e+00+0.j, 0.00000000e+00+1.j, 2.77555756e-17-1.j]) # may vary
"""
# c is a trimmed copy
@@ -1601,7 +1428,8 @@ def polyroots(c):
if len(c) == 2:
return np.array([-c[0]/c[1]])
- m = polycompanion(c)
+ # rotated companion matrix reduces error
+ m = polycompanion(c)[::-1,::-1]
r = la.eigvals(m)
r.sort()
return r
diff --git a/numpy/polynomial/polyutils.py b/numpy/polynomial/polyutils.py
index c1ed0c9b3..35b24d1ab 100644
--- a/numpy/polynomial/polyutils.py
+++ b/numpy/polynomial/polyutils.py
@@ -45,6 +45,9 @@ Functions
"""
from __future__ import division, absolute_import, print_function
+import operator
+import warnings
+
import numpy as np
__all__ = [
@@ -156,22 +159,22 @@ def as_series(alist, trim=True):
>>> from numpy.polynomial import polyutils as pu
>>> a = np.arange(4)
>>> pu.as_series(a)
- [array([ 0.]), array([ 1.]), array([ 2.]), array([ 3.])]
+ [array([0.]), array([1.]), array([2.]), array([3.])]
>>> b = np.arange(6).reshape((2,3))
>>> pu.as_series(b)
- [array([ 0., 1., 2.]), array([ 3., 4., 5.])]
+ [array([0., 1., 2.]), array([3., 4., 5.])]
>>> pu.as_series((1, np.arange(3), np.arange(2, dtype=np.float16)))
- [array([ 1.]), array([ 0., 1., 2.]), array([ 0., 1.])]
+ [array([1.]), array([0., 1., 2.]), array([0., 1.])]
>>> pu.as_series([2, [1.1, 0.]])
- [array([ 2.]), array([ 1.1])]
+ [array([2.]), array([1.1])]
>>> pu.as_series([2, [1.1, 0.]], trim=False)
- [array([ 2.]), array([ 1.1, 0. ])]
+ [array([2.]), array([1.1, 0. ])]
"""
- arrays = [np.array(a, ndmin=1, copy=0) for a in alist]
+ arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
if min([a.size for a in arrays]) == 0:
raise ValueError("Coefficient array is empty")
if any([a.ndim != 1 for a in arrays]):
@@ -193,7 +196,7 @@ def as_series(alist, trim=True):
dtype = np.common_type(*arrays)
except Exception:
raise ValueError("Coefficient arrays have no common type")
- ret = [np.array(a, copy=1, dtype=dtype) for a in arrays]
+ ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
return ret
@@ -233,12 +236,12 @@ def trimcoef(c, tol=0):
--------
>>> from numpy.polynomial import polyutils as pu
>>> pu.trimcoef((0,0,3,0,5,0,0))
- array([ 0., 0., 3., 0., 5.])
+ array([0., 0., 3., 0., 5.])
>>> pu.trimcoef((0,0,1e-3,0,1e-5,0,0),1e-3) # item == tol is trimmed
- array([ 0.])
+ array([0.])
>>> i = complex(0,1) # works for complex
>>> pu.trimcoef((3e-4,1e-3*(1-i),5e-4,2e-5*(1+i)), 1e-3)
- array([ 0.0003+0.j , 0.0010-0.001j])
+ array([0.0003+0.j , 0.001 -0.001j])
"""
if tol < 0:
@@ -332,10 +335,10 @@ def mapparms(old, new):
>>> pu.mapparms((-1,1),(-1,1))
(0.0, 1.0)
>>> pu.mapparms((1,-1),(-1,1))
- (0.0, -1.0)
+ (-0.0, -1.0)
>>> i = complex(0,1)
>>> pu.mapparms((-i,-1),(1,i))
- ((1+1j), (1+0j))
+ ((1+1j), (1-0j))
"""
oldlen = old[1] - old[0]
@@ -390,10 +393,10 @@ def mapdomain(x, old, new):
>>> x = np.linspace(-1,1,6); x
array([-1. , -0.6, -0.2, 0.2, 0.6, 1. ])
>>> x_out = pu.mapdomain(x, old_domain, new_domain); x_out
- array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825,
+ array([ 0. , 1.25663706, 2.51327412, 3.76991118, 5.02654825, # may vary
6.28318531])
>>> x - pu.mapdomain(x_out, new_domain, old_domain)
- array([ 0., 0., 0., 0., 0., 0.])
+ array([0., 0., 0., 0., 0., 0.])
Also works for complex numbers (and thus can be used to map any line in
the complex plane to any other line therein).
@@ -402,11 +405,352 @@ def mapdomain(x, old, new):
>>> old = (-1 - i, 1 + i)
>>> new = (-1 + i, 1 - i)
>>> z = np.linspace(old[0], old[1], 6); z
- array([-1.0-1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1.0+1.j ])
- >>> new_z = P.mapdomain(z, old, new); new_z
- array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ])
+ array([-1. -1.j , -0.6-0.6j, -0.2-0.2j, 0.2+0.2j, 0.6+0.6j, 1. +1.j ])
+ >>> new_z = pu.mapdomain(z, old, new); new_z
+ array([-1.0+1.j , -0.6+0.6j, -0.2+0.2j, 0.2-0.2j, 0.6-0.6j, 1.0-1.j ]) # may vary
"""
x = np.asanyarray(x)
off, scl = mapparms(old, new)
return off + scl*x
+
+
+def _vander2d(vander_f, x, y, deg):
+ """
+ Helper function used to implement the ``<type>vander2d`` functions.
+
+ Parameters
+ ----------
+ vander_f : function(array_like, int) -> ndarray
+ The 1d vander function, such as ``polyvander``
+ x, y, deg :
+ See the ``<type>vander2d`` functions for more detail
+ """
+ degx, degy = deg
+ x, y = np.array((x, y), copy=False) + 0.0
+
+ vx = vander_f(x, degx)
+ vy = vander_f(y, degy)
+ v = vx[..., None]*vy[..., None,:]
+ return v.reshape(v.shape[:-2] + (-1,))
+
+
+def _vander3d(vander_f, x, y, z, deg):
+ """
+ Helper function used to implement the ``<type>vander3d`` functions.
+
+ Parameters
+ ----------
+ vander_f : function(array_like, int) -> ndarray
+ The 1d vander function, such as ``polyvander``
+ x, y, z, deg :
+ See the ``<type>vander3d`` functions for more detail
+ """
+ degx, degy, degz = deg
+ x, y, z = np.array((x, y, z), copy=False) + 0.0
+
+ vx = vander_f(x, degx)
+ vy = vander_f(y, degy)
+ vz = vander_f(z, degz)
+ v = vx[..., None, None]*vy[..., None,:, None]*vz[..., None, None,:]
+ return v.reshape(v.shape[:-3] + (-1,))
+
+
+def _fromroots(line_f, mul_f, roots):
+ """
+ Helper function used to implement the ``<type>fromroots`` functions.
+
+ Parameters
+ ----------
+ line_f : function(float, float) -> ndarray
+ The ``<type>line`` function, such as ``polyline``
+ mul_f : function(array_like, array_like) -> ndarray
+ The ``<type>mul`` function, such as ``polymul``
+ roots :
+ See the ``<type>fromroots`` functions for more detail
+ """
+ if len(roots) == 0:
+ return np.ones(1)
+ else:
+ [roots] = as_series([roots], trim=False)
+ roots.sort()
+ p = [line_f(-r, 1) for r in roots]
+ n = len(p)
+ while n > 1:
+ m, r = divmod(n, 2)
+ tmp = [mul_f(p[i], p[i+m]) for i in range(m)]
+ if r:
+ tmp[0] = mul_f(tmp[0], p[-1])
+ p = tmp
+ n = m
+ return p[0]
+
+
+def _valnd(val_f, c, *args):
+ """
+ Helper function used to implement the ``<type>val<n>d`` functions.
+
+ Parameters
+ ----------
+ val_f : function(array_like, array_like, tensor: bool) -> array_like
+ The ``<type>val`` function, such as ``polyval``
+ c, args :
+ See the ``<type>val<n>d`` functions for more detail
+ """
+ try:
+ args = tuple(np.array(args, copy=False))
+ except Exception:
+ # preserve the old error message
+ if len(args) == 2:
+ raise ValueError('x, y, z are incompatible')
+ elif len(args) == 3:
+ raise ValueError('x, y are incompatible')
+ else:
+ raise ValueError('ordinates are incompatible')
+
+ it = iter(args)
+ x0 = next(it)
+
+ # use tensor on only the first
+ c = val_f(x0, c)
+ for xi in it:
+ c = val_f(xi, c, tensor=False)
+ return c
+
+
+def _gridnd(val_f, c, *args):
+ """
+ Helper function used to implement the ``<type>grid<n>d`` functions.
+
+ Parameters
+ ----------
+ val_f : function(array_like, array_like, tensor: bool) -> array_like
+ The ``<type>val`` function, such as ``polyval``
+ c, args :
+ See the ``<type>grid<n>d`` functions for more detail
+ """
+ for xi in args:
+ c = val_f(xi, c)
+ return c
+
+
+def _div(mul_f, c1, c2):
+ """
+ Helper function used to implement the ``<type>div`` functions.
+
+ Implementation uses repeated subtraction of c2 multiplied by the nth basis.
+ For some polynomial types, a more efficient approach may be possible.
+
+ Parameters
+ ----------
+ mul_f : function(array_like, array_like) -> array_like
+ The ``<type>mul`` function, such as ``polymul``
+ c1, c2 :
+ See the ``<type>div`` functions for more detail
+ """
+ # c1, c2 are trimmed copies
+ [c1, c2] = as_series([c1, c2])
+ if c2[-1] == 0:
+ raise ZeroDivisionError()
+
+ lc1 = len(c1)
+ lc2 = len(c2)
+ if lc1 < lc2:
+ return c1[:1]*0, c1
+ elif lc2 == 1:
+ return c1/c2[-1], c1[:1]*0
+ else:
+ quo = np.empty(lc1 - lc2 + 1, dtype=c1.dtype)
+ rem = c1
+ for i in range(lc1 - lc2, - 1, -1):
+ p = mul_f([0]*i + [1], c2)
+ q = rem[-1]/p[-1]
+ rem = rem[:-1] - q*p[:-1]
+ quo[i] = q
+ return quo, trimseq(rem)
+
+
+def _add(c1, c2):
+ """ Helper function used to implement the ``<type>add`` functions. """
+ # c1, c2 are trimmed copies
+ [c1, c2] = as_series([c1, c2])
+ if len(c1) > len(c2):
+ c1[:c2.size] += c2
+ ret = c1
+ else:
+ c2[:c1.size] += c1
+ ret = c2
+ return trimseq(ret)
+
+
+def _sub(c1, c2):
+ """ Helper function used to implement the ``<type>sub`` functions. """
+ # c1, c2 are trimmed copies
+ [c1, c2] = as_series([c1, c2])
+ if len(c1) > len(c2):
+ c1[:c2.size] -= c2
+ ret = c1
+ else:
+ c2 = -c2
+ c2[:c1.size] += c1
+ ret = c2
+ return trimseq(ret)
+
+
+def _fit(vander_f, x, y, deg, rcond=None, full=False, w=None):
+ """
+ Helper function used to implement the ``<type>fit`` functions.
+
+ Parameters
+ ----------
+ vander_f : function(array_like, int) -> ndarray
+ The 1d vander function, such as ``polyvander``
+ c1, c2 :
+ See the ``<type>fit`` functions for more detail
+ """
+ x = np.asarray(x) + 0.0
+ y = np.asarray(y) + 0.0
+ deg = np.asarray(deg)
+
+ # check arguments.
+ if deg.ndim > 1 or deg.dtype.kind not in 'iu' or deg.size == 0:
+ raise TypeError("deg must be an int or non-empty 1-D array of int")
+ if deg.min() < 0:
+ raise ValueError("expected deg >= 0")
+ if x.ndim != 1:
+ raise TypeError("expected 1D vector for x")
+ if x.size == 0:
+ raise TypeError("expected non-empty vector for x")
+ if y.ndim < 1 or y.ndim > 2:
+ raise TypeError("expected 1D or 2D array for y")
+ if len(x) != len(y):
+ raise TypeError("expected x and y to have same length")
+
+ if deg.ndim == 0:
+ lmax = deg
+ order = lmax + 1
+ van = vander_f(x, lmax)
+ else:
+ deg = np.sort(deg)
+ lmax = deg[-1]
+ order = len(deg)
+ van = vander_f(x, lmax)[:, deg]
+
+ # set up the least squares matrices in transposed form
+ lhs = van.T
+ rhs = y.T
+ if w is not None:
+ w = np.asarray(w) + 0.0
+ if w.ndim != 1:
+ raise TypeError("expected 1D vector for w")
+ if len(x) != len(w):
+ raise TypeError("expected x and w to have same length")
+ # apply weights. Don't use inplace operations as they
+ # can cause problems with NA.
+ lhs = lhs * w
+ rhs = rhs * w
+
+ # set rcond
+ if rcond is None:
+ rcond = len(x)*np.finfo(x.dtype).eps
+
+ # Determine the norms of the design matrix columns.
+ if issubclass(lhs.dtype.type, np.complexfloating):
+ scl = np.sqrt((np.square(lhs.real) + np.square(lhs.imag)).sum(1))
+ else:
+ scl = np.sqrt(np.square(lhs).sum(1))
+ scl[scl == 0] = 1
+
+ # Solve the least squares problem.
+ c, resids, rank, s = np.linalg.lstsq(lhs.T/scl, rhs.T, rcond)
+ c = (c.T/scl).T
+
+ # Expand c to include non-fitted coefficients which are set to zero
+ if deg.ndim > 0:
+ if c.ndim == 2:
+ cc = np.zeros((lmax+1, c.shape[1]), dtype=c.dtype)
+ else:
+ cc = np.zeros(lmax+1, dtype=c.dtype)
+ cc[deg] = c
+ c = cc
+
+ # warn on rank reduction
+ if rank != order and not full:
+ msg = "The fit may be poorly conditioned"
+ warnings.warn(msg, RankWarning, stacklevel=2)
+
+ if full:
+ return c, [resids, rank, s, rcond]
+ else:
+ return c
+
+
+def _pow(mul_f, c, pow, maxpower):
+ """
+ Helper function used to implement the ``<type>pow`` functions.
+
+ Parameters
+ ----------
+ vander_f : function(array_like, int) -> ndarray
+ The 1d vander function, such as ``polyvander``
+ pow, maxpower :
+ See the ``<type>pow`` functions for more detail
+ mul_f : function(array_like, array_like) -> ndarray
+ The ``<type>mul`` function, such as ``polymul``
+ """
+ # c is a trimmed copy
+ [c] = as_series([c])
+ power = int(pow)
+ if power != pow or power < 0:
+ raise ValueError("Power must be a non-negative integer.")
+ elif maxpower is not None and power > maxpower:
+ raise ValueError("Power is too large")
+ elif power == 0:
+ return np.array([1], dtype=c.dtype)
+ elif power == 1:
+ return c
+ else:
+ # This can be made more efficient by using powers of two
+ # in the usual way.
+ prd = c
+ for i in range(2, power + 1):
+ prd = mul_f(prd, c)
+ return prd
+
+
+def _deprecate_as_int(x, desc):
+ """
+ Like `operator.index`, but emits a deprecation warning when passed a float
+
+ Parameters
+ ----------
+ x : int-like, or float with integral value
+ Value to interpret as an integer
+ desc : str
+ description to include in any error message
+
+ Raises
+ ------
+ TypeError : if x is a non-integral float or non-numeric
+ DeprecationWarning : if x is an integral float
+ """
+ try:
+ return operator.index(x)
+ except TypeError:
+ # Numpy 1.17.0, 2019-03-11
+ try:
+ ix = int(x)
+ except TypeError:
+ pass
+ else:
+ if ix == x:
+ warnings.warn(
+ "In future, this will raise TypeError, as {} will need to "
+ "be an integer not just an integral float."
+ .format(desc),
+ DeprecationWarning,
+ stacklevel=3
+ )
+ return ix
+
+ raise TypeError("{} must be an integer".format(desc))
diff --git a/numpy/polynomial/tests/test_chebyshev.py b/numpy/polynomial/tests/test_chebyshev.py
index 7fb7492c6..c8d2d6dba 100644
--- a/numpy/polynomial/tests/test_chebyshev.py
+++ b/numpy/polynomial/tests/test_chebyshev.py
@@ -221,12 +221,12 @@ class TestIntegral(object):
def test_chebint(self):
# check exceptions
- assert_raises(ValueError, cheb.chebint, [0], .5)
+ assert_raises(TypeError, cheb.chebint, [0], .5)
assert_raises(ValueError, cheb.chebint, [0], -1)
assert_raises(ValueError, cheb.chebint, [0], 1, [0, 0])
assert_raises(ValueError, cheb.chebint, [0], lbnd=[0])
assert_raises(ValueError, cheb.chebint, [0], scl=[0])
- assert_raises(ValueError, cheb.chebint, [0], axis=.5)
+ assert_raises(TypeError, cheb.chebint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
@@ -323,7 +323,7 @@ class TestDerivative(object):
def test_chebder(self):
# check exceptions
- assert_raises(ValueError, cheb.chebder, [0], .5)
+ assert_raises(TypeError, cheb.chebder, [0], .5)
assert_raises(ValueError, cheb.chebder, [0], -1)
# check that zeroth derivative does nothing
diff --git a/numpy/polynomial/tests/test_classes.py b/numpy/polynomial/tests/test_classes.py
index 15e24f92b..2261f960b 100644
--- a/numpy/polynomial/tests/test_classes.py
+++ b/numpy/polynomial/tests/test_classes.py
@@ -16,7 +16,7 @@ from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
)
from numpy.compat import long
-
+from numpy.polynomial.polyutils import RankWarning
#
# fixtures
@@ -133,6 +133,17 @@ def test_fromroots(Poly):
assert_almost_equal(p2.coef[-1], 1)
+def test_bad_conditioned_fit(Poly):
+
+ x = [0., 0., 1.]
+ y = [1., 2., 3.]
+
+ # check RankWarning is raised
+ with pytest.warns(RankWarning) as record:
+ Poly.fit(x, y, 2)
+ assert record[0].message.args[0] == "The fit may be poorly conditioned"
+
+
def test_fit(Poly):
def f(x):
diff --git a/numpy/polynomial/tests/test_hermite.py b/numpy/polynomial/tests/test_hermite.py
index 1287ef3fe..271c1964b 100644
--- a/numpy/polynomial/tests/test_hermite.py
+++ b/numpy/polynomial/tests/test_hermite.py
@@ -209,12 +209,12 @@ class TestIntegral(object):
def test_hermint(self):
# check exceptions
- assert_raises(ValueError, herm.hermint, [0], .5)
+ assert_raises(TypeError, herm.hermint, [0], .5)
assert_raises(ValueError, herm.hermint, [0], -1)
assert_raises(ValueError, herm.hermint, [0], 1, [0, 0])
assert_raises(ValueError, herm.hermint, [0], lbnd=[0])
assert_raises(ValueError, herm.hermint, [0], scl=[0])
- assert_raises(ValueError, herm.hermint, [0], axis=.5)
+ assert_raises(TypeError, herm.hermint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
@@ -311,7 +311,7 @@ class TestDerivative(object):
def test_hermder(self):
# check exceptions
- assert_raises(ValueError, herm.hermder, [0], .5)
+ assert_raises(TypeError, herm.hermder, [0], .5)
assert_raises(ValueError, herm.hermder, [0], -1)
# check that zeroth derivative does nothing
diff --git a/numpy/polynomial/tests/test_hermite_e.py b/numpy/polynomial/tests/test_hermite_e.py
index ccb44ad73..434b30e7b 100644
--- a/numpy/polynomial/tests/test_hermite_e.py
+++ b/numpy/polynomial/tests/test_hermite_e.py
@@ -209,12 +209,12 @@ class TestIntegral(object):
def test_hermeint(self):
# check exceptions
- assert_raises(ValueError, herme.hermeint, [0], .5)
+ assert_raises(TypeError, herme.hermeint, [0], .5)
assert_raises(ValueError, herme.hermeint, [0], -1)
assert_raises(ValueError, herme.hermeint, [0], 1, [0, 0])
assert_raises(ValueError, herme.hermeint, [0], lbnd=[0])
assert_raises(ValueError, herme.hermeint, [0], scl=[0])
- assert_raises(ValueError, herme.hermeint, [0], axis=.5)
+ assert_raises(TypeError, herme.hermeint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
@@ -311,7 +311,7 @@ class TestDerivative(object):
def test_hermeder(self):
# check exceptions
- assert_raises(ValueError, herme.hermeder, [0], .5)
+ assert_raises(TypeError, herme.hermeder, [0], .5)
assert_raises(ValueError, herme.hermeder, [0], -1)
# check that zeroth derivative does nothing
diff --git a/numpy/polynomial/tests/test_laguerre.py b/numpy/polynomial/tests/test_laguerre.py
index 3ababec5e..4b9b28637 100644
--- a/numpy/polynomial/tests/test_laguerre.py
+++ b/numpy/polynomial/tests/test_laguerre.py
@@ -206,12 +206,12 @@ class TestIntegral(object):
def test_lagint(self):
# check exceptions
- assert_raises(ValueError, lag.lagint, [0], .5)
+ assert_raises(TypeError, lag.lagint, [0], .5)
assert_raises(ValueError, lag.lagint, [0], -1)
assert_raises(ValueError, lag.lagint, [0], 1, [0, 0])
assert_raises(ValueError, lag.lagint, [0], lbnd=[0])
assert_raises(ValueError, lag.lagint, [0], scl=[0])
- assert_raises(ValueError, lag.lagint, [0], axis=.5)
+ assert_raises(TypeError, lag.lagint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
@@ -308,7 +308,7 @@ class TestDerivative(object):
def test_lagder(self):
# check exceptions
- assert_raises(ValueError, lag.lagder, [0], .5)
+ assert_raises(TypeError, lag.lagder, [0], .5)
assert_raises(ValueError, lag.lagder, [0], -1)
# check that zeroth derivative does nothing
diff --git a/numpy/polynomial/tests/test_legendre.py b/numpy/polynomial/tests/test_legendre.py
index a23086d59..917a7e03a 100644
--- a/numpy/polynomial/tests/test_legendre.py
+++ b/numpy/polynomial/tests/test_legendre.py
@@ -210,12 +210,12 @@ class TestIntegral(object):
def test_legint(self):
# check exceptions
- assert_raises(ValueError, leg.legint, [0], .5)
+ assert_raises(TypeError, leg.legint, [0], .5)
assert_raises(ValueError, leg.legint, [0], -1)
assert_raises(ValueError, leg.legint, [0], 1, [0, 0])
assert_raises(ValueError, leg.legint, [0], lbnd=[0])
assert_raises(ValueError, leg.legint, [0], scl=[0])
- assert_raises(ValueError, leg.legint, [0], axis=.5)
+ assert_raises(TypeError, leg.legint, [0], axis=.5)
# test integration of zero polynomial
for i in range(2, 5):
@@ -312,7 +312,7 @@ class TestDerivative(object):
def test_legder(self):
# check exceptions
- assert_raises(ValueError, leg.legder, [0], .5)
+ assert_raises(TypeError, leg.legder, [0], .5)
assert_raises(ValueError, leg.legder, [0], -1)
# check that zeroth derivative does nothing
diff --git a/numpy/polynomial/tests/test_polynomial.py b/numpy/polynomial/tests/test_polynomial.py
index 0c93be278..1436963c6 100644
--- a/numpy/polynomial/tests/test_polynomial.py
+++ b/numpy/polynomial/tests/test_polynomial.py
@@ -9,7 +9,7 @@ import numpy as np
import numpy.polynomial.polynomial as poly
from numpy.testing import (
assert_almost_equal, assert_raises, assert_equal, assert_,
- )
+ assert_warns, assert_array_equal)
def trim(x):
@@ -147,6 +147,19 @@ class TestEvaluation(object):
assert_equal(poly.polyval(x, [1, 0]).shape, dims)
assert_equal(poly.polyval(x, [1, 0, 0]).shape, dims)
+ #check masked arrays are processed correctly
+ mask = [False, True, False]
+ mx = np.ma.array([1, 2, 3], mask=mask)
+ res = np.polyval([7, 5, 3], mx)
+ assert_array_equal(res.mask, mask)
+
+ #check subtypes of ndarray are preserved
+ class C(np.ndarray):
+ pass
+
+ cx = np.array([1, 2, 3]).view(C)
+ assert_equal(type(np.polyval([2, 3, 4], cx)), C)
+
def test_polyvalfromroots(self):
# check exception for broadcasting x values over root array with
# too few dimensions
@@ -278,12 +291,14 @@ class TestIntegral(object):
def test_polyint(self):
# check exceptions
- assert_raises(ValueError, poly.polyint, [0], .5)
+ assert_raises(TypeError, poly.polyint, [0], .5)
assert_raises(ValueError, poly.polyint, [0], -1)
assert_raises(ValueError, poly.polyint, [0], 1, [0, 0])
assert_raises(ValueError, poly.polyint, [0], lbnd=[0])
assert_raises(ValueError, poly.polyint, [0], scl=[0])
- assert_raises(ValueError, poly.polyint, [0], axis=.5)
+ assert_raises(TypeError, poly.polyint, [0], axis=.5)
+ with assert_warns(DeprecationWarning):
+ poly.polyint([1, 1], 1.)
# test integration of zero polynomial
for i in range(2, 5):
@@ -375,7 +390,7 @@ class TestDerivative(object):
def test_polyder(self):
# check exceptions
- assert_raises(ValueError, poly.polyder, [0], .5)
+ assert_raises(TypeError, poly.polyder, [0], .5)
assert_raises(ValueError, poly.polyder, [0], -1)
# check that zeroth derivative does nothing
diff --git a/numpy/random/LICENSE.md b/numpy/random/LICENSE.md
new file mode 100644
index 000000000..a6cf1b17e
--- /dev/null
+++ b/numpy/random/LICENSE.md
@@ -0,0 +1,71 @@
+**This software is dual-licensed under the The University of Illinois/NCSA
+Open Source License (NCSA) and The 3-Clause BSD License**
+
+# NCSA Open Source License
+**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
+
+Developed by: Kevin Sheppard (<kevin.sheppard@economics.ox.ac.uk>,
+<kevin.k.sheppard@gmail.com>)
+[http://www.kevinsheppard.com](http://www.kevinsheppard.com)
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+Redistributions of source code must retain the above copyright notice, this
+list of conditions and the following disclaimers.
+
+Redistributions in binary form must reproduce the above copyright notice, this
+list of conditions and the following disclaimers in the documentation and/or
+other materials provided with the distribution.
+
+Neither the names of Kevin Sheppard, nor the names of any contributors may be
+used to endorse or promote products derived from this Software without specific
+prior written permission.
+
+**THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
+THE SOFTWARE.**
+
+
+# 3-Clause BSD License
+**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.**
+
+# Components
+
+Many parts of this module have been derived from original sources,
+often the algorithm's designer. Component licenses are located with
+the component code.
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 965ab5ea9..f7c248451 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -3,32 +3,67 @@
Random Number Generation
========================
+Use ``default_rng()`` to create a `Generator` and call its methods.
+
+=============== =========================================================
+Generator
+--------------- ---------------------------------------------------------
+Generator Class implementing all of the random number distributions
+default_rng Default constructor for ``Generator``
+=============== =========================================================
+
+============================================= ===
+BitGenerator Streams that work with Generator
+--------------------------------------------- ---
+MT19937
+PCG64
+Philox
+SFC64
+============================================= ===
+
+============================================= ===
+Getting entropy to initialize a BitGenerator
+--------------------------------------------- ---
+SeedSequence
+============================================= ===
+
+
+Legacy
+------
+
+For backwards compatibility with previous versions of numpy before 1.17, the
+various aliases to the global `RandomState` methods are left alone and do not
+use the new `Generator` API.
+
==================== =========================================================
Utility functions
-==============================================================================
-random_sample Uniformly distributed floats over ``[0, 1)``.
-random Alias for `random_sample`.
+-------------------- ---------------------------------------------------------
+random Uniformly distributed floats over ``[0, 1)``
bytes Uniformly distributed random bytes.
-random_integers Uniformly distributed integers in a given range.
permutation Randomly permute a sequence / generate a random sequence.
shuffle Randomly permute a sequence in place.
-seed Seed the random number generator.
choice Random sample from 1-D array.
-
==================== =========================================================
==================== =========================================================
-Compatibility functions
-==============================================================================
+Compatibility
+functions - removed
+in the new API
+-------------------- ---------------------------------------------------------
rand Uniformly distributed values.
randn Normally distributed values.
ranf Uniformly distributed floating point numbers.
-randint Uniformly distributed integers in a given range.
+random_integers Uniformly distributed integers in a given range.
+ (deprecated, use ``integers(..., closed=True)`` instead)
+random_sample Alias for `random_sample`
+randint Uniformly distributed integers in a given range
+seed Seed the legacy random number generator.
==================== =========================================================
==================== =========================================================
-Univariate distributions
-==============================================================================
+Univariate
+distributions
+-------------------- ---------------------------------------------------------
beta Beta distribution over ``[0, 1]``.
binomial Binomial distribution.
chisquare :math:`\\chi^2` distribution.
@@ -58,17 +93,19 @@ weibull Weibull distribution.
zipf Zipf's distribution over ranked data.
==================== =========================================================
-==================== =========================================================
-Multivariate distributions
-==============================================================================
+==================== ==========================================================
+Multivariate
+distributions
+-------------------- ----------------------------------------------------------
dirichlet Multivariate generalization of Beta distribution.
multinomial Multivariate generalization of the binomial distribution.
multivariate_normal Multivariate generalization of the normal distribution.
-==================== =========================================================
+==================== ==========================================================
==================== =========================================================
-Standard distributions
-==============================================================================
+Standard
+distributions
+-------------------- ---------------------------------------------------------
standard_cauchy Standard Cauchy-Lorentz distribution.
standard_exponential Standard exponential distribution.
standard_gamma Standard Gamma distribution.
@@ -78,16 +115,15 @@ standard_t Standard Student's t-distribution.
==================== =========================================================
Internal functions
-==============================================================================
+-------------------- ---------------------------------------------------------
get_state Get tuple representing internal state of generator.
set_state Set state of generator.
==================== =========================================================
+
"""
from __future__ import division, absolute_import, print_function
-import warnings
-
__all__ = [
'beta',
'binomial',
@@ -119,9 +155,12 @@ __all__ = [
'rand',
'randint',
'randn',
+ 'random',
'random_integers',
'random_sample',
+ 'ranf',
'rayleigh',
+ 'sample',
'seed',
'set_state',
'shuffle',
@@ -135,32 +174,43 @@ __all__ = [
'vonmises',
'wald',
'weibull',
- 'zipf'
+ 'zipf',
]
-with warnings.catch_warnings():
- warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
- from .mtrand import *
+# add these for module-freeze analysis (like PyInstaller)
+from . import _pickle
+from . import common
+from . import bounded_integers
+
+from .mtrand import *
+from .generator import Generator, default_rng
+from .bit_generator import SeedSequence
+from .mt19937 import MT19937
+from .pcg64 import PCG64
+from .philox import Philox
+from .sfc64 import SFC64
+from .mtrand import RandomState
+
+__all__ += ['Generator', 'RandomState', 'SeedSequence', 'MT19937',
+ 'Philox', 'PCG64', 'SFC64', 'default_rng']
-# Some aliases:
-ranf = random = sample = random_sample
-__all__.extend(['ranf', 'random', 'sample'])
def __RandomState_ctor():
"""Return a RandomState instance.
This function exists solely to assist (un)pickling.
- Note that the state of the RandomState returned here is irrelevant, as this function's
- entire purpose is to return a newly allocated RandomState whose state pickle can set.
- Consequently the RandomState returned by this function is a freshly allocated copy
- with a seed=0.
+ Note that the state of the RandomState returned here is irrelevant, as this
+ function's entire purpose is to return a newly allocated RandomState whose
+ state pickle can set. Consequently the RandomState returned by this function
+ is a freshly allocated copy with a seed=0.
See https://github.com/numpy/numpy/issues/4763 for a detailed discussion
"""
return RandomState(seed=0)
+
from numpy._pytesttester import PytestTester
test = PytestTester(__name__)
del PytestTester
diff --git a/numpy/random/_pickle.py b/numpy/random/_pickle.py
new file mode 100644
index 000000000..3b58f21e8
--- /dev/null
+++ b/numpy/random/_pickle.py
@@ -0,0 +1,82 @@
+from .mtrand import RandomState
+from .philox import Philox
+from .pcg64 import PCG64
+from .sfc64 import SFC64
+
+from .generator import Generator
+from .mt19937 import MT19937
+
+BitGenerators = {'MT19937': MT19937,
+ 'PCG64': PCG64,
+ 'Philox': Philox,
+ 'SFC64': SFC64,
+ }
+
+
+def __generator_ctor(bit_generator_name='MT19937'):
+ """
+ Pickling helper function that returns a Generator object
+
+ Parameters
+ ----------
+ bit_generator_name: str
+ String containing the core BitGenerator
+
+ Returns
+ -------
+ rg: Generator
+ Generator using the named core BitGenerator
+ """
+ if bit_generator_name in BitGenerators:
+ bit_generator = BitGenerators[bit_generator_name]
+ else:
+ raise ValueError(str(bit_generator_name) + ' is not a known '
+ 'BitGenerator module.')
+
+ return Generator(bit_generator())
+
+
+def __bit_generator_ctor(bit_generator_name='MT19937'):
+ """
+ Pickling helper function that returns a bit generator object
+
+ Parameters
+ ----------
+ bit_generator_name: str
+ String containing the name of the BitGenerator
+
+ Returns
+ -------
+ bit_generator: BitGenerator
+ BitGenerator instance
+ """
+ if bit_generator_name in BitGenerators:
+ bit_generator = BitGenerators[bit_generator_name]
+ else:
+ raise ValueError(str(bit_generator_name) + ' is not a known '
+ 'BitGenerator module.')
+
+ return bit_generator()
+
+
+def __randomstate_ctor(bit_generator_name='MT19937'):
+ """
+ Pickling helper function that returns a legacy RandomState-like object
+
+ Parameters
+ ----------
+ bit_generator_name: str
+ String containing the core BitGenerator
+
+ Returns
+ -------
+ rs: RandomState
+ Legacy RandomState using the named core BitGenerator
+ """
+ if bit_generator_name in BitGenerators:
+ bit_generator = BitGenerators[bit_generator_name]
+ else:
+ raise ValueError(str(bit_generator_name) + ' is not a known '
+ 'BitGenerator module.')
+
+ return RandomState(bit_generator())
diff --git a/numpy/random/bit_generator.pxd b/numpy/random/bit_generator.pxd
new file mode 100644
index 000000000..984033f17
--- /dev/null
+++ b/numpy/random/bit_generator.pxd
@@ -0,0 +1,26 @@
+
+from .common cimport bitgen_t, uint32_t
+cimport numpy as np
+
+cdef class BitGenerator():
+ cdef readonly object _seed_seq
+ cdef readonly object lock
+ cdef bitgen_t _bitgen
+ cdef readonly object _ctypes
+ cdef readonly object _cffi
+ cdef readonly object capsule
+
+
+cdef class SeedSequence():
+ cdef readonly object entropy
+ cdef readonly tuple spawn_key
+ cdef readonly uint32_t pool_size
+ cdef readonly object pool
+ cdef readonly uint32_t n_children_spawned
+
+ cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
+ np.ndarray[np.npy_uint32, ndim=1] entropy_array)
+ cdef get_assembled_entropy(self)
+
+cdef class SeedlessSequence():
+ pass
diff --git a/numpy/random/bit_generator.pyx b/numpy/random/bit_generator.pyx
new file mode 100644
index 000000000..eb608af6c
--- /dev/null
+++ b/numpy/random/bit_generator.pyx
@@ -0,0 +1,629 @@
+"""
+BitGenerator base class and SeedSequence used to seed the BitGenerators.
+
+SeedSequence is derived from Melissa E. O'Neill's C++11 `std::seed_seq`
+implementation, as it has a lot of nice properties that we want.
+
+https://gist.github.com/imneme/540829265469e673d045
+http://www.pcg-random.org/posts/developing-a-seed_seq-alternative.html
+
+The MIT License (MIT)
+
+Copyright (c) 2015 Melissa E. O'Neill
+Copyright (c) 2019 NumPy Developers
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import abc
+import sys
+from itertools import cycle
+import re
+
+try:
+ from secrets import randbits
+except ImportError:
+ # secrets unavailable on python 3.5 and before
+ from random import SystemRandom
+ randbits = SystemRandom().getrandbits
+
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
+from cpython.pycapsule cimport PyCapsule_New
+
+import numpy as np
+cimport numpy as np
+
+from libc.stdint cimport uint32_t
+from .common cimport (random_raw, benchmark, prepare_ctypes, prepare_cffi)
+from .distributions cimport bitgen_t
+
+__all__ = ['SeedSequence', 'BitGenerator']
+
+np.import_array()
+
+DECIMAL_RE = re.compile(r'[0-9]+')
+
+cdef uint32_t DEFAULT_POOL_SIZE = 4 # Appears also in docstring for pool_size
+cdef uint32_t INIT_A = 0x43b0d7e5
+cdef uint32_t MULT_A = 0x931e8875
+cdef uint32_t INIT_B = 0x8b51f9dd
+cdef uint32_t MULT_B = 0x58f38ded
+cdef uint32_t MIX_MULT_L = 0xca01f9dd
+cdef uint32_t MIX_MULT_R = 0x4973f715
+cdef uint32_t XSHIFT = np.dtype(np.uint32).itemsize * 8 // 2
+cdef uint32_t MASK32 = 0xFFFFFFFF
+
+def _int_to_uint32_array(n):
+ arr = []
+ if n < 0:
+ raise ValueError("expected non-negative integer")
+ if n == 0:
+ arr.append(np.uint32(n))
+ if isinstance(n, np.unsignedinteger):
+ # Cannot do n & MASK32, convert to python int
+ n = int(n)
+ while n > 0:
+ arr.append(np.uint32(n & MASK32))
+ n //= (2**32)
+ return np.array(arr, dtype=np.uint32)
+
+def _coerce_to_uint32_array(x):
+ """ Coerce an input to a uint32 array.
+
+ If a `uint32` array, pass it through directly.
+ If a non-negative integer, then break it up into `uint32` words, lowest
+ bits first.
+ If a string starting with "0x", then interpret as a hex integer, as above.
+ If a string of decimal digits, interpret as a decimal integer, as above.
+ If a sequence of ints or strings, interpret each element as above and
+ concatenate.
+
+ Note that the handling of `int64` or `uint64` arrays are not just
+ straightforward views as `uint32` arrays. If an element is small enough to
+ fit into a `uint32`, then it will only take up one `uint32` element in the
+ output. This is to make sure that the interpretation of a sequence of
+ integers is the same regardless of numpy's default integer type, which
+ differs on different platforms.
+
+ Parameters
+ ----------
+ x : int, str, sequence of int or str
+
+ Returns
+ -------
+ seed_array : uint32 array
+
+ Examples
+ --------
+ >>> import numpy as np
+ >>> from numpy.random.bit_generator import _coerce_to_uint32_array
+ >>> _coerce_to_uint32_array(12345)
+ array([12345], dtype=uint32)
+ >>> _coerce_to_uint32_array('12345')
+ array([12345], dtype=uint32)
+ >>> _coerce_to_uint32_array('0x12345')
+ array([74565], dtype=uint32)
+ >>> _coerce_to_uint32_array([12345, '67890'])
+ array([12345, 67890], dtype=uint32)
+ >>> _coerce_to_uint32_array(np.array([12345, 67890], dtype=np.uint32))
+ array([12345, 67890], dtype=uint32)
+ >>> _coerce_to_uint32_array(np.array([12345, 67890], dtype=np.int64))
+ array([12345, 67890], dtype=uint32)
+ >>> _coerce_to_uint32_array([12345, 0x10deadbeef, 67890, 0xdeadbeef])
+ array([ 12345, 3735928559, 16, 67890, 3735928559],
+ dtype=uint32)
+ >>> _coerce_to_uint32_array(1234567890123456789012345678901234567890)
+ array([3460238034, 2898026390, 3235640248, 2697535605, 3],
+ dtype=uint32)
+ """
+ if isinstance(x, np.ndarray) and x.dtype == np.dtype(np.uint32):
+ return x.copy()
+ elif isinstance(x, str):
+ if x.startswith('0x'):
+ x = int(x, base=16)
+ elif DECIMAL_RE.match(x):
+ x = int(x)
+ else:
+ raise ValueError("unrecognized seed string")
+ if isinstance(x, (int, np.integer)):
+ return _int_to_uint32_array(x)
+ elif isinstance(x, (float, np.inexact)):
+ raise TypeError('seed must be integer')
+ else:
+ if len(x) == 0:
+ return np.array([], dtype=np.uint32)
+ # Should be a sequence of interpretable-as-ints. Convert each one to
+ # a uint32 array and concatenate.
+ subseqs = [_coerce_to_uint32_array(v) for v in x]
+ return np.concatenate(subseqs)
+
+
+cdef uint32_t hashmix(uint32_t value, uint32_t * hash_const):
+ # We are modifying the multiplier as we go along, so it is input-output
+ value ^= hash_const[0]
+ hash_const[0] *= MULT_A
+ value *= hash_const[0]
+ value ^= value >> XSHIFT
+ return value
+
+cdef uint32_t mix(uint32_t x, uint32_t y):
+ cdef uint32_t result = (MIX_MULT_L * x - MIX_MULT_R * y)
+ result ^= result >> XSHIFT
+ return result
+
+
+class ISeedSequence(abc.ABC):
+ """
+ Abstract base class for seed sequences.
+
+ ``BitGenerator`` implementations should treat any object that adheres to
+ this interface as a seed sequence.
+
+ See Also
+ --------
+ SeedSequence, SeedlessSeedSequence
+ """
+
+ @abc.abstractmethod
+ def generate_state(self, n_words, dtype=np.uint32):
+ """
+ generate_state(n_words, dtype=np.uint32)
+
+ Return the requested number of words for PRNG seeding.
+
+ A BitGenerator should call this method in its constructor with
+ an appropriate `n_words` parameter to properly seed itself.
+
+ Parameters
+ ----------
+ n_words : int
+ dtype : np.uint32 or np.uint64, optional
+ The size of each word. This should only be either `uint32` or
+ `uint64`. Strings (`'uint32'`, `'uint64'`) are fine. Note that
+ requesting `uint64` will draw twice as many bits as `uint32` for
+ the same `n_words`. This is a convenience for `BitGenerator`s that
+ express their states as `uint64` arrays.
+
+ Returns
+ -------
+ state : uint32 or uint64 array, shape=(n_words,)
+ """
+
+
+class ISpawnableSeedSequence(ISeedSequence):
+ """
+ Abstract base class for seed sequences that can spawn.
+ """
+
+ @abc.abstractmethod
+ def spawn(self, n_children):
+ """
+ spawn(n_children)
+
+ Spawn a number of child `SeedSequence` s by extending the
+ `spawn_key`.
+
+ Parameters
+ ----------
+ n_children : int
+
+ Returns
+ -------
+ seqs : list of `SeedSequence` s
+ """
+
+
+cdef class SeedlessSeedSequence():
+ """
+ A seed sequence for BitGenerators with no need for seed state.
+
+ See Also
+ --------
+ SeedSequence, ISeedSequence
+ """
+
+ def generate_state(self, n_words, dtype=np.uint32):
+ raise NotImplementedError('seedless SeedSequences cannot generate state')
+
+ def spawn(self, n_children):
+ return [self] * n_children
+
+
+# We cannot directly subclass a `cdef class` type from an `ABC` in Cython, so
+# we must register it after the fact.
+ISpawnableSeedSequence.register(SeedlessSeedSequence)
+
+
+cdef class SeedSequence():
+ """
+ SeedSequence(entropy=None, *, spawn_key=(), pool_size=4)
+
+ SeedSequence mixes sources of entropy in a reproducible way to set the
+ initial state for independent and very probably non-overlapping
+ BitGenerators.
+
+ Once the SeedSequence is instantiated, you can call the `generate_state`
+ method to get an appropriately sized seed. Calling `spawn(n) <spawn>` will
+ create ``n`` SeedSequences that can be used to seed independent
+ BitGenerators, i.e. for different threads.
+
+ Parameters
+ ----------
+ entropy : {None, int, sequence[int]}, optional
+ The entropy for creating a `SeedSequence`.
+ spawn_key : {(), sequence[int]}, optional
+ A third source of entropy, used internally when calling
+ `SeedSequence.spawn`
+ pool_size : {int}, optional
+ Size of the pooled entropy to store. Default is 4 to give a 128-bit
+ entropy pool. 8 (for 256 bits) is another reasonable choice if working
+ with larger PRNGs, but there is very little to be gained by selecting
+ another value.
+ n_children_spawned : {int}, optional
+ The number of children already spawned. Only pass this if
+ reconstructing a `SeedSequence` from a serialized form.
+
+ Notes
+ -----
+
+ Best practice for achieving reproducible bit streams is to use
+ the default ``None`` for the initial entropy, and then use
+ `SeedSequence.entropy` to log/pickle the `entropy` for reproducibility:
+
+ >>> sq1 = np.random.SeedSequence()
+ >>> sq1.entropy
+ 243799254704924441050048792905230269161 # random
+ >>> sq2 = np.random.SeedSequence(sq1.entropy)
+ >>> np.all(sq1.generate_state(10) == sq2.generate_state(10))
+ True
+ """
+
+ def __init__(self, entropy=None, *, spawn_key=(),
+ pool_size=DEFAULT_POOL_SIZE, n_children_spawned=0):
+ if pool_size < DEFAULT_POOL_SIZE:
+ raise ValueError("The size of the entropy pool should be at least "
+ f"{DEFAULT_POOL_SIZE}")
+ if entropy is None:
+ entropy = randbits(pool_size * 32)
+ elif not isinstance(entropy, (int, np.integer, list, tuple, range,
+ np.ndarray)):
+ raise TypeError('SeedSequence expects int or sequence of ints for '
+ 'entropy not {}'.format(entropy))
+ self.entropy = entropy
+ self.spawn_key = tuple(spawn_key)
+ self.pool_size = pool_size
+ self.n_children_spawned = n_children_spawned
+
+ self.pool = np.zeros(pool_size, dtype=np.uint32)
+ self.mix_entropy(self.pool, self.get_assembled_entropy())
+
+ def __repr__(self):
+ lines = [
+ f'{type(self).__name__}(',
+ f' entropy={self.entropy!r},',
+ ]
+ # Omit some entries if they are left as the defaults in order to
+ # simplify things.
+ if self.spawn_key:
+ lines.append(f' spawn_key={self.spawn_key!r},')
+ if self.pool_size != DEFAULT_POOL_SIZE:
+ lines.append(f' pool_size={self.pool_size!r},')
+ if self.n_children_spawned != 0:
+ lines.append(f' n_children_spawned={self.n_children_spawned!r},')
+ lines.append(')')
+ text = '\n'.join(lines)
+ return text
+
+ @property
+ def state(self):
+ return {k:getattr(self, k) for k in
+ ['entropy', 'spawn_key', 'pool_size',
+ 'n_children_spawned']
+ if getattr(self, k) is not None}
+
+ cdef mix_entropy(self, np.ndarray[np.npy_uint32, ndim=1] mixer,
+ np.ndarray[np.npy_uint32, ndim=1] entropy_array):
+ """ Mix in the given entropy to mixer.
+
+ Parameters
+ ----------
+ mixer : 1D uint32 array, modified in-place
+ entropy_array : 1D uint32 array
+ """
+ cdef uint32_t hash_const[1]
+ hash_const[0] = INIT_A
+
+ # Add in the entropy up to the pool size.
+ for i in range(len(mixer)):
+ if i < len(entropy_array):
+ mixer[i] = hashmix(entropy_array[i], hash_const)
+ else:
+ # Our pool size is bigger than our entropy, so just keep
+ # running the hash out.
+ mixer[i] = hashmix(0, hash_const)
+
+ # Mix all bits together so late bits can affect earlier bits.
+ for i_src in range(len(mixer)):
+ for i_dst in range(len(mixer)):
+ if i_src != i_dst:
+ mixer[i_dst] = mix(mixer[i_dst],
+ hashmix(mixer[i_src], hash_const))
+
+ # Add any remaining entropy, mixing each new entropy word with each
+ # pool word.
+ for i_src in range(len(mixer), len(entropy_array)):
+ for i_dst in range(len(mixer)):
+ mixer[i_dst] = mix(mixer[i_dst],
+ hashmix(entropy_array[i_src], hash_const))
+
+ cdef get_assembled_entropy(self):
+ """ Convert and assemble all entropy sources into a uniform uint32
+ array.
+
+ Returns
+ -------
+ entropy_array : 1D uint32 array
+ """
+ # Convert run-entropy, program-entropy, and the spawn key into uint32
+ # arrays and concatenate them.
+
+ # We MUST have at least some run-entropy. The others are optional.
+ assert self.entropy is not None
+ run_entropy = _coerce_to_uint32_array(self.entropy)
+ spawn_entropy = _coerce_to_uint32_array(self.spawn_key)
+ entropy_array = np.concatenate([run_entropy, spawn_entropy])
+ return entropy_array
+
+ @np.errstate(over='ignore')
+ def generate_state(self, n_words, dtype=np.uint32):
+ """
+ generate_state(n_words, dtype=np.uint32)
+
+ Return the requested number of words for PRNG seeding.
+
+ A BitGenerator should call this method in its constructor with
+ an appropriate `n_words` parameter to properly seed itself.
+
+ Parameters
+ ----------
+ n_words : int
+ dtype : np.uint32 or np.uint64, optional
+ The size of each word. This should only be either `uint32` or
+ `uint64`. Strings (`'uint32'`, `'uint64'`) are fine. Note that
+ requesting `uint64` will draw twice as many bits as `uint32` for
+ the same `n_words`. This is a convenience for `BitGenerator`s that
+ express their states as `uint64` arrays.
+
+ Returns
+ -------
+ state : uint32 or uint64 array, shape=(n_words,)
+ """
+ cdef uint32_t hash_const = INIT_B
+ cdef uint32_t data_val
+
+ out_dtype = np.dtype(dtype)
+ if out_dtype == np.dtype(np.uint32):
+ pass
+ elif out_dtype == np.dtype(np.uint64):
+ n_words *= 2
+ else:
+ raise ValueError("only support uint32 or uint64")
+ state = np.zeros(n_words, dtype=np.uint32)
+ src_cycle = cycle(self.pool)
+ for i_dst in range(n_words):
+ data_val = next(src_cycle)
+ data_val ^= hash_const
+ hash_const *= MULT_B
+ data_val *= hash_const
+ data_val ^= data_val >> XSHIFT
+ state[i_dst] = data_val
+ if out_dtype == np.dtype(np.uint64):
+ # For consistency across different endiannesses, view first as
+ # little-endian then convert the values to the native endianness.
+ state = state.astype('<u4').view('<u8').astype(np.uint64)
+ return state
+
+ def spawn(self, n_children):
+ """
+ spawn(n_children)
+
+ Spawn a number of child `SeedSequence` s by extending the
+ `spawn_key`.
+
+ Parameters
+ ----------
+ n_children : int
+
+ Returns
+ -------
+ seqs : list of `SeedSequence` s
+ """
+ cdef uint32_t i
+
+ seqs = []
+ for i in range(self.n_children_spawned,
+ self.n_children_spawned + n_children):
+ seqs.append(type(self)(
+ self.entropy,
+ spawn_key=self.spawn_key + (i,),
+ pool_size=self.pool_size,
+ ))
+ self.n_children_spawned += n_children
+ return seqs
+
+
+ISpawnableSeedSequence.register(SeedSequence)
+
+
+cdef class BitGenerator():
+ """
+ BitGenerator(seed=None)
+
+ Base Class for generic BitGenerators, which provide a stream
+ of random bits based on different algorithms. Must be overridden.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], ISeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in an implementor of the `ISeedSequence` interface like
+ `SeedSequence`.
+
+ Attributes
+ ----------
+ lock : threading.Lock
+ Lock instance that is shared so that the same BitGenerator can
+ be used in multiple Generators without corrupting the state. Code that
+ generates values from a bit generator should hold the bit generator's
+ lock.
+
+ See Also
+ -------
+ SeedSequence
+ """
+
+ def __init__(self, seed=None):
+ self.lock = Lock()
+ self._bitgen.state = <void *>0
+ if type(self) is BitGenerator:
+ raise NotImplementedError('BitGenerator is a base class and cannot be instantized')
+
+ self._ctypes = None
+ self._cffi = None
+
+ cdef const char *name = "BitGenerator"
+ self.capsule = PyCapsule_New(<void *>&self._bitgen, name, NULL)
+ if not isinstance(seed, ISeedSequence):
+ seed = SeedSequence(seed)
+ self._seed_seq = seed
+
+ # Pickling support:
+ def __getstate__(self):
+ return self.state
+
+ def __setstate__(self, state):
+ self.state = state
+
+ def __reduce__(self):
+ from ._pickle import __bit_generator_ctor
+ return __bit_generator_ctor, (self.state['bit_generator'],), self.state
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ The base BitGenerator.state must be overridden by a subclass
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ raise NotImplementedError('Not implemented in base BitGenerator')
+
+ @state.setter
+ def state(self, value):
+ raise NotImplementedError('Not implemented in base BitGenerator')
+
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None)
+
+ Return randoms as generated by the underlying BitGenerator
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Output values. Used for performance testing since the generated
+ values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(&self._bitgen, self.lock, size, output)
+
+ def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
+ '''Used in tests'''
+ return benchmark(&self._bitgen, self.lock, cnt, method)
+
+ @property
+ def ctypes(self):
+ """
+ ctypes interface
+
+ Returns
+ -------
+ interface : namedtuple
+ Named tuple containing ctypes wrapper
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bitgen - pointer to the bit generator struct
+ """
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(&self._bitgen)
+
+ return self._ctypes
+
+ @property
+ def cffi(self):
+ """
+ CFFI interface
+
+ Returns
+ -------
+ interface : namedtuple
+ Named tuple containing CFFI wrapper
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bitgen - pointer to the bit generator struct
+ """
+ if self._cffi is None:
+ self._cffi = prepare_cffi(&self._bitgen)
+ return self._cffi
diff --git a/numpy/random/bounded_integers.pxd.in b/numpy/random/bounded_integers.pxd.in
new file mode 100644
index 000000000..7a3f224dc
--- /dev/null
+++ b/numpy/random/bounded_integers.pxd.in
@@ -0,0 +1,26 @@
+from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
+ int8_t, int16_t, int32_t, int64_t, intptr_t)
+import numpy as np
+cimport numpy as np
+ctypedef np.npy_bool bool_t
+
+from .common cimport bitgen_t
+
+cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
+ """Mask generator for use in bounded random numbers"""
+ # Smallest bit mask >= max
+ cdef uint64_t mask = max_val
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+ mask |= mask >> 32
+ return mask
+{{
+py:
+inttypes = ('uint64','uint32','uint16','uint8','bool','int64','int32','int16','int8')
+}}
+{{for inttype in inttypes}}
+cdef object _rand_{{inttype}}(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
+{{endfor}}
diff --git a/numpy/random/bounded_integers.pyx.in b/numpy/random/bounded_integers.pyx.in
new file mode 100644
index 000000000..411b65a37
--- /dev/null
+++ b/numpy/random/bounded_integers.pyx.in
@@ -0,0 +1,305 @@
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True
+
+import numpy as np
+cimport numpy as np
+
+from .distributions cimport *
+
+__all__ = []
+
+np.import_array()
+
+_integers_types = {'bool': (0, 2),
+ 'int8': (-2**7, 2**7),
+ 'int16': (-2**15, 2**15),
+ 'int32': (-2**31, 2**31),
+ 'int64': (-2**63, 2**63),
+ 'uint8': (0, 2**8),
+ 'uint16': (0, 2**16),
+ 'uint32': (0, 2**32),
+ 'uint64': (0, 2**64)}
+{{
+py:
+type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'),
+ ('uint16', 'uint16', 'uint32', 'NPY_UINT32', 1, 16, 0, '0X10000UL'),
+ ('uint8', 'uint8', 'uint16', 'NPY_UINT16', 3, 8, 0, '0X100UL'),
+ ('bool','bool', 'uint8', 'NPY_UINT8', 31, 1, 0, '0x2UL'),
+ ('int32', 'uint32', 'uint64', 'NPY_INT64', 0, 0, '-0x80000000LL', '0x80000000LL'),
+ ('int16', 'uint16', 'uint32', 'NPY_INT32', 1, 16, '-0x8000LL', '0x8000LL' ),
+ ('int8', 'uint8', 'uint16', 'NPY_INT16', 3, 8, '-0x80LL', '0x80LL' ),
+)}}
+{{for nptype, utype, nptype_up, npctype, remaining, bitshift, lb, ub in type_info}}
+{{ py: otype = nptype + '_' if nptype == 'bool' else nptype }}
+cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object size,
+ bint use_masked, bint closed,
+ bitgen_t *state, object lock):
+ """
+ Array path for smaller integer types
+
+ This path is simpler since the high value in the open interval [low, high)
+    must be in-range for the next larger type, {{nptype_up}}. Here we cast to
+    this type for checking and then recast to {{nptype}} when producing the
+ random integers.
+ """
+ cdef {{utype}}_t rng, last_rng, off, val, mask, out_val, is_open
+ cdef uint32_t buf
+ cdef {{utype}}_t *out_data
+ cdef {{nptype_up}}_t low_v, high_v
+ cdef np.ndarray low_arr, high_arr, out_arr
+ cdef np.npy_intp i, cnt
+ cdef np.broadcast it
+ cdef int buf_rem = 0
+
+ # Array path
+ is_open = not closed
+ low_arr = <np.ndarray>low
+ high_arr = <np.ndarray>high
+ if np.any(np.less(low_arr, {{lb}})):
+ raise ValueError('low is out of bounds for {{nptype}}')
+ if closed:
+ high_comp = np.greater_equal
+ low_high_comp = np.greater
+ else:
+ high_comp = np.greater
+ low_high_comp = np.greater_equal
+
+ if np.any(high_comp(high_arr, {{ub}})):
+ raise ValueError('high is out of bounds for {{nptype}}')
+ if np.any(low_high_comp(low_arr, high_arr)):
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+
+ low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+ high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+ if size is not None:
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
+ else:
+ it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+ out_arr = <np.ndarray>np.empty(it.shape, np.{{otype}})
+
+ it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+ out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
+ cnt = np.PyArray_SIZE(out_arr)
+ mask = last_rng = 0
+ with lock, nogil:
+ for i in range(cnt):
+ low_v = (<{{nptype_up}}_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+ high_v = (<{{nptype_up}}_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ # Subtract 1 since generator produces values on the closed int [off, off+rng]
+ rng = <{{utype}}_t>((high_v - is_open) - low_v)
+ off = <{{utype}}_t>(<{{nptype_up}}_t>low_v)
+
+ if rng != last_rng:
+ # Smallest bit mask >= max
+ mask = <{{utype}}_t>_gen_mask(rng)
+
+ out_data[i] = random_buffered_bounded_{{utype}}(state, off, rng, mask, use_masked, &buf_rem, &buf)
+
+ np.PyArray_MultiIter_NEXT(it)
+ return out_arr
+{{endfor}}
+{{
+py:
+big_type_info = (('uint64', 'uint64', 'NPY_UINT64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
+ ('int64', 'uint64', 'NPY_INT64', '-0x8000000000000000LL', '0x7FFFFFFFFFFFFFFFLL' )
+)}}
+{{for nptype, utype, npctype, lb, ub in big_type_info}}
+{{ py: otype = nptype}}
+cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
+ bint use_masked, bint closed,
+ bitgen_t *state, object lock):
+ """
+ Array path for 64-bit integer types
+
+ Requires special treatment since the high value can be out-of-range for
+ the largest (64 bit) integer type since the generator is specified on the
+ interval [low,high).
+
+ The internal generator does not have this issue since it generates from
+    the closed interval [low, high-1] and high-1 is always in range for the
+ 64 bit integer type.
+ """
+
+ cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr
+ cdef np.npy_intp i, cnt, n
+ cdef np.broadcast it
+ cdef object closed_upper
+ cdef uint64_t *out_data
+ cdef {{nptype}}_t *highm1_data
+ cdef {{nptype}}_t low_v, high_v
+ cdef uint64_t rng, last_rng, val, mask, off, out_val
+
+ low_arr = <np.ndarray>low
+ high_arr = <np.ndarray>high
+
+ if np.any(np.less(low_arr, {{lb}})):
+ raise ValueError('low is out of bounds for {{nptype}}')
+ dt = high_arr.dtype
+ if closed or np.issubdtype(dt, np.integer):
+ # Avoid object dtype path if already an integer
+ high_lower_comp = np.less if closed else np.less_equal
+ if np.any(high_lower_comp(high_arr, {{lb}})):
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+ high_m1 = high_arr if closed else high_arr - dt.type(1)
+ if np.any(np.greater(high_m1, {{ub}})):
+ raise ValueError('high is out of bounds for {{nptype}}')
+ highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+ else:
+ # If input is object or a floating type
+ highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{nptype}})
+ highm1_data = <{{nptype}}_t *>np.PyArray_DATA(highm1_arr)
+ cnt = np.PyArray_SIZE(high_arr)
+ flat = high_arr.flat
+ for i in range(cnt):
+ # Subtract 1 since generator produces values on the closed int [off, off+rng]
+ closed_upper = int(flat[i]) - 1
+ if closed_upper > {{ub}}:
+ raise ValueError('high is out of bounds for {{nptype}}')
+ if closed_upper < {{lb}}:
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+ highm1_data[i] = <{{nptype}}_t>closed_upper
+
+ if np.any(np.greater(low_arr, highm1_arr)):
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+
+ high_arr = highm1_arr
+ low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
+
+ if size is not None:
+ out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
+ else:
+ it = np.PyArray_MultiIterNew2(low_arr, high_arr)
+ out_arr = <np.ndarray>np.empty(it.shape, np.{{nptype}})
+
+ it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
+ out_data = <uint64_t *>np.PyArray_DATA(out_arr)
+ n = np.PyArray_SIZE(out_arr)
+ mask = last_rng = 0
+ with lock, nogil:
+ for i in range(n):
+ low_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+ high_v = (<{{nptype}}_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ # Generator produces values on the closed int [off, off+rng], -1 subtracted above
+ rng = <{{utype}}_t>(high_v - low_v)
+ off = <{{utype}}_t>(<{{nptype}}_t>low_v)
+
+ if rng != last_rng:
+ mask = _gen_mask(rng)
+ out_data[i] = random_bounded_uint64(state, off, rng, mask, use_masked)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return out_arr
+{{endfor}}
+{{
+py:
+type_info = (('uint64', 'uint64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
+ ('uint32', 'uint32', '0x0UL', '0XFFFFFFFFUL'),
+ ('uint16', 'uint16', '0x0UL', '0XFFFFUL'),
+ ('uint8', 'uint8', '0x0UL', '0XFFUL'),
+ ('bool', 'bool', '0x0UL', '0x1UL'),
+ ('int64', 'uint64', '-0x8000000000000000LL', '0x7FFFFFFFFFFFFFFFL'),
+ ('int32', 'uint32', '-0x80000000L', '0x7FFFFFFFL'),
+ ('int16', 'uint16', '-0x8000L', '0x7FFFL' ),
+ ('int8', 'uint8', '-0x80L', '0x7FL' )
+)}}
+{{for nptype, utype, lb, ub in type_info}}
+{{ py: otype = nptype + '_' if nptype == 'bool' else nptype }}
+cdef object _rand_{{nptype}}(object low, object high, object size,
+ bint use_masked, bint closed,
+ bitgen_t *state, object lock):
+ """
+    _rand_{{nptype}}(low, high, size, use_masked, closed, *state, lock)
+
+ Return random np.{{nptype}} integers from `low` (inclusive) to `high` (exclusive).
+
+ Return random integers from the "discrete uniform" distribution in the
+ interval [`low`, `high`). If `high` is None (the default),
+ then results are from [0, `low`). On entry the arguments are presumed
+ to have been validated for size and order for the np.{{nptype}} type.
+
+ Parameters
+ ----------
+ low : int or array-like
+ Lowest (signed) integer to be drawn from the distribution (unless
+ ``high=None``, in which case this parameter is the *highest* such
+ integer).
+ high : int or array-like
+ If provided, one above the largest (signed) integer to be drawn from the
+ distribution (see above for behavior if ``high=None``).
+ size : int or tuple of ints
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ use_masked : bool
+ If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
+ closed : bool
+ If True then sample from [low, high]. If False, sample [low, high)
+ state : bit generator
+ Bit generator state to use in the core random number generators
+ lock : threading.Lock
+        Lock to prevent multiple threads from using a single generator simultaneously
+
+ Returns
+ -------
+ out : python scalar or ndarray of np.{{nptype}}
+ `size`-shaped array of random integers from the appropriate
+ distribution, or a single such random int if `size` not provided.
+
+ Notes
+ -----
+ The internal integer generator produces values from the closed
+ interval [low, high-(not closed)]. This requires some care since
+ high can be out-of-range for {{utype}}. The scalar path leaves
+ integers as Python integers until the 1 has been subtracted to
+ avoid needing to cast to a larger type.
+ """
+ cdef np.ndarray out_arr, low_arr, high_arr
+ cdef {{utype}}_t rng, off, out_val
+ cdef {{utype}}_t *out_data
+ cdef np.npy_intp i, n, cnt
+
+ if size is not None:
+ if (np.prod(size) == 0):
+ return np.empty(size, dtype=np.{{nptype}})
+
+ low_arr = <np.ndarray>np.array(low, copy=False)
+ high_arr = <np.ndarray>np.array(high, copy=False)
+ low_ndim = np.PyArray_NDIM(low_arr)
+ high_ndim = np.PyArray_NDIM(high_arr)
+ if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+ (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
+ low = int(low_arr)
+ high = int(high_arr)
+ # Subtract 1 since internal generator produces on closed interval [low, high]
+ if not closed:
+ high -= 1
+
+ if low < {{lb}}:
+ raise ValueError("low is out of bounds for {{nptype}}")
+ if high > {{ub}}:
+ raise ValueError("high is out of bounds for {{nptype}}")
+ if low > high: # -1 already subtracted, closed interval
+ comp = '>' if closed else '>='
+ raise ValueError('low {comp} high'.format(comp=comp))
+
+ rng = <{{utype}}_t>(high - low)
+ off = <{{utype}}_t>(<{{nptype}}_t>low)
+ if size is None:
+ with lock:
+ random_bounded_{{utype}}_fill(state, off, rng, 1, use_masked, &out_val)
+ return np.{{otype}}(<{{nptype}}_t>out_val)
+ else:
+ out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
+ cnt = np.PyArray_SIZE(out_arr)
+ out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
+ with lock, nogil:
+ random_bounded_{{utype}}_fill(state, off, rng, cnt, use_masked, out_data)
+ return out_arr
+ return _rand_{{nptype}}_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
+{{endfor}}
diff --git a/numpy/random/common.pxd b/numpy/random/common.pxd
new file mode 100644
index 000000000..ac0a94bb0
--- /dev/null
+++ b/numpy/random/common.pxd
@@ -0,0 +1,114 @@
+#cython: language_level=3
+
+from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
+ int8_t, int16_t, int32_t, int64_t, intptr_t,
+ uintptr_t)
+from libc.math cimport sqrt
+
+cdef extern from "src/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
+
+import numpy as np
+cimport numpy as np
+
+cdef double POISSON_LAM_MAX
+cdef double LEGACY_POISSON_LAM_MAX
+cdef uint64_t MAXSIZE
+
+cdef enum ConstraintType:
+ CONS_NONE
+ CONS_NON_NEGATIVE
+ CONS_POSITIVE
+ CONS_POSITIVE_NOT_NAN
+ CONS_BOUNDED_0_1
+ CONS_BOUNDED_0_1_NOTNAN
+ CONS_BOUNDED_GT_0_1
+ CONS_GT_1
+ CONS_GTE_1
+ CONS_POISSON
+ LEGACY_CONS_POISSON
+
+ctypedef ConstraintType constraint_type
+
+cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method)
+cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output)
+cdef object prepare_cffi(bitgen_t *bitgen)
+cdef object prepare_ctypes(bitgen_t *bitgen)
+cdef int check_constraint(double val, object name, constraint_type cons) except -1
+cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
+
+cdef extern from "src/aligned_malloc/aligned_malloc.h":
+ cdef void *PyArray_realloc_aligned(void *p, size_t n)
+ cdef void *PyArray_malloc_aligned(size_t n)
+ cdef void *PyArray_calloc_aligned(size_t n, size_t s)
+ cdef void PyArray_free_aligned(void *p)
+
+ctypedef double (*random_double_fill)(bitgen_t *state, np.npy_intp count, double* out) nogil
+ctypedef double (*random_double_0)(void *state) nogil
+ctypedef double (*random_double_1)(void *state, double a) nogil
+ctypedef double (*random_double_2)(void *state, double a, double b) nogil
+ctypedef double (*random_double_3)(void *state, double a, double b, double c) nogil
+
+ctypedef float (*random_float_0)(bitgen_t *state) nogil
+ctypedef float (*random_float_1)(bitgen_t *state, float a) nogil
+
+ctypedef int64_t (*random_uint_0)(void *state) nogil
+ctypedef int64_t (*random_uint_d)(void *state, double a) nogil
+ctypedef int64_t (*random_uint_dd)(void *state, double a, double b) nogil
+ctypedef int64_t (*random_uint_di)(void *state, double a, uint64_t b) nogil
+ctypedef int64_t (*random_uint_i)(void *state, int64_t a) nogil
+ctypedef int64_t (*random_uint_iii)(void *state, int64_t a, int64_t b, int64_t c) nogil
+
+ctypedef uint32_t (*random_uint_0_32)(bitgen_t *state) nogil
+ctypedef uint32_t (*random_uint_1_i_32)(bitgen_t *state, uint32_t a) nogil
+
+ctypedef int32_t (*random_int_2_i_32)(bitgen_t *state, int32_t a, int32_t b) nogil
+ctypedef int64_t (*random_int_2_i)(bitgen_t *state, int64_t a, int64_t b) nogil
+
+cdef double kahan_sum(double *darr, np.npy_intp n)
+
+cdef inline double uint64_to_double(uint64_t rnd) nogil:
+ return (rnd >> 11) * (1.0 / 9007199254740992.0)
+
+cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out)
+
+cdef object wrap_int(object val, object bits)
+
+cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size)
+
+cdef object cont(void *func, void *state, object size, object lock, int narg,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint,
+ object out)
+
+cdef object disc(void *func, void *state, object size, object lock,
+ int narg_double, int narg_int64,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint)
+
+cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
+ object a, object a_name, constraint_type a_constraint,
+ object out)
+
+cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
+
+cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint)
diff --git a/numpy/random/common.pyx b/numpy/random/common.pyx
new file mode 100644
index 000000000..74cd5f033
--- /dev/null
+++ b/numpy/random/common.pyx
@@ -0,0 +1,976 @@
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+from collections import namedtuple
+from cpython cimport PyFloat_AsDouble
+import sys
+import numpy as np
+cimport numpy as np
+
+from .common cimport *
+
+__all__ = ['interface']
+
+np.import_array()
+
+interface = namedtuple('interface', ['state_address', 'state', 'next_uint64',
+ 'next_uint32', 'next_double',
+ 'bit_generator'])
+
+cdef double LEGACY_POISSON_LAM_MAX = <double>np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10
+cdef double POISSON_LAM_MAX = <double>np.iinfo('int64').max - np.sqrt(np.iinfo('int64').max)*10
+
+cdef uint64_t MAXSIZE = <uint64_t>sys.maxsize
+
+
+cdef object benchmark(bitgen_t *bitgen, object lock, Py_ssize_t cnt, object method):
+ """Benchmark command used by BitGenerator"""
+ cdef Py_ssize_t i
+ if method==u'uint64':
+ with lock, nogil:
+ for i in range(cnt):
+ bitgen.next_uint64(bitgen.state)
+ elif method==u'double':
+ with lock, nogil:
+ for i in range(cnt):
+ bitgen.next_double(bitgen.state)
+ else:
+ raise ValueError('Unknown method')
+
+
+cdef object random_raw(bitgen_t *bitgen, object lock, object size, object output):
+ """
+ random_raw(self, size=None)
+
+ Return randoms as generated by the underlying PRNG
+
+ Parameters
+ ----------
+ bitgen : BitGenerator
+ Address of the bit generator struct
+ lock : Threading.Lock
+ Lock provided by the bit generator
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Output values. Used for performance testing since the generated
+ values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+    This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ cdef np.ndarray randoms
+ cdef uint64_t *randoms_data
+ cdef Py_ssize_t i, n
+
+ if not output:
+ if size is None:
+ with lock:
+ bitgen.next_raw(bitgen.state)
+ return None
+ n = np.asarray(size).sum()
+ with lock, nogil:
+ for i in range(n):
+ bitgen.next_raw(bitgen.state)
+ return None
+
+ if size is None:
+ with lock:
+ return bitgen.next_raw(bitgen.state)
+
+ randoms = <np.ndarray>np.empty(size, np.uint64)
+ randoms_data = <uint64_t*>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ with lock, nogil:
+ for i in range(n):
+ randoms_data[i] = bitgen.next_raw(bitgen.state)
+ return randoms
+
+cdef object prepare_cffi(bitgen_t *bitgen):
+ """
+ Bundles the interfaces to interact with a BitGenerator using cffi
+
+ Parameters
+ ----------
+ bitgen : pointer
+ A pointer to a BitGenerator instance
+
+ Returns
+ -------
+ interface : namedtuple
+ The functions required to interface with the BitGenerator using cffi
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bit_generator - pointer to the BitGenerator struct
+ """
+ try:
+ import cffi
+ except ImportError:
+ raise ImportError('cffi cannot be imported.')
+
+ ffi = cffi.FFI()
+ _cffi = interface(<uintptr_t>bitgen.state,
+ ffi.cast('void *', <uintptr_t>bitgen.state),
+ ffi.cast('uint64_t (*)(void *)', <uintptr_t>bitgen.next_uint64),
+ ffi.cast('uint32_t (*)(void *)', <uintptr_t>bitgen.next_uint32),
+ ffi.cast('double (*)(void *)', <uintptr_t>bitgen.next_double),
+ ffi.cast('void *', <uintptr_t>bitgen))
+ return _cffi
+
+cdef object prepare_ctypes(bitgen_t *bitgen):
+ """
+ Bundles the interfaces to interact with a BitGenerator using ctypes
+
+ Parameters
+ ----------
+ bitgen : pointer
+ A pointer to a BitGenerator instance
+
+ Returns
+ -------
+ interface : namedtuple
+ The functions required to interface with the BitGenerator using ctypes:
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * bit_generator - pointer to the BitGenerator struct
+ """
+ import ctypes
+
+ _ctypes = interface(<uintptr_t>bitgen.state,
+ ctypes.c_void_p(<uintptr_t>bitgen.state),
+ ctypes.cast(<uintptr_t>bitgen.next_uint64,
+ ctypes.CFUNCTYPE(ctypes.c_uint64,
+ ctypes.c_void_p)),
+ ctypes.cast(<uintptr_t>bitgen.next_uint32,
+ ctypes.CFUNCTYPE(ctypes.c_uint32,
+ ctypes.c_void_p)),
+ ctypes.cast(<uintptr_t>bitgen.next_double,
+ ctypes.CFUNCTYPE(ctypes.c_double,
+ ctypes.c_void_p)),
+ ctypes.c_void_p(<uintptr_t>bitgen))
+ return _ctypes
+
+cdef double kahan_sum(double *darr, np.npy_intp n):
+ cdef double c, y, t, sum
+ cdef np.npy_intp i
+ sum = darr[0]
+ c = 0.0
+ for i in range(1, n):
+ y = darr[i] - c
+ t = sum + y
+ c = (t-sum) - y
+ sum = t
+ return sum
+
+
+cdef object wrap_int(object val, object bits):
+ """Wraparound to place an integer into the interval [0, 2**bits)"""
+ mask = ~(~int(0) << bits)
+ return val & mask
+
+
+cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size):
+ """Convert a large integer to an array of unsigned integers"""
+ len = bits // uint_size
+ value = np.asarray(value)
+ if uint_size == 32:
+ dtype = np.uint32
+ elif uint_size == 64:
+ dtype = np.uint64
+ else:
+ raise ValueError('Unknown uint_size')
+ if value.shape == ():
+ value = int(value)
+ upper = int(2)**int(bits)
+ if value < 0 or value >= upper:
+ raise ValueError('{name} must be positive and '
+ 'less than 2**{bits}.'.format(name=name, bits=bits))
+
+ out = np.empty(len, dtype=dtype)
+ for i in range(len):
+ out[i] = value % 2**int(uint_size)
+ value >>= int(uint_size)
+ else:
+ out = value.astype(dtype)
+ if out.shape != (len,):
+ raise ValueError('{name} must have {len} elements when using '
+ 'array form'.format(name=name, len=len))
+ return out
+
+
+cdef check_output(object out, object dtype, object size):
+ if out is None:
+ return
+ cdef np.ndarray out_array = <np.ndarray>out
+ if not (np.PyArray_CHKFLAGS(out_array, np.NPY_CARRAY) or
+ np.PyArray_CHKFLAGS(out_array, np.NPY_FARRAY)):
+ raise ValueError('Supplied output array is not contiguous, writable or aligned.')
+ if out_array.dtype != dtype:
+ raise TypeError('Supplied output array has the wrong type. '
+ 'Expected {0}, got {1}'.format(np.dtype(dtype), out_array.dtype))
+ if size is not None:
+ try:
+ tup_size = tuple(size)
+ except TypeError:
+ tup_size = tuple([size])
+ if tup_size != out.shape:
+ raise ValueError('size must match out.shape when used together')
+
+
+cdef object double_fill(void *func, bitgen_t *state, object size, object lock, object out):
+ cdef random_double_fill random_func = (<random_double_fill>func)
+ cdef double out_val
+ cdef double *out_array_data
+ cdef np.ndarray out_array
+ cdef np.npy_intp i, n
+
+ if size is None and out is None:
+ with lock:
+ random_func(state, 1, &out_val)
+ return out_val
+
+ if out is not None:
+ check_output(out, np.float64, size)
+ out_array = <np.ndarray>out
+ else:
+ out_array = <np.ndarray>np.empty(size, np.double)
+
+ n = np.PyArray_SIZE(out_array)
+ out_array_data = <double *>np.PyArray_DATA(out_array)
+ with lock, nogil:
+ random_func(state, n, out_array_data)
+ return out_array
+
+cdef object float_fill(void *func, bitgen_t *state, object size, object lock, object out):
+ cdef random_float_0 random_func = (<random_float_0>func)
+ cdef float *out_array_data
+ cdef np.ndarray out_array
+ cdef np.npy_intp i, n
+
+ if size is None and out is None:
+ with lock:
+ return random_func(state)
+
+ if out is not None:
+ check_output(out, np.float32, size)
+ out_array = <np.ndarray>out
+ else:
+ out_array = <np.ndarray>np.empty(size, np.float32)
+
+ n = np.PyArray_SIZE(out_array)
+ out_array_data = <float *>np.PyArray_DATA(out_array)
+ with lock, nogil:
+ for i in range(n):
+ out_array_data[i] = random_func(state)
+ return out_array
+
+cdef object float_fill_from_double(void *func, bitgen_t *state, object size, object lock, object out):
+ cdef random_double_0 random_func = (<random_double_0>func)
+ cdef float *out_array_data
+ cdef np.ndarray out_array
+ cdef np.npy_intp i, n
+
+ if size is None and out is None:
+ with lock:
+ return <float>random_func(state)
+
+ if out is not None:
+ check_output(out, np.float32, size)
+ out_array = <np.ndarray>out
+ else:
+ out_array = <np.ndarray>np.empty(size, np.float32)
+
+ n = np.PyArray_SIZE(out_array)
+ out_array_data = <float *>np.PyArray_DATA(out_array)
+ with lock, nogil:
+ for i in range(n):
+ out_array_data[i] = <float>random_func(state)
+ return out_array
+
+
+cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1:
+ if cons == CONS_NON_NEGATIVE:
+ if np.any(np.logical_and(np.logical_not(np.isnan(val)), np.signbit(val))):
+ raise ValueError(name + " < 0")
+ elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
+ if cons == CONS_POSITIVE_NOT_NAN and np.any(np.isnan(val)):
+ raise ValueError(name + " must not be NaN")
+ elif np.any(np.less_equal(val, 0)):
+ raise ValueError(name + " <= 0")
+ elif cons == CONS_BOUNDED_0_1:
+ if not np.all(np.greater_equal(val, 0)) or \
+ not np.all(np.less_equal(val, 1)):
+ raise ValueError("{0} < 0, {0} > 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_BOUNDED_GT_0_1:
+ if not np.all(np.greater(val, 0)) or not np.all(np.less_equal(val, 1)):
+ raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_GT_1:
+ if not np.all(np.greater(val, 1)):
+ raise ValueError("{0} <= 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_GTE_1:
+ if not np.all(np.greater_equal(val, 1)):
+ raise ValueError("{0} < 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_POISSON:
+ if not np.all(np.less_equal(val, POISSON_LAM_MAX)):
+ raise ValueError("{0} value too large".format(name))
+ elif not np.all(np.greater_equal(val, 0.0)):
+ raise ValueError("{0} < 0 or {0} contains NaNs".format(name))
+ elif cons == LEGACY_CONS_POISSON:
+ if not np.all(np.less_equal(val, LEGACY_POISSON_LAM_MAX)):
+ raise ValueError("{0} value too large".format(name))
+ elif not np.all(np.greater_equal(val, 0.0)):
+ raise ValueError("{0} < 0 or {0} contains NaNs".format(name))
+
+ return 0
+
+
+cdef int check_constraint(double val, object name, constraint_type cons) except -1:
+ cdef bint is_nan
+ if cons == CONS_NON_NEGATIVE:
+ if not np.isnan(val) and np.signbit(val):
+ raise ValueError(name + " < 0")
+ elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
+ if cons == CONS_POSITIVE_NOT_NAN and np.isnan(val):
+ raise ValueError(name + " must not be NaN")
+ elif val <= 0:
+ raise ValueError(name + " <= 0")
+ elif cons == CONS_BOUNDED_0_1:
+ if not (val >= 0) or not (val <= 1):
+ raise ValueError("{0} < 0, {0} > 1 or {0} is NaN".format(name))
+ elif cons == CONS_BOUNDED_GT_0_1:
+ if not val >0 or not val <= 1:
+ raise ValueError("{0} <= 0, {0} > 1 or {0} contains NaNs".format(name))
+ elif cons == CONS_GT_1:
+ if not (val > 1):
+ raise ValueError("{0} <= 1 or {0} is NaN".format(name))
+ elif cons == CONS_GTE_1:
+ if not (val >= 1):
+ raise ValueError("{0} < 1 or {0} is NaN".format(name))
+ elif cons == CONS_POISSON:
+ if not (val >= 0):
+ raise ValueError("{0} < 0 or {0} is NaN".format(name))
+ elif not (val <= POISSON_LAM_MAX):
+ raise ValueError(name + " value too large")
+ elif cons == LEGACY_CONS_POISSON:
+ if not (val >= 0):
+ raise ValueError("{0} < 0 or {0} is NaN".format(name))
+ elif not (val <= LEGACY_POISSON_LAM_MAX):
+ raise ValueError(name + " value too large")
+
+ return 0
+
+cdef object cont_broadcast_1(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ object out):
+
+ cdef np.ndarray randoms
+ cdef double a_val
+ cdef double *randoms_data
+ cdef np.broadcast it
+ cdef random_double_1 f = (<random_double_1>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None and out is None:
+ randoms = <np.ndarray>np.empty(size, np.double)
+ elif out is None:
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_DOUBLE)
+ else:
+ randoms = <np.ndarray>out
+
+ randoms_data = <double *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont_broadcast_2(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint):
+ cdef np.ndarray randoms
+ cdef double a_val, b_val
+ cdef double *randoms_data
+ cdef np.broadcast it
+ cdef random_double_2 f = (<random_double_2>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.double)
+ else:
+ it = np.PyArray_MultiIterNew2(a_arr, b_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.double)
+ # randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_DOUBLE)
+
+ randoms_data = <double *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ randoms_data[i] = f(state, a_val, b_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint):
+ cdef np.ndarray randoms
+ cdef double a_val, b_val, c_val
+ cdef double *randoms_data
+ cdef np.broadcast it
+ cdef random_double_3 f = (<random_double_3>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if c_constraint != CONS_NONE:
+ check_array_constraint(c_arr, c_name, c_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.double)
+ else:
+ it = np.PyArray_MultiIterNew3(a_arr, b_arr, c_arr)
+ # randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_DOUBLE)
+ randoms = <np.ndarray>np.empty(it.shape, np.double)
+
+ randoms_data = <double *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew4(randoms, a_arr, b_arr, c_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ c_val = (<double*>np.PyArray_MultiIter_DATA(it, 3))[0]
+ randoms_data[i] = f(state, a_val, b_val, c_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont(void *func, void *state, object size, object lock, int narg,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint,
+ object out):
+
+ cdef np.ndarray a_arr, b_arr, c_arr
+ cdef double _a = 0.0, _b = 0.0, _c = 0.0
+ cdef bint is_scalar = True
+ check_output(out, np.float64, size)
+ if narg > 0:
+ a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
+ if narg > 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ if narg == 3:
+ c_arr = <np.ndarray>np.PyArray_FROM_OTF(c, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0
+
+ if not is_scalar:
+ if narg == 1:
+ return cont_broadcast_1(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ out)
+ elif narg == 2:
+ return cont_broadcast_2(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint)
+ else:
+ return cont_broadcast_3(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint,
+ c_arr, c_name, c_constraint)
+
+ if narg > 0:
+ _a = PyFloat_AsDouble(a)
+ if a_constraint != CONS_NONE and is_scalar:
+ check_constraint(_a, a_name, a_constraint)
+ if narg > 1:
+ _b = PyFloat_AsDouble(b)
+ if b_constraint != CONS_NONE:
+ check_constraint(_b, b_name, b_constraint)
+ if narg == 3:
+ _c = PyFloat_AsDouble(c)
+ if c_constraint != CONS_NONE and is_scalar:
+ check_constraint(_c, c_name, c_constraint)
+
+ if size is None and out is None:
+ with lock:
+ if narg == 0:
+ return (<random_double_0>func)(state)
+ elif narg == 1:
+ return (<random_double_1>func)(state, _a)
+ elif narg == 2:
+ return (<random_double_2>func)(state, _a, _b)
+ elif narg == 3:
+ return (<random_double_3>func)(state, _a, _b, _c)
+
+ cdef np.npy_intp i, n
+ cdef np.ndarray randoms
+ if out is None:
+ randoms = <np.ndarray>np.empty(size)
+ else:
+ randoms = <np.ndarray>out
+ n = np.PyArray_SIZE(randoms)
+
+ cdef double *randoms_data = <double *>np.PyArray_DATA(randoms)
+ cdef random_double_0 f0
+ cdef random_double_1 f1
+ cdef random_double_2 f2
+ cdef random_double_3 f3
+
+ with lock, nogil:
+ if narg == 0:
+ f0 = (<random_double_0>func)
+ for i in range(n):
+ randoms_data[i] = f0(state)
+ elif narg == 1:
+ f1 = (<random_double_1>func)
+ for i in range(n):
+ randoms_data[i] = f1(state, _a)
+ elif narg == 2:
+ f2 = (<random_double_2>func)
+ for i in range(n):
+ randoms_data[i] = f2(state, _a, _b)
+ elif narg == 3:
+ f3 = (<random_double_3>func)
+ for i in range(n):
+ randoms_data[i] = f3(state, _a, _b, _c)
+
+ if out is None:
+ return randoms
+ else:
+ return out
+
+cdef object discrete_broadcast_d(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint):
+
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_d f = (<random_uint_d>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None:
+ randoms = np.empty(size, np.int64)
+ else:
+ # randoms = np.empty(np.shape(a_arr), np.double)
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_INT64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_dd(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_dd f = (<random_uint_dd>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew2(a_arr, b_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+ # randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_INT64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ randoms_data[i] = f(state, a_val, b_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_di(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_di f = (<random_uint_di>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew2(a_arr, b_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, a_arr, b_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] = f(state, a_val, b_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_iii(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ np.ndarray b_arr, object b_name, constraint_type b_constraint,
+ np.ndarray c_arr, object c_name, constraint_type c_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_iii f = (<random_uint_iii>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if b_constraint != CONS_NONE:
+ check_array_constraint(b_arr, b_name, b_constraint)
+
+ if c_constraint != CONS_NONE:
+ check_array_constraint(c_arr, c_name, c_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew3(a_arr, b_arr, c_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew4(randoms, a_arr, b_arr, c_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ b_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ c_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 3))[0]
+ randoms_data[i] = f(state, a_val, b_val, c_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object discrete_broadcast_i(void *func, void *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint):
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+ cdef np.broadcast it
+ cdef random_uint_i f = (<random_uint_i>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_INT64)
+
+ randoms_data = <int64_t *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+# Needs double <vec>, double-double <vec>, double-int64_t<vec>, int64_t <vec>, int64_t-int64_t-int64_t
+cdef object disc(void *func, void *state, object size, object lock,
+ int narg_double, int narg_int64,
+ object a, object a_name, constraint_type a_constraint,
+ object b, object b_name, constraint_type b_constraint,
+ object c, object c_name, constraint_type c_constraint):
+
+ cdef double _da = 0, _db = 0
+ cdef int64_t _ia = 0, _ib = 0, _ic = 0
+ cdef bint is_scalar = True
+ if narg_double > 0:
+ a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
+ if narg_double > 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ elif narg_int64 == 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ else:
+ if narg_int64 > 0:
+ a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(a_arr) == 0
+ if narg_int64 > 1:
+ b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
+ if narg_int64 > 2:
+ c_arr = <np.ndarray>np.PyArray_FROM_OTF(c, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0
+
+ if not is_scalar:
+ if narg_int64 == 0:
+ if narg_double == 1:
+ return discrete_broadcast_d(func, state, size, lock,
+ a_arr, a_name, a_constraint)
+ elif narg_double == 2:
+ return discrete_broadcast_dd(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint)
+ elif narg_int64 == 1:
+ if narg_double == 0:
+ return discrete_broadcast_i(func, state, size, lock,
+ a_arr, a_name, a_constraint)
+ elif narg_double == 1:
+ return discrete_broadcast_di(func, state, size, lock,
+ a_arr, a_name, a_constraint,
+ b_arr, b_name, b_constraint)
+ else:
+ raise NotImplementedError("No vector path available")
+
+ if narg_double > 0:
+ _da = PyFloat_AsDouble(a)
+ if a_constraint != CONS_NONE and is_scalar:
+ check_constraint(_da, a_name, a_constraint)
+
+ if narg_double > 1:
+ _db = PyFloat_AsDouble(b)
+ if b_constraint != CONS_NONE and is_scalar:
+ check_constraint(_db, b_name, b_constraint)
+ elif narg_int64 == 1:
+ _ib = <int64_t>b
+ if b_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ib, b_name, b_constraint)
+ else:
+ if narg_int64 > 0:
+ _ia = <int64_t>a
+ if a_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ia, a_name, a_constraint)
+ if narg_int64 > 1:
+ _ib = <int64_t>b
+ if b_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ib, b_name, b_constraint)
+ if narg_int64 > 2:
+ _ic = <int64_t>c
+ if c_constraint != CONS_NONE and is_scalar:
+ check_constraint(<double>_ic, c_name, c_constraint)
+
+ if size is None:
+ with lock:
+ if narg_int64 == 0:
+ if narg_double == 0:
+ return (<random_uint_0>func)(state)
+ elif narg_double == 1:
+ return (<random_uint_d>func)(state, _da)
+ elif narg_double == 2:
+ return (<random_uint_dd>func)(state, _da, _db)
+ elif narg_int64 == 1:
+ if narg_double == 0:
+ return (<random_uint_i>func)(state, _ia)
+ if narg_double == 1:
+ return (<random_uint_di>func)(state, _da, _ib)
+ else:
+ return (<random_uint_iii>func)(state, _ia, _ib, _ic)
+
+ cdef np.npy_intp i, n
+ cdef np.ndarray randoms = <np.ndarray>np.empty(size, np.int64)
+ cdef np.int64_t *randoms_data
+ cdef random_uint_0 f0
+ cdef random_uint_d fd
+ cdef random_uint_dd fdd
+ cdef random_uint_di fdi
+ cdef random_uint_i fi
+ cdef random_uint_iii fiii
+
+ n = np.PyArray_SIZE(randoms)
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+
+ with lock, nogil:
+ if narg_int64 == 0:
+ if narg_double == 0:
+ f0 = (<random_uint_0>func)
+ for i in range(n):
+ randoms_data[i] = f0(state)
+ elif narg_double == 1:
+ fd = (<random_uint_d>func)
+ for i in range(n):
+ randoms_data[i] = fd(state, _da)
+ elif narg_double == 2:
+ fdd = (<random_uint_dd>func)
+ for i in range(n):
+ randoms_data[i] = fdd(state, _da, _db)
+ elif narg_int64 == 1:
+ if narg_double == 0:
+ fi = (<random_uint_i>func)
+ for i in range(n):
+ randoms_data[i] = fi(state, _ia)
+ if narg_double == 1:
+ fdi = (<random_uint_di>func)
+ for i in range(n):
+ randoms_data[i] = fdi(state, _da, _ib)
+ else:
+ fiii = (<random_uint_iii>func)
+ for i in range(n):
+ randoms_data[i] = fiii(state, _ia, _ib, _ic)
+
+ return randoms
+
+
+cdef object cont_broadcast_1_f(void *func, bitgen_t *state, object size, object lock,
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ object out):
+
+ cdef np.ndarray randoms
+ cdef float a_val
+ cdef float *randoms_data
+ cdef np.broadcast it
+ cdef random_float_1 f = (<random_float_1>func)
+ cdef np.npy_intp i, n
+
+ if a_constraint != CONS_NONE:
+ check_array_constraint(a_arr, a_name, a_constraint)
+
+ if size is not None and out is None:
+ randoms = <np.ndarray>np.empty(size, np.float32)
+ elif out is None:
+ randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr),
+ np.PyArray_DIMS(a_arr),
+ np.NPY_FLOAT32)
+ else:
+ randoms = <np.ndarray>out
+
+ randoms_data = <float *>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+ it = np.PyArray_MultiIterNew2(randoms, a_arr)
+
+ with lock, nogil:
+ for i in range(n):
+ a_val = (<float*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ randoms_data[i] = f(state, a_val)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+cdef object cont_f(void *func, bitgen_t *state, object size, object lock,
+ object a, object a_name, constraint_type a_constraint,
+ object out):
+
+ cdef np.ndarray a_arr, b_arr, c_arr
+ cdef float _a
+ cdef bint is_scalar = True
+ cdef int requirements = np.NPY_ALIGNED | np.NPY_FORCECAST
+ check_output(out, np.float32, size)
+ a_arr = <np.ndarray>np.PyArray_FROMANY(a, np.NPY_FLOAT32, 0, 0, requirements)
+ is_scalar = np.PyArray_NDIM(a_arr) == 0
+
+ if not is_scalar:
+ return cont_broadcast_1_f(func, state, size, lock, a_arr, a_name, a_constraint, out)
+
+ _a = <float>PyFloat_AsDouble(a)
+ if a_constraint != CONS_NONE:
+ check_constraint(_a, a_name, a_constraint)
+
+ if size is None and out is None:
+ with lock:
+ return (<random_float_1>func)(state, _a)
+
+ cdef np.npy_intp i, n
+ cdef np.ndarray randoms
+ if out is None:
+ randoms = <np.ndarray>np.empty(size, np.float32)
+ else:
+ randoms = <np.ndarray>out
+ n = np.PyArray_SIZE(randoms)
+
+ cdef float *randoms_data = <float *>np.PyArray_DATA(randoms)
+ cdef random_float_1 f1 = <random_float_1>func
+
+ with lock, nogil:
+ for i in range(n):
+ randoms_data[i] = f1(state, _a)
+
+ if out is None:
+ return randoms
+ else:
+ return out
diff --git a/numpy/random/distributions.pxd b/numpy/random/distributions.pxd
new file mode 100644
index 000000000..75edaee9d
--- /dev/null
+++ b/numpy/random/distributions.pxd
@@ -0,0 +1,140 @@
+#cython: language_level=3
+
+from .common cimport (uint8_t, uint16_t, uint32_t, uint64_t,
+ int32_t, int64_t, bitgen_t)
+import numpy as np
+cimport numpy as np
+
+cdef extern from "src/distributions/distributions.h":
+
+ struct s_binomial_t:
+ int has_binomial
+ double psave
+ int64_t nsave
+ double r
+ double q
+ double fm
+ int64_t m
+ double p1
+ double xm
+ double xl
+ double xr
+ double c
+ double laml
+ double lamr
+ double p2
+ double p3
+ double p4
+
+ ctypedef s_binomial_t binomial_t
+
+ double random_double(bitgen_t *bitgen_state) nogil
+ void random_double_fill(bitgen_t* bitgen_state, np.npy_intp cnt, double *out) nogil
+ double random_standard_exponential(bitgen_t *bitgen_state) nogil
+ void random_standard_exponential_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
+ double random_standard_exponential_zig(bitgen_t *bitgen_state) nogil
+ void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, np.npy_intp cnt, double *out) nogil
+ double random_gauss_zig(bitgen_t* bitgen_state) nogil
+ void random_gauss_zig_fill(bitgen_t *bitgen_state, np.npy_intp count, double *out) nogil
+ double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) nogil
+
+ float random_float(bitgen_t *bitgen_state) nogil
+ float random_standard_exponential_f(bitgen_t *bitgen_state) nogil
+ float random_standard_exponential_zig_f(bitgen_t *bitgen_state) nogil
+ float random_gauss_zig_f(bitgen_t* bitgen_state) nogil
+ float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) nogil
+ float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) nogil
+
+ int64_t random_positive_int64(bitgen_t *bitgen_state) nogil
+ int32_t random_positive_int32(bitgen_t *bitgen_state) nogil
+ int64_t random_positive_int(bitgen_t *bitgen_state) nogil
+ uint64_t random_uint(bitgen_t *bitgen_state) nogil
+
+ double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) nogil
+
+ double random_gamma(bitgen_t *bitgen_state, double shape, double scale) nogil
+ float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) nogil
+
+ double random_exponential(bitgen_t *bitgen_state, double scale) nogil
+ double random_uniform(bitgen_t *bitgen_state, double lower, double range) nogil
+ double random_beta(bitgen_t *bitgen_state, double a, double b) nogil
+ double random_chisquare(bitgen_t *bitgen_state, double df) nogil
+ double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) nogil
+ double random_standard_cauchy(bitgen_t *bitgen_state) nogil
+ double random_pareto(bitgen_t *bitgen_state, double a) nogil
+ double random_weibull(bitgen_t *bitgen_state, double a) nogil
+ double random_power(bitgen_t *bitgen_state, double a) nogil
+ double random_laplace(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_logistic(bitgen_t *bitgen_state, double loc, double scale) nogil
+ double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) nogil
+ double random_rayleigh(bitgen_t *bitgen_state, double mode) nogil
+ double random_standard_t(bitgen_t *bitgen_state, double df) nogil
+ double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc) nogil
+ double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc) nogil
+ double random_wald(bitgen_t *bitgen_state, double mean, double scale) nogil
+ double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) nogil
+ double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right) nogil
+
+ int64_t random_poisson(bitgen_t *bitgen_state, double lam) nogil
+ int64_t random_negative_binomial(bitgen_t *bitgen_state, double n, double p) nogil
+ int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n, binomial_t *binomial) nogil
+ int64_t random_logseries(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_search(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric_inversion(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_geometric(bitgen_t *bitgen_state, double p) nogil
+ int64_t random_zipf(bitgen_t *bitgen_state, double a) nogil
+ int64_t random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad,
+ int64_t sample) nogil
+
+ uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) nogil
+
+ # Generate random uint64 numbers in closed interval [off, off + rng].
+ uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng,
+ uint64_t mask, bint use_masked) nogil
+
+ # Generate random uint32 numbers in closed interval [off, off + rng].
+ uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng,
+ uint32_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng,
+ uint16_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state,
+ uint8_t off, uint8_t rng,
+ uint8_t mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+ np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state,
+ np.npy_bool off, np.npy_bool rng,
+ np.npy_bool mask, bint use_masked,
+ int *bcnt, uint32_t *buf) nogil
+
+ void random_bounded_uint64_fill(bitgen_t *bitgen_state,
+ uint64_t off, uint64_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint64_t *out) nogil
+ void random_bounded_uint32_fill(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint32_t *out) nogil
+ void random_bounded_uint16_fill(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint16_t *out) nogil
+ void random_bounded_uint8_fill(bitgen_t *bitgen_state,
+ uint8_t off, uint8_t rng, np.npy_intp cnt,
+ bint use_masked,
+ uint8_t *out) nogil
+ void random_bounded_bool_fill(bitgen_t *bitgen_state,
+ np.npy_bool off, np.npy_bool rng, np.npy_intp cnt,
+ bint use_masked,
+ np.npy_bool *out) nogil
+
+ void random_multinomial(bitgen_t *bitgen_state, int64_t n, int64_t *mnix,
+ double *pix, np.npy_intp d, binomial_t *binomial) nogil
diff --git a/numpy/random/examples/cython/extending.pyx b/numpy/random/examples/cython/extending.pyx
new file mode 100644
index 000000000..a6a4ba4bf
--- /dev/null
+++ b/numpy/random/examples/cython/extending.pyx
@@ -0,0 +1,78 @@
+#!/usr/bin/env python
+#cython: language_level=3
+
+from libc.stdint cimport uint32_t
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+
+import numpy as np
+cimport numpy as np
+cimport cython
+
+from numpy.random.common cimport bitgen_t
+from numpy.random import PCG64
+
+np.import_array()
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniform_mean(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+ cdef np.ndarray randoms
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ # Best practice is to acquire the lock whenever generating random values.
+ # This prevents other threads from modifying the state. Acquiring the lock
+ # is only necessary if the GIL is also released, as in this example.
+ with x.lock, nogil:
+ for i in range(n):
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+ return randoms.mean()
+
+
+# This function is declared nogil so it can be used without the GIL below
+cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
+ cdef uint32_t mask, delta, val
+ mask = delta = ub - lb
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+
+ val = rng.next_uint32(rng.state) & mask
+ while val > delta:
+ val = rng.next_uint32(rng.state) & mask
+
+ return lb + val
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef uint32_t[::1] out
+ cdef const char *capsule_name = "BitGenerator"
+
+ x = PCG64()
+ out = np.empty(n, dtype=np.uint32)
+ capsule = x.capsule
+
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *>PyCapsule_GetPointer(capsule, capsule_name)
+
+ with x.lock, nogil:
+ for i in range(n):
+ out[i] = bounded_uint(lb, ub, rng)
+ return np.asarray(out)
diff --git a/numpy/random/examples/cython/extending_distributions.pyx b/numpy/random/examples/cython/extending_distributions.pyx
new file mode 100644
index 000000000..3cefec97e
--- /dev/null
+++ b/numpy/random/examples/cython/extending_distributions.pyx
@@ -0,0 +1,59 @@
+#!/usr/bin/env python
+#cython: language_level=3
+"""
+This file shows how the distributions that are accessed through
+distributions.pxd can be used in Cython code.
+"""
+import numpy as np
+cimport numpy as np
+cimport cython
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from numpy.random.common cimport *
+from numpy.random.distributions cimport random_gauss_zig
+from numpy.random import PCG64
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def normals_zig(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+
+ x = PCG64()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ # Best practice is to release GIL and acquire the lock
+ with x.lock, nogil:
+ for i in range(n):
+ random_values[i] = random_gauss_zig(rng)
+ randoms = np.asarray(random_values)
+ return randoms
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniforms(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef bitgen_t *rng
+ cdef const char *capsule_name = "BitGenerator"
+ cdef double[::1] random_values
+
+ x = PCG64()
+ capsule = x.capsule
+ # Optional check that the capsule is from a BitGenerator
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ with x.lock, nogil:
+ for i in range(n):
+ # Call the function
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+ return randoms
diff --git a/numpy/random/examples/cython/setup.py b/numpy/random/examples/cython/setup.py
new file mode 100644
index 000000000..69f057ed5
--- /dev/null
+++ b/numpy/random/examples/cython/setup.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python3
+"""
+Build the demos
+
+Usage: python setup.py build_ext -i
+"""
+
+import numpy as np
+from distutils.core import setup
+from Cython.Build import cythonize
+from setuptools.extension import Extension
+from os.path import join
+
+extending = Extension("extending",
+ sources=['extending.pyx'],
+ include_dirs=[np.get_include()])
+distributions = Extension("extending_distributions",
+ sources=['extending_distributions.pyx',
+ join('..', '..', 'src',
+ 'distributions', 'distributions.c')],
+ include_dirs=[np.get_include()])
+
+extensions = [extending, distributions]
+
+setup(
+ ext_modules=cythonize(extensions)
+)
diff --git a/numpy/random/examples/numba/extending.py b/numpy/random/examples/numba/extending.py
new file mode 100644
index 000000000..d41c2d76f
--- /dev/null
+++ b/numpy/random/examples/numba/extending.py
@@ -0,0 +1,77 @@
+import datetime as dt
+
+import numpy as np
+import numba as nb
+
+from numpy.random import PCG64
+
+x = PCG64()
+f = x.ctypes.next_uint32
+s = x.ctypes.state
+
+
+@nb.jit(nopython=True)
+def bounded_uint(lb, ub, state):
+ mask = delta = ub - lb
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+
+ val = f(state) & mask
+ while val > delta:
+ val = f(state) & mask
+
+ return lb + val
+
+
+print(bounded_uint(323, 2394691, s.value))
+
+
+@nb.jit(nopython=True)
+def bounded_uints(lb, ub, n, state):
+ out = np.empty(n, dtype=np.uint32)
+ for i in range(n):
+ out[i] = bounded_uint(lb, ub, state)
+
+
+bounded_uints(323, 2394691, 10000000, s.value)
+
+g = x.cffi.next_double
+cffi_state = x.cffi.state
+state_addr = x.cffi.state_address
+
+
+def normals(n, state):
+ out = np.empty(n)
+ for i in range((n + 1) // 2):
+ x1 = 2.0 * g(state) - 1.0
+ x2 = 2.0 * g(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0 * g(state) - 1.0
+ x2 = 2.0 * g(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
+ out[2 * i] = f * x1
+ if 2 * i + 1 < n:
+ out[2 * i + 1] = f * x2
+ return out
+
+
+print(normals(10, cffi_state).var())
+# Warm up
+normalsj = nb.jit(normals, nopython=True)
+normalsj(1, state_addr)
+
+start = dt.datetime.now()
+normalsj(1000000, state_addr)
+ms = 1000 * (dt.datetime.now() - start).total_seconds()
+print('1,000,000 Polar-transform (numba/PCG64) randoms in '
+ '{ms:0.1f}ms'.format(ms=ms))
+
+start = dt.datetime.now()
+np.random.standard_normal(1000000)
+ms = 1000 * (dt.datetime.now() - start).total_seconds()
+print('1,000,000 Polar-transform (NumPy) randoms in {ms:0.1f}ms'.format(ms=ms))
diff --git a/numpy/random/examples/numba/extending_distributions.py b/numpy/random/examples/numba/extending_distributions.py
new file mode 100644
index 000000000..9233ccced
--- /dev/null
+++ b/numpy/random/examples/numba/extending_distributions.py
@@ -0,0 +1,61 @@
+r"""
+On *nix, execute in randomgen/src/distributions
+
+export PYTHON_INCLUDE=#path to Python's include folder, usually \
+ ${PYTHON_HOME}/include/python${PYTHON_VERSION}m
+export NUMPY_INCLUDE=#path to numpy's include folder, usually \
+ ${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
+gcc -shared -o libdistributions.so -fPIC distributions.c \
+ -I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
+mv libdistributions.so ../../examples/numba/
+
+On Windows
+
+rem PYTHON_HOME is setup dependent, this is an example
+set PYTHON_HOME=c:\Anaconda
+cl.exe /LD .\distributions.c -DDLL_EXPORT \
+ -I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
+ -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python36.lib
+move distributions.dll ../../examples/numba/
+"""
+import os
+
+import numba as nb
+import numpy as np
+from cffi import FFI
+
+from numpy.random import PCG64
+
+ffi = FFI()
+if os.path.exists('./distributions.dll'):
+ lib = ffi.dlopen('./distributions.dll')
+elif os.path.exists('./libdistributions.so'):
+ lib = ffi.dlopen('./libdistributions.so')
+else:
+ raise RuntimeError('Required DLL/so file was not found.')
+
+ffi.cdef("""
+double random_gauss_zig(void *bitgen_state);
+""")
+x = PCG64()
+xffi = x.cffi
+bit_generator = xffi.bit_generator
+
+random_gauss_zig = lib.random_gauss_zig
+
+
+def normals(n, bit_generator):
+ out = np.empty(n)
+ for i in range(n):
+ out[i] = random_gauss_zig(bit_generator)
+ return out
+
+
+normalsj = nb.jit(normals, nopython=True)
+
+# Numba requires a memory address for void *
+# Can also get address from x.ctypes.bit_generator.value
+bit_generator_address = int(ffi.cast('uintptr_t', bit_generator))
+
+norm = normalsj(1000, bit_generator_address)
+print(norm[:12])
diff --git a/numpy/random/generator.pyx b/numpy/random/generator.pyx
new file mode 100644
index 000000000..df7485a97
--- /dev/null
+++ b/numpy/random/generator.pyx
@@ -0,0 +1,4030 @@
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+import operator
+import warnings
+
+import numpy as np
+from numpy.core.multiarray import normalize_axis_index
+
+from .bounded_integers import _integers_types
+from .pcg64 import PCG64
+
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from cpython cimport (Py_INCREF, PyFloat_AsDouble)
+from libc cimport string
+
+cimport cython
+cimport numpy as np
+
+from .bounded_integers cimport *
+from .common cimport *
+from .distributions cimport *
+
+
+__all__ = ['Generator', 'beta', 'binomial', 'bytes', 'chisquare', 'choice',
+ 'dirichlet', 'exponential', 'f', 'gamma',
+ 'geometric', 'gumbel', 'hypergeometric', 'integers', 'laplace',
+ 'logistic', 'lognormal', 'logseries', 'multinomial',
+ 'multivariate_normal', 'negative_binomial', 'noncentral_chisquare',
+ 'noncentral_f', 'normal', 'pareto', 'permutation',
+ 'poisson', 'power', 'random', 'rayleigh', 'shuffle',
+ 'standard_cauchy', 'standard_exponential', 'standard_gamma',
+ 'standard_normal', 'standard_t', 'triangular',
+ 'uniform', 'vonmises', 'wald', 'weibull', 'zipf']
+
+np.import_array()
+
+
+cdef bint _check_bit_generator(object bitgen):
+ """Check if an object satisfies the BitGenerator interface.
+ """
+ if not hasattr(bitgen, "capsule"):
+ return False
+ cdef const char *name = "BitGenerator"
+ return PyCapsule_IsValid(bitgen.capsule, name)
+
+
+cdef class Generator:
+ """
+ Generator(bit_generator)
+
+ Container for the BitGenerators.
+
+ ``Generator`` exposes a number of methods for generating random
+ numbers drawn from a variety of probability distributions. In addition to
+ the distribution-specific arguments, each method takes a keyword argument
+ `size` that defaults to ``None``. If `size` is ``None``, then a single
+ value is generated and returned. If `size` is an integer, then a 1-D
+ array filled with generated values is returned. If `size` is a tuple,
+ then an array with that shape is filled and returned.
+
+ The function :func:`numpy.random.default_rng` will instantiate
+ a `Generator` with numpy's default `BitGenerator`.
+
+ **No Compatibility Guarantee**
+
+ ``Generator`` does not provide a version compatibility guarantee. In
+ particular, as better algorithms evolve the bit stream may change.
+
+ Parameters
+ ----------
+ bit_generator : BitGenerator
+ BitGenerator to use as the core generator.
+
+ Notes
+ -----
+    The Python stdlib module `random` contains a pseudo-random number generator
+ with a number of methods that are similar to the ones available in
+ ``Generator``. It uses Mersenne Twister, and this bit generator can
+ be accessed using ``MT19937``. ``Generator``, besides being
+ NumPy-aware, has the advantage that it provides a much larger number
+ of probability distributions to choose from.
+
+ Examples
+ --------
+ >>> from numpy.random import Generator, PCG64
+ >>> rg = Generator(PCG64())
+ >>> rg.standard_normal()
+ -0.203 # random
+
+ See Also
+ --------
+ default_rng : Recommended constructor for `Generator`.
+ """
+ cdef public object _bit_generator
+ cdef bitgen_t _bitgen
+ cdef binomial_t _binomial
+ cdef object lock
+ _poisson_lam_max = POISSON_LAM_MAX
+
+ def __init__(self, bit_generator):
+ self._bit_generator = bit_generator
+
+ capsule = bit_generator.capsule
+ cdef const char *name = "BitGenerator"
+ if not PyCapsule_IsValid(capsule, name):
+            raise ValueError("Invalid bit generator. The bit generator must "
+ "be instantiated.")
+ self._bitgen = (<bitgen_t *> PyCapsule_GetPointer(capsule, name))[0]
+ self.lock = bit_generator.lock
+
+ def __repr__(self):
+ return self.__str__() + ' at 0x{:X}'.format(id(self))
+
+ def __str__(self):
+ _str = self.__class__.__name__
+ _str += '(' + self.bit_generator.__class__.__name__ + ')'
+ return _str
+
+ # Pickling support:
+ def __getstate__(self):
+ return self.bit_generator.state
+
+ def __setstate__(self, state):
+ self.bit_generator.state = state
+
+ def __reduce__(self):
+ from ._pickle import __generator_ctor
+ return __generator_ctor, (self.bit_generator.state['bit_generator'],), self.bit_generator.state
+
+ @property
+ def bit_generator(self):
+ """
+ Gets the bit generator instance used by the generator
+
+ Returns
+ -------
+ bit_generator : BitGenerator
+ The bit generator instance used by the generator
+ """
+ return self._bit_generator
+
+ def random(self, size=None, dtype=np.float64, out=None):
+ """
+ random(size=None, dtype='d', out=None)
+
+ Return random floats in the half-open interval [0.0, 1.0).
+
+ Results are from the "continuous uniform" distribution over the
+ stated interval. To sample :math:`Unif[a, b), b > a` multiply
+ the output of `random` by `(b-a)` and add `a`::
+
+ (b - a) * random() + a
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is not None,
+ it must have the same shape as the provided size and must match the type of
+ the output values.
+
+ Returns
+ -------
+ out : float or ndarray of floats
+ Array of random floats of shape `size` (unless ``size=None``, in which
+ case a single float is returned).
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.random()
+ 0.47108547995356098 # random
+ >>> type(rng.random())
+ <class 'float'>
+ >>> rng.random((5,))
+ array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) # random
+
+ Three-by-two array of random numbers from [-5, 0):
+
+ >>> 5 * rng.random((3, 2)) - 5
+ array([[-3.99149989, -0.52338984], # random
+ [-2.99091858, -0.79479508],
+ [-1.23204345, -1.75224494]])
+
+ """
+ cdef double temp
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ return double_fill(&random_double_fill, &self._bitgen, size, self.lock, out)
+ elif key == 'float32':
+ return float_fill(&random_float, &self._bitgen, size, self.lock, out)
+ else:
+ raise TypeError('Unsupported dtype "%s" for random' % key)
+
+ def beta(self, a, b, size=None):
+ """
+ beta(a, b, size=None)
+
+ Draw samples from a Beta distribution.
+
+ The Beta distribution is a special case of the Dirichlet distribution,
+ and is related to the Gamma distribution. It has the probability
+ distribution function
+
+ .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}
+ (1 - x)^{\\beta - 1},
+
+ where the normalization, B, is the beta function,
+
+ .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}
+ (1 - t)^{\\beta - 1} dt.
+
+ It is often seen in Bayesian inference and order statistics.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Alpha, positive (>0).
+ b : float or array_like of floats
+ Beta, positive (>0).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` and ``b`` are both scalars.
+ Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized beta distribution.
+
+ """
+ return cont(&random_beta, &self._bitgen, size, self.lock, 2,
+ a, 'a', CONS_POSITIVE,
+ b, 'b', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def exponential(self, scale=1.0, size=None):
+ """
+ exponential(scale=1.0, size=None)
+
+ Draw samples from an exponential distribution.
+
+ Its probability density function is
+
+ .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
+
+ for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
+ which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
+ The rate parameter is an alternative, widely used parameterization
+ of the exponential distribution [3]_.
+
+ The exponential distribution is a continuous analogue of the
+ geometric distribution. It describes many common situations, such as
+ the size of raindrops measured over many rainstorms [1]_, or the time
+ between page requests to Wikipedia [2]_.
+
+ Parameters
+ ----------
+ scale : float or array_like of floats
+ The scale parameter, :math:`\\beta = 1/\\lambda`. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``scale`` is a scalar. Otherwise,
+ ``np.array(scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized exponential distribution.
+
+ References
+ ----------
+ .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
+ Random Signal Principles", 4th ed, 2001, p. 57.
+ .. [2] Wikipedia, "Poisson process",
+ https://en.wikipedia.org/wiki/Poisson_process
+ .. [3] Wikipedia, "Exponential distribution",
+ https://en.wikipedia.org/wiki/Exponential_distribution
+
+ """
+ return cont(&random_exponential, &self._bitgen, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def standard_exponential(self, size=None, dtype=np.float64, method=u'zig', out=None):
+ """
+ standard_exponential(size=None, dtype='d', method='zig', out=None)
+
+ Draw samples from the standard exponential distribution.
+
+ `standard_exponential` is identical to the exponential distribution
+ with a scale parameter of 1.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : dtype, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ method : str, optional
+ Either 'inv' or 'zig'. 'inv' uses the default inverse CDF method.
+ 'zig' uses the much faster Ziggurat method of Marsaglia and Tsang.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is not None,
+ it must have the same shape as the provided size and must match the type of
+ the output values.
+
+ Returns
+ -------
+ out : float or ndarray
+ Drawn samples.
+
+ Examples
+ --------
+ Output a 3x8000 array:
+
+ >>> n = np.random.default_rng().standard_exponential((3, 8000))
+
+ """
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ if method == u'zig':
+ return double_fill(&random_standard_exponential_zig_fill, &self._bitgen, size, self.lock, out)
+ else:
+ return double_fill(&random_standard_exponential_fill, &self._bitgen, size, self.lock, out)
+ elif key == 'float32':
+ if method == u'zig':
+ return float_fill(&random_standard_exponential_zig_f, &self._bitgen, size, self.lock, out)
+ else:
+ return float_fill(&random_standard_exponential_f, &self._bitgen, size, self.lock, out)
+ else:
+ raise TypeError('Unsupported dtype "%s" for standard_exponential'
+ % key)
+
+ def integers(self, low, high=None, size=None, dtype=np.int64, endpoint=False):
+ """
+ integers(low, high=None, size=None, dtype='int64', endpoint=False)
+
+ Return random integers from `low` (inclusive) to `high` (exclusive), or
+ if endpoint=True, `low` (inclusive) to `high` (inclusive). Replaces
+ `RandomState.randint` (with endpoint=False) and
+ `RandomState.random_integers` (with endpoint=True)
+
+ Return random integers from the "discrete uniform" distribution of
+ the specified dtype. If `high` is None (the default), then results are
+ from 0 to `low`.
+
+ Parameters
+ ----------
+ low : int or array-like of ints
+ Lowest (signed) integers to be drawn from the distribution (unless
+ ``high=None``, in which case this parameter is 0 and this value is
+ used for `high`).
+ high : int or array-like of ints, optional
+ If provided, one above the largest (signed) integer to be drawn
+ from the distribution (see above for behavior if ``high=None``).
+ If array-like, must contain integer values
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result. All dtypes are determined by their
+ name, i.e., 'int64', 'int', etc, so byteorder is not available
+ and a specific precision may have different C types depending
+            on the platform. The default value is 'np.int64'.
+ endpoint : bool, optional
+ If true, sample from the interval [low, high] instead of the
+ default [low, high)
+ Defaults to False
+
+ Returns
+ -------
+ out : int or ndarray of ints
+ `size`-shaped array of random integers from the appropriate
+ distribution, or a single such random int if `size` not provided.
+
+ Notes
+ -----
+ When using broadcasting with uint64 dtypes, the maximum value (2**64)
+ cannot be represented as a standard integer type. The high array (or
+ low if high is None) must have object dtype, e.g., array([2**64]).
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.integers(2, size=10)
+ array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random
+ >>> rng.integers(1, size=10)
+ array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+
+ Generate a 2 x 4 array of ints between 0 and 4, inclusive:
+
+ >>> rng.integers(5, size=(2, 4))
+ array([[4, 0, 2, 1],
+ [3, 2, 2, 0]]) # random
+
+ Generate a 1 x 3 array with 3 different upper bounds
+
+ >>> rng.integers(1, [3, 5, 10])
+ array([2, 2, 9]) # random
+
+ Generate a 1 by 3 array with 3 different lower bounds
+
+ >>> rng.integers([1, 5, 7], 10)
+ array([9, 8, 7]) # random
+
+ Generate a 2 by 4 array using broadcasting with dtype of uint8
+
+ >>> rng.integers([1, 3, 5, 7], [[10], [20]], dtype=np.uint8)
+ array([[ 8, 6, 9, 7],
+ [ 1, 16, 9, 12]], dtype=uint8) # random
+
+ References
+ ----------
+ .. [1] Daniel Lemire., "Fast Random Integer Generation in an Interval",
+ ACM Transactions on Modeling and Computer Simulation 29 (1), 2019,
+ http://arxiv.org/abs/1805.10941.
+
+ """
+ if high is None:
+ high = low
+ low = 0
+
+ dt = np.dtype(dtype)
+ key = dt.name
+ if key not in _integers_types:
+ raise TypeError('Unsupported dtype "%s" for integers' % key)
+ if not dt.isnative:
+ raise ValueError('Providing a dtype with a non-native byteorder '
+ 'is not supported. If you require '
+ 'platform-independent byteorder, call byteswap '
+ 'when required.')
+
+ # Implementation detail: the old API used a masked method to generate
+ # bounded uniform integers. Lemire's method is preferable since it is
+ # faster. randomgen allows a choice, we will always use the faster one.
+ cdef bint _masked = False
+
+ if key == 'int32':
+ ret = _rand_int32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'int64':
+ ret = _rand_int64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'int16':
+ ret = _rand_int16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'int8':
+ ret = _rand_int8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint64':
+ ret = _rand_uint64(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint32':
+ ret = _rand_uint32(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint16':
+ ret = _rand_uint16(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'uint8':
+ ret = _rand_uint8(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+ elif key == 'bool':
+ ret = _rand_bool(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
+
+ if size is None and dtype in (np.bool, np.int, np.long):
+ if np.array(ret).shape == ():
+ return dtype(ret)
+ return ret
+
+ def bytes(self, np.npy_intp length):
+ """
+ bytes(length)
+
+ Return random bytes.
+
+ Parameters
+ ----------
+ length : int
+ Number of random bytes.
+
+ Returns
+ -------
+ out : str
+ String of length `length`.
+
+ Examples
+ --------
+ >>> np.random.default_rng().bytes(10)
+ ' eh\\x85\\x022SZ\\xbf\\xa4' #random
+
+ """
+ cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
+ # Interpret the uint32s as little-endian to convert them to bytes
+ # consistently.
+ return self.integers(0, 4294967296, size=n_uint32,
+ dtype=np.uint32).astype('<u4').tobytes()[:length]
+
+ @cython.wraparound(True)
+ def choice(self, a, size=None, replace=True, p=None, axis=0, bint shuffle=True):
+ """
+        choice(a, size=None, replace=True, p=None, axis=0, shuffle=True)
+
+ Generates a random sample from a given 1-D array
+
+ Parameters
+ ----------
+ a : 1-D array-like or int
+ If an ndarray, a random sample is generated from its elements.
+ If an int, the random sample is generated as if a were np.arange(a)
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn from the 1-d `a`. If `a` has more
+ than one dimension, the `size` shape will be inserted into the
+ `axis` dimension, so the output ``ndim`` will be ``a.ndim - 1 +
+ len(size)``. Default is None, in which case a single value is
+ returned.
+ replace : boolean, optional
+ Whether the sample is with or without replacement
+ p : 1-D array-like, optional
+ The probabilities associated with each entry in a.
+ If not given the sample assumes a uniform distribution over all
+ entries in a.
+ axis : int, optional
+ The axis along which the selection is performed. The default, 0,
+ selects by row.
+ shuffle : boolean, optional
+ Whether the sample is shuffled when sampling without replacement.
+ Default is True, False provides a speedup.
+
+ Returns
+ -------
+ samples : single item or ndarray
+ The generated random samples
+
+ Raises
+ ------
+ ValueError
+ If a is an int and less than zero, if p is not 1-dimensional, if
+ a is array-like with a size 0, if p is not a vector of
+ probabilities, if a and p have different lengths, or if
+ replace=False and the sample size is greater than the population
+ size.
+
+ See Also
+ --------
+ integers, shuffle, permutation
+
+ Examples
+ --------
+ Generate a uniform random sample from np.arange(5) of size 3:
+
+ >>> rng = np.random.default_rng()
+ >>> rng.choice(5, 3)
+ array([0, 3, 4]) # random
+ >>> #This is equivalent to rng.integers(0,5,3)
+
+ Generate a non-uniform random sample from np.arange(5) of size 3:
+
+ >>> rng.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
+ array([3, 3, 0]) # random
+
+ Generate a uniform random sample from np.arange(5) of size 3 without
+ replacement:
+
+ >>> rng.choice(5, 3, replace=False)
+ array([3,1,0]) # random
+ >>> #This is equivalent to rng.permutation(np.arange(5))[:3]
+
+ Generate a non-uniform random sample from np.arange(5) of size
+ 3 without replacement:
+
+ >>> rng.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
+ array([2, 3, 0]) # random
+
+ Any of the above can be repeated with an arbitrary array-like
+ instead of just integers. For instance:
+
+ >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
+ >>> rng.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
+ array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], # random
+ dtype='<U11')
+
+ """
+
+ cdef int64_t val, t, loc, size_i, pop_size_i
+ cdef int64_t *idx_data
+ cdef np.npy_intp j
+ cdef uint64_t set_size, mask
+ cdef uint64_t[::1] hash_set
+ # Format and Verify input
+ a = np.array(a, copy=False)
+ if a.ndim == 0:
+ try:
+ # __index__ must return an integer by python rules.
+ pop_size = operator.index(a.item())
+ except TypeError:
+ raise ValueError("a must be 1-dimensional or an integer")
+ if pop_size <= 0 and np.prod(size) != 0:
+ raise ValueError("a must be greater than 0 unless no samples are taken")
+ else:
+ pop_size = a.shape[axis]
+ if pop_size == 0 and np.prod(size) != 0:
+ raise ValueError("'a' cannot be empty unless no samples are taken")
+
+ if p is not None:
+ d = len(p)
+
+ atol = np.sqrt(np.finfo(np.float64).eps)
+ if isinstance(p, np.ndarray):
+ if np.issubdtype(p.dtype, np.floating):
+ atol = max(atol, np.sqrt(np.finfo(p.dtype).eps))
+
+ p = <np.ndarray>np.PyArray_FROM_OTF(
+ p, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pix = <double*>np.PyArray_DATA(p)
+
+ if p.ndim != 1:
+ raise ValueError("'p' must be 1-dimensional")
+ if p.size != pop_size:
+ raise ValueError("'a' and 'p' must have same size")
+ p_sum = kahan_sum(pix, d)
+ if np.isnan(p_sum):
+ raise ValueError("probabilities contain NaN")
+ if np.logical_or.reduce(p < 0):
+ raise ValueError("probabilities are not non-negative")
+ if abs(p_sum - 1.) > atol:
+ raise ValueError("probabilities do not sum to 1")
+
+ shape = size
+ if shape is not None:
+ size = np.prod(shape, dtype=np.intp)
+ else:
+ size = 1
+
+ # Actual sampling
+ if replace:
+ if p is not None:
+ cdf = p.cumsum()
+ cdf /= cdf[-1]
+ uniform_samples = self.random(shape)
+ idx = cdf.searchsorted(uniform_samples, side='right')
+ idx = np.array(idx, copy=False, dtype=np.int64) # searchsorted returns a scalar
+ else:
+ idx = self.integers(0, pop_size, size=shape, dtype=np.int64)
+ else:
+ if size > pop_size:
+ raise ValueError("Cannot take a larger sample than "
+ "population when 'replace=False'")
+ elif size < 0:
+ raise ValueError("negative dimensions are not allowed")
+
+ if p is not None:
+ if np.count_nonzero(p > 0) < size:
+ raise ValueError("Fewer non-zero entries in p than size")
+ n_uniq = 0
+ p = p.copy()
+ found = np.zeros(shape, dtype=np.int64)
+ flat_found = found.ravel()
+ while n_uniq < size:
+ x = self.random((size - n_uniq,))
+ if n_uniq > 0:
+ p[flat_found[0:n_uniq]] = 0
+ cdf = np.cumsum(p)
+ cdf /= cdf[-1]
+ new = cdf.searchsorted(x, side='right')
+ _, unique_indices = np.unique(new, return_index=True)
+ unique_indices.sort()
+ new = new.take(unique_indices)
+ flat_found[n_uniq:n_uniq + new.size] = new
+ n_uniq += new.size
+ idx = found
+ else:
+ size_i = size
+ pop_size_i = pop_size
+ # This is a heuristic tuning. should be improvable
+ if shuffle:
+ cutoff = 50
+ else:
+ cutoff = 20
+ if pop_size_i > 10000 and (size_i > (pop_size_i // cutoff)):
+ # Tail shuffle size elements
+ idx = np.PyArray_Arange(0, pop_size_i, 1, np.NPY_INT64)
+ idx_data = <int64_t*>(<np.ndarray>idx).data
+ with self.lock, nogil:
+ self._shuffle_int(pop_size_i, max(pop_size_i - size_i, 1),
+ idx_data)
+ # Copy to allow potentially large array backing idx to be gc
+ idx = idx[(pop_size - size):].copy()
+ else:
+ # Floyd's algorithm
+ idx = np.empty(size, dtype=np.int64)
+ idx_data = <int64_t*>np.PyArray_DATA(<np.ndarray>idx)
+ # smallest power of 2 larger than 1.2 * size
+ set_size = <uint64_t>(1.2 * size_i)
+ mask = _gen_mask(set_size)
+ set_size = 1 + mask
+ hash_set = np.full(set_size, <uint64_t>-1, np.uint64)
+ with self.lock, cython.wraparound(False), nogil:
+ for j in range(pop_size_i - size_i, pop_size_i):
+ val = random_bounded_uint64(&self._bitgen, 0, j, 0, 0)
+ loc = val & mask
+ while hash_set[loc] != <uint64_t>-1 and hash_set[loc] != <uint64_t>val:
+ loc = (loc + 1) & mask
+ if hash_set[loc] == <uint64_t>-1: # then val not in hash_set
+ hash_set[loc] = val
+ idx_data[j - pop_size_i + size_i] = val
+ else: # we need to insert j instead
+ loc = j & mask
+ while hash_set[loc] != <uint64_t>-1:
+ loc = (loc + 1) & mask
+ hash_set[loc] = j
+ idx_data[j - pop_size_i + size_i] = j
+ if shuffle:
+ self._shuffle_int(size_i, 1, idx_data)
+ if shape is not None:
+ idx.shape = shape
+
+ if shape is None and isinstance(idx, np.ndarray):
+ # In most cases a scalar will have been made an array
+ idx = idx.item(0)
+
+ # Use samples as indices for a if a is array-like
+ if a.ndim == 0:
+ return idx
+
+ if shape is not None and idx.ndim == 0:
+ # If size == () then the user requested a 0-d array as opposed to
+ # a scalar object when size is None. However a[idx] is always a
+ # scalar and not an array. So this makes sure the result is an
+ # array, taking into account that np.array(item) may not work
+ # for object arrays.
+ res = np.empty((), dtype=a.dtype)
+ res[()] = a[idx]
+ return res
+
+ # asarray downcasts on 32-bit platforms, always safe
+ # no-op on 64-bit platforms
+ return a.take(np.asarray(idx, dtype=np.intp), axis=axis)
+
+ def uniform(self, low=0.0, high=1.0, size=None):
+ """
+ uniform(low=0.0, high=1.0, size=None)
+
+ Draw samples from a uniform distribution.
+
+ Samples are uniformly distributed over the half-open interval
+ ``[low, high)`` (includes low, but excludes high). In other words,
+ any value within the given interval is equally likely to be drawn
+ by `uniform`.
+
+ Parameters
+ ----------
+ low : float or array_like of floats, optional
+ Lower boundary of the output interval. All values generated will be
+ greater than or equal to low. The default value is 0.
+ high : float or array_like of floats
+ Upper boundary of the output interval. All values generated will be
+ less than high. The default value is 1.0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``low`` and ``high`` are both scalars.
+ Otherwise, ``np.broadcast(low, high).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized uniform distribution.
+
+ See Also
+ --------
+ integers : Discrete uniform distribution, yielding integers.
+ random : Floats uniformly distributed over ``[0, 1)``.
+
+
+ Notes
+ -----
+ The probability density function of the uniform distribution is
+
+ .. math:: p(x) = \\frac{1}{b - a}
+
+ anywhere within the interval ``[a, b)``, and zero elsewhere.
+
+ When ``high`` == ``low``, values of ``low`` will be returned.
+ If ``high`` < ``low``, the results are officially undefined
+ and may eventually raise an error, i.e. do not rely on this
+ function to behave when passed arguments satisfying that
+ inequality condition.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> s = np.random.default_rng().uniform(-1,0,1000)
+
+ All values are within the given interval:
+
+ >>> np.all(s >= -1)
+ True
+ >>> np.all(s < 0)
+ True
+
+ Display the histogram of the samples, along with the
+ probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 15, density=True)
+ >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ cdef bint is_scalar = True
+ cdef np.ndarray alow, ahigh, arange
+ cdef double _low, _high, range
+ cdef object temp
+
+ alow = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ ahigh = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ALIGNED)
+
+ if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0:
+ _low = PyFloat_AsDouble(low)
+ _high = PyFloat_AsDouble(high)
+ range = _high - _low
+ if not np.isfinite(range):
+ raise OverflowError('Range exceeds valid bounds')
+
+ return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
+ _low, '', CONS_NONE,
+ range, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ temp = np.subtract(ahigh, alow)
+ # needed to get around Pyrex's automatic reference-counting
+ # rules because EnsureArray steals a reference
+ Py_INCREF(temp)
+
+ arange = <np.ndarray>np.PyArray_EnsureArray(temp)
+ if not np.all(np.isfinite(arange)):
+ raise OverflowError('Range exceeds valid bounds')
+ return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
+ alow, '', CONS_NONE,
+ arange, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ # Complicated, continuous distributions:
+ def standard_normal(self, size=None, dtype=np.float64, out=None):
+ """
+ standard_normal(size=None, dtype='d', out=None)
+
+ Draw samples from a standard Normal distribution (mean=0, stdev=1).
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is not None,
+ it must have the same shape as the provided size and must match the type of
+ the output values.
+
+ Returns
+ -------
+ out : float or ndarray
+ A floating-point array of shape ``size`` of drawn samples, or a
+ single sample if ``size`` was not specified.
+
+ Notes
+ -----
+ For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
+
+ mu + sigma * gen.standard_normal(size=...)
+ gen.normal(mu, sigma, size=...)
+
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.standard_normal()
+ 2.1923875335537315 #random
+
+ >>> s = rng.standard_normal(8000)
+ >>> s
+ array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, # random
+ -0.38672696, -0.4685006 ]) # random
+ >>> s.shape
+ (8000,)
+ >>> s = rng.standard_normal(size=(3, 4, 2))
+ >>> s.shape
+ (3, 4, 2)
+
+ Two-by-four array of samples from :math:`N(3, 6.25)`:
+
+ >>> 3 + 2.5 * rng.standard_normal(size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
+ """
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ return double_fill(&random_gauss_zig_fill, &self._bitgen, size, self.lock, out)
+ elif key == 'float32':
+ return float_fill(&random_gauss_zig_f, &self._bitgen, size, self.lock, out)
+
+ else:
+ raise TypeError('Unsupported dtype "%s" for standard_normal' % key)
+
+ def normal(self, loc=0.0, scale=1.0, size=None):
+ """
+ normal(loc=0.0, scale=1.0, size=None)
+
+ Draw random samples from a normal (Gaussian) distribution.
+
+ The probability density function of the normal distribution, first
+ derived by De Moivre and 200 years later by both Gauss and Laplace
+ independently [2]_, is often called the bell curve because of
+ its characteristic shape (see the example below).
+
+ The normal distributions occurs often in nature. For example, it
+ describes the commonly occurring distribution of samples influenced
+ by a large number of tiny, random disturbances, each with its own
+ unique distribution [2]_.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats
+ Mean ("centre") of the distribution.
+ scale : float or array_like of floats
+ Standard deviation (spread or "width") of the distribution. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized normal distribution.
+
+ See Also
+ --------
+ scipy.stats.norm : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gaussian distribution is
+
+ .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}
+ e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },
+
+ where :math:`\\mu` is the mean and :math:`\\sigma` the standard
+ deviation. The square of the standard deviation, :math:`\\sigma^2`,
+ is called the variance.
+
+ The function has its peak at the mean, and its "spread" increases with
+ the standard deviation (the function reaches 0.607 times its maximum at
+ :math:`\\mu + \\sigma` and :math:`\\mu - \\sigma` [2]_). This implies that
+ :meth:`normal` is more likely to return samples lying close to the
+ mean, rather than those far away.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Normal distribution",
+ https://en.wikipedia.org/wiki/Normal_distribution
+ .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
+ Random Variables and Random Signal Principles", 4th ed., 2001,
+ pp. 51, 51, 125.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, sigma = 0, 0.1 # mean and standard deviation
+ >>> s = np.random.default_rng().normal(mu, sigma, 1000)
+
+ Verify the mean and the variance:
+
+ >>> abs(mu - np.mean(s))
+ 0.0 # may vary
+
+ >>> abs(sigma - np.std(s, ddof=1))
+ 0.1 # may vary
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
+ ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
+ ... linewidth=2, color='r')
+ >>> plt.show()
+
+ Two-by-four array of samples from N(3, 6.25):
+
+ >>> np.random.default_rng().normal(3, 2.5, size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
+ """
+ return cont(&random_normal_zig, &self._bitgen, size, self.lock, 2,
+ loc, '', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def standard_gamma(self, shape, size=None, dtype=np.float64, out=None):
+ """
+ standard_gamma(shape, size=None, dtype='d', out=None)
+
+ Draw samples from a standard Gamma distribution.
+
+ Samples are drawn from a Gamma distribution with specified parameters,
+ shape (sometimes designated "k") and scale=1.
+
+ Parameters
+ ----------
+ shape : float or array_like of floats
+ Parameter, must be non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``shape`` is a scalar. Otherwise,
+ ``np.array(shape).size`` samples are drawn.
+ dtype : {str, dtype}, optional
+ Desired dtype of the result, either 'd' (or 'float64') or 'f'
+ (or 'float32'). All dtypes are determined by their name. The
+ default value is 'd'.
+ out : ndarray, optional
+ Alternative output array in which to place the result. If size is
+ not None, it must have the same shape as the provided size and
+ must match the type of the output values.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized standard gamma distribution.
+
+ See Also
+ --------
+ scipy.stats.gamma : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gamma distribution is
+
+ .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
+
+ where :math:`k` is the shape and :math:`\\theta` the scale,
+ and :math:`\\Gamma` is the Gamma function.
+
+ The Gamma distribution is often used to model the times to failure of
+ electronic components, and arises naturally in processes for which the
+ waiting times between Poisson distributed events are relevant.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/GammaDistribution.html
+ .. [2] Wikipedia, "Gamma distribution",
+ https://en.wikipedia.org/wiki/Gamma_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> shape, scale = 2., 1. # mean and width
+ >>> s = np.random.default_rng().standard_gamma(shape, 1000000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> import scipy.special as sps # doctest: +SKIP
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
+ >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ # doctest: +SKIP
+ ... (sps.gamma(shape) * scale**shape))
+ >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
+ """
+ cdef void *func
+ key = np.dtype(dtype).name
+ if key == 'float64':
+ return cont(&random_standard_gamma_zig, &self._bitgen, size, self.lock, 1,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ out)
+ if key == 'float32':
+ return cont_f(&random_standard_gamma_zig_f, &self._bitgen, size, self.lock,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ out)
+ else:
+ raise TypeError('Unsupported dtype "%s" for standard_gamma' % key)
+
+ def gamma(self, shape, scale=1.0, size=None):
+ """
+ gamma(shape, scale=1.0, size=None)
+
+ Draw samples from a Gamma distribution.
+
+ Samples are drawn from a Gamma distribution with specified parameters,
+ `shape` (sometimes designated "k") and `scale` (sometimes designated
+ "theta"), where both parameters are > 0.
+
+ Parameters
+ ----------
+ shape : float or array_like of floats
+ The shape of the gamma distribution. Must be non-negative.
+ scale : float or array_like of floats, optional
+ The scale of the gamma distribution. Must be non-negative.
+ Default is equal to 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``shape`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized gamma distribution.
+
+ See Also
+ --------
+ scipy.stats.gamma : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gamma distribution is
+
+ .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
+
+ where :math:`k` is the shape and :math:`\\theta` the scale,
+ and :math:`\\Gamma` is the Gamma function.
+
+ The Gamma distribution is often used to model the times to failure of
+ electronic components, and arises naturally in processes for which the
+ waiting times between Poisson distributed events are relevant.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/GammaDistribution.html
+ .. [2] Wikipedia, "Gamma distribution",
+ https://en.wikipedia.org/wiki/Gamma_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
+ >>> s = np.random.default_rng().gamma(shape, scale, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> import scipy.special as sps # doctest: +SKIP
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
+ >>> y = bins**(shape-1)*(np.exp(-bins/scale) / # doctest: +SKIP
+ ... (sps.gamma(shape)*scale**shape))
+ >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
+ """
+ return cont(&random_gamma, &self._bitgen, size, self.lock, 2,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def f(self, dfnum, dfden, size=None):
+ """
+ f(dfnum, dfden, size=None)
+
+ Draw samples from an F distribution.
+
+ Samples are drawn from an F distribution with specified parameters,
+ `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
+ freedom in denominator), where both parameters must be greater than
+ zero.
+
+ The random variate of the F distribution (also known as the
+ Fisher distribution) is a continuous probability distribution
+ that arises in ANOVA tests, and is the ratio of two chi-square
+ variates.
+
+ Parameters
+ ----------
+ dfnum : float or array_like of floats
+ Degrees of freedom in numerator, must be > 0.
+ dfden : float or array_like of floats
+ Degrees of freedom in denominator, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
+ Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Fisher distribution.
+
+ See Also
+ --------
+ scipy.stats.f : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The F statistic is used to compare in-group variances to between-group
+ variances. Calculating the distribution depends on the sampling, and
+ so it is a function of the respective degrees of freedom in the
+ problem. The variable `dfnum` is the number of samples minus one, the
+ between-groups degrees of freedom, while `dfden` is the within-groups
+ degrees of freedom, the sum of the number of samples in each group
+ minus the number of groups.
+
+ References
+ ----------
+ .. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
+ Fifth Edition, 2002.
+ .. [2] Wikipedia, "F-distribution",
+ https://en.wikipedia.org/wiki/F-distribution
+
+ Examples
+ --------
+ An example from Glantz[1], pp 47-40:
+
+ Two groups, children of diabetics (25 people) and children from people
+ without diabetes (25 controls). Fasting blood glucose was measured,
+ case group had a mean value of 86.1, controls had a mean value of
+ 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these
+ data consistent with the null hypothesis that the parents diabetic
+ status does not affect their children's blood glucose levels?
+ Calculating the F statistic from the data gives a value of 36.01.
+
+ Draw samples from the distribution:
+
+ >>> dfnum = 1. # between group degrees of freedom
+ >>> dfden = 48. # within groups degrees of freedom
+ >>> s = np.random.default_rng().f(dfnum, dfden, 1000)
+
+ The lower bound for the top 1% of the samples is :
+
+ >>> np.sort(s)[-10]
+ 7.61988120985 # random
+
+ So there is about a 1% chance that the F statistic will exceed 7.62,
+ the measured value is 36, so the null hypothesis is rejected at the 1%
+ level.
+
+ """
+ return cont(&random_f, &self._bitgen, size, self.lock, 2,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def noncentral_f(self, dfnum, dfden, nonc, size=None):
+ """
+ noncentral_f(dfnum, dfden, nonc, size=None)
+
+ Draw samples from the noncentral F distribution.
+
+ Samples are drawn from an F distribution with specified parameters,
+ `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
+ freedom in denominator), where both parameters > 0.
+ `nonc` is the non-centrality parameter.
+
+ Parameters
+ ----------
+ dfnum : float or array_like of floats
+ Numerator degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.14.0
+ Earlier NumPy versions required dfnum > 1.
+ dfden : float or array_like of floats
+ Denominator degrees of freedom, must be > 0.
+ nonc : float or array_like of floats
+ Non-centrality parameter, the sum of the squares of the numerator
+ means, must be >= 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``dfnum``, ``dfden``, and ``nonc``
+ are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized noncentral Fisher distribution.
+
+ Notes
+ -----
+ When calculating the power of an experiment (power = probability of
+ rejecting the null hypothesis when a specific alternative is true) the
+ non-central F statistic becomes important. When the null hypothesis is
+ true, the F statistic follows a central F distribution. When the null
+ hypothesis is not true, then it follows a non-central F statistic.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Noncentral F-Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/NoncentralF-Distribution.html
+ .. [2] Wikipedia, "Noncentral F-distribution",
+ https://en.wikipedia.org/wiki/Noncentral_F-distribution
+
+ Examples
+ --------
+ In a study, testing for a specific alternative to the null hypothesis
+ requires use of the Noncentral F distribution. We need to calculate the
+ area in the tail of the distribution that exceeds the value of the F
+ distribution for the null hypothesis. We'll plot the two probability
+ distributions for comparison.
+
+ >>> rng = np.random.default_rng()
+ >>> dfnum = 3 # between group deg of freedom
+ >>> dfden = 20 # within groups degrees of freedom
+ >>> nonc = 3.0
+ >>> nc_vals = rng.noncentral_f(dfnum, dfden, nonc, 1000000)
+ >>> NF = np.histogram(nc_vals, bins=50, density=True)
+ >>> c_vals = rng.f(dfnum, dfden, 1000000)
+ >>> F = np.histogram(c_vals, bins=50, density=True)
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(F[1][1:], F[0])
+ >>> plt.plot(NF[1][1:], NF[0])
+ >>> plt.show()
+
+ """
+ return cont(&random_noncentral_f, &self._bitgen, size, self.lock, 3,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE, None)
+
+ def chisquare(self, df, size=None):
+ """
+ chisquare(df, size=None)
+
+ Draw samples from a chi-square distribution.
+
+ When `df` independent random variables, each with standard normal
+ distributions (mean 0, variance 1), are squared and summed, the
+ resulting distribution is chi-square (see Notes). This distribution
+ is often used in hypothesis testing.
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Number of degrees of freedom, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` is a scalar. Otherwise,
+ ``np.array(df).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized chi-square distribution.
+
+ Raises
+ ------
+ ValueError
+ When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
+ is given.
+
+ Notes
+ -----
+ The variable obtained by summing the squares of `df` independent,
+ standard normally distributed random variables:
+
+ .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i
+
+ is chi-square distributed, denoted
+
+ .. math:: Q \\sim \\chi^2_k.
+
+ The probability density function of the chi-squared distribution is
+
+ .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}
+ x^{k/2 - 1} e^{-x/2},
+
+ where :math:`\\Gamma` is the gamma function,
+
+ .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.
+
+ References
+ ----------
+ .. [1] NIST "Engineering Statistics Handbook"
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+ Examples
+ --------
+ >>> np.random.default_rng().chisquare(2,4)
+ array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
+
+ """
+ return cont(&random_chisquare, &self._bitgen, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def noncentral_chisquare(self, df, nonc, size=None):
+ """
+ noncentral_chisquare(df, nonc, size=None)
+
+ Draw samples from a noncentral chi-square distribution.
+
+ The noncentral :math:`\\chi^2` distribution is a generalization of
+ the :math:`\\chi^2` distribution.
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.10.0
+ Earlier NumPy versions required df > 1.
+ nonc : float or array_like of floats
+ Non-centrality, must be non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` and ``nonc`` are both scalars.
+ Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized noncentral chi-square distribution.
+
+ Notes
+ -----
+ The probability density function for the noncentral Chi-square
+ distribution is
+
+ .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
+ \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
+ P_{Y_{df+2i}}(x),
+
+ where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Noncentral chi-squared distribution"
+ https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram
+
+ >>> rng = np.random.default_rng()
+ >>> import matplotlib.pyplot as plt
+ >>> values = plt.hist(rng.noncentral_chisquare(3, 20, 100000),
+ ... bins=200, density=True)
+ >>> plt.show()
+
+ Draw values from a noncentral chisquare with very small noncentrality,
+ and compare to a chisquare.
+
+ >>> plt.figure()
+ >>> values = plt.hist(rng.noncentral_chisquare(3, .0000001, 100000),
+ ... bins=np.arange(0., 25, .1), density=True)
+ >>> values2 = plt.hist(rng.chisquare(3, 100000),
+ ... bins=np.arange(0., 25, .1), density=True)
+ >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
+ >>> plt.show()
+
+ Demonstrate how large values of non-centrality lead to a more symmetric
+ distribution.
+
+ >>> plt.figure()
+ >>> values = plt.hist(rng.noncentral_chisquare(3, 20, 100000),
+ ... bins=200, density=True)
+ >>> plt.show()
+
+ """
+ return cont(&random_noncentral_chisquare, &self._bitgen, size, self.lock, 2,
+ df, 'df', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def standard_cauchy(self, size=None):
+ """
+ standard_cauchy(size=None)
+
+ Draw samples from a standard Cauchy distribution with mode = 0.
+
+ Also known as the Lorentz distribution.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ samples : ndarray or scalar
+ The drawn samples.
+
+ Notes
+ -----
+ The probability density function for the full Cauchy distribution is
+
+ .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+
+ (\\frac{x-x_0}{\\gamma})^2 \\bigr] }
+
+ and the Standard Cauchy distribution just sets :math:`x_0=0` and
+ :math:`\\gamma=1`
+
+ The Cauchy distribution arises in the solution to the driven harmonic
+ oscillator problem, and also describes spectral line broadening. It
+ also describes the distribution of values at which a line tilted at
+ a random angle will cut the x axis.
+
+ When studying hypothesis tests that assume normality, seeing how the
+ tests perform on data from a Cauchy distribution is a good indicator of
+ their sensitivity to a heavy-tailed distribution, since the Cauchy looks
+ very much like a Gaussian distribution, but with heavier tails.
+
+ References
+ ----------
+ .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
+ Distribution",
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
+ .. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/CauchyDistribution.html
+ .. [3] Wikipedia, "Cauchy distribution"
+ https://en.wikipedia.org/wiki/Cauchy_distribution
+
+ Examples
+ --------
+ Draw samples and plot the distribution:
+
+ >>> import matplotlib.pyplot as plt
+ >>> s = np.random.default_rng().standard_cauchy(1000000)
+ >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
+ >>> plt.hist(s, bins=100)
+ >>> plt.show()
+
+ """
+ return cont(&random_standard_cauchy, &self._bitgen, size, self.lock, 0,
+ 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None)
+
+ def standard_t(self, df, size=None):
+ """
+ standard_t(df, size=None)
+
+ Draw samples from a standard Student's t distribution with `df` degrees
+ of freedom.
+
+ A special case of the hyperbolic distribution. As `df` gets
+ large, the result resembles that of the standard normal
+ distribution (`standard_normal`).
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` is a scalar. Otherwise,
+ ``np.array(df).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized standard Student's t distribution.
+
+ Notes
+ -----
+ The probability density function for the t distribution is
+
+ .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}
+ \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}
+
+ The t test is based on an assumption that the data come from a
+ Normal distribution. The t test provides a way to test whether
+ the sample mean (that is the mean calculated from the data) is
+ a good estimate of the true mean.
+
+ The derivation of the t-distribution was first published in
+ 1908 by William Gosset while working for the Guinness Brewery
+ in Dublin. Due to proprietary issues, he had to publish under
+ a pseudonym, and so he used the name Student.
+
+ References
+ ----------
+ .. [1] Dalgaard, Peter, "Introductory Statistics With R",
+ Springer, 2002.
+ .. [2] Wikipedia, "Student's t-distribution"
+ https://en.wikipedia.org/wiki/Student's_t-distribution
+
+ Examples
+ --------
+ From Dalgaard page 83 [1]_, suppose the daily energy intake for 11
+ women in kilojoules (kJ) is:
+
+ >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\
+ ... 7515, 8230, 8770])
+
+ Does their energy intake deviate systematically from the recommended
+ value of 7725 kJ?
+
+ We have 10 degrees of freedom, so is the sample mean within 95% of the
+ recommended value?
+
+ >>> s = np.random.default_rng().standard_t(10, size=100000)
+ >>> np.mean(intake)
+ 6753.636363636364
+ >>> intake.std(ddof=1)
+ 1142.1232221373727
+
+ Calculate the t statistic, setting the ddof parameter to the unbiased
+ value so the divisor in the standard deviation will be degrees of
+ freedom, N-1.
+
+ >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(s, bins=100, density=True)
+
+ For a one-sided t-test, how far out in the distribution does the t
+ statistic appear?
+
+ >>> np.sum(s<t) / float(len(s))
+ 0.0090699999999999999 #random
+
+ So the p-value is about 0.009, which says there is roughly a 0.9%
+ chance of observing a t statistic this extreme if the null hypothesis
+ were true, so the null hypothesis is rejected at the 1% level.
+
+ """
+ return cont(&random_standard_t, &self._bitgen, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0, '', CONS_NONE,
+ 0, '', CONS_NONE,
+ None)
+
+ def vonmises(self, mu, kappa, size=None):
+ """
+ vonmises(mu, kappa, size=None)
+
+ Draw samples from a von Mises distribution.
+
+ Samples are drawn from a von Mises distribution with specified mode
+ (mu) and dispersion (kappa), on the interval [-pi, pi].
+
+ The von Mises distribution (also known as the circular normal
+ distribution) is a continuous probability distribution on the unit
+ circle. It may be thought of as the circular analogue of the normal
+ distribution.
+
+ Parameters
+ ----------
+ mu : float or array_like of floats
+ Mode ("center") of the distribution.
+ kappa : float or array_like of floats
+ Dispersion of the distribution, has to be >=0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mu`` and ``kappa`` are both scalars.
+ Otherwise, ``np.broadcast(mu, kappa).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized von Mises distribution.
+
+ See Also
+ --------
+ scipy.stats.vonmises : probability density function, distribution, or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the von Mises distribution is
+
+ .. math:: p(x) = \\frac{e^{\\kappa cos(x-\\mu)}}{2\\pi I_0(\\kappa)},
+
+ where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion,
+ and :math:`I_0(\\kappa)` is the modified Bessel function of order 0.
+
+ The von Mises is named for Richard Edler von Mises, who was born in
+ Austria-Hungary, in what is now the Ukraine. He fled to the United
+ States in 1939 and became a professor at Harvard. He worked in
+ probability theory, aerodynamics, fluid mechanics, and philosophy of
+ science.
+
+ References
+ ----------
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+ .. [2] von Mises, R., "Mathematical Theory of Probability
+ and Statistics", New York: Academic Press, 1964.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, kappa = 0.0, 4.0 # mean and dispersion
+ >>> s = np.random.default_rng().vonmises(mu, kappa, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.special import i0 # doctest: +SKIP
+ >>> plt.hist(s, 50, density=True)
+ >>> x = np.linspace(-np.pi, np.pi, num=51)
+ >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa)) # doctest: +SKIP
+ >>> plt.plot(x, y, linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
+ """
+ return cont(&random_vonmises, &self._bitgen, size, self.lock, 2,
+ mu, 'mu', CONS_NONE,
+ kappa, 'kappa', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def pareto(self, a, size=None):
+ """
+ pareto(a, size=None)
+
+ Draw samples from a Pareto II or Lomax distribution with
+ specified shape.
+
+ The Lomax or Pareto II distribution is a shifted Pareto
+ distribution. The classical Pareto distribution can be
+ obtained from the Lomax distribution by adding 1 and
+ multiplying by the scale parameter ``m`` (see Notes). The
+ smallest value of the Lomax distribution is zero while for the
+ classical Pareto distribution it is ``mu``, where the standard
+ Pareto distribution has location ``mu = 1``. Lomax can also
+ be considered as a simplified version of the Generalized
+ Pareto distribution (available in SciPy), with the scale set
+ to one and the location set to zero.
+
+ The Pareto distribution must be greater than zero, and is
+ unbounded above. It is also known as the "80-20 rule". In
+ this distribution, 80 percent of the weights are in the lowest
+ 20 percent of the range, while the other 20 percent fill the
+ remaining 80 percent of the range.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Shape of the distribution. Must be positive.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Pareto distribution.
+
+ See Also
+ --------
+ scipy.stats.lomax : probability density function, distribution or
+ cumulative density function, etc.
+ scipy.stats.genpareto : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Pareto distribution is
+
+ .. math:: p(x) = \\frac{am^a}{x^{a+1}}
+
+ where :math:`a` is the shape and :math:`m` the scale.
+
+ The Pareto distribution, named after the Italian economist
+ Vilfredo Pareto, is a power law probability distribution
+ useful in many real world problems. Outside the field of
+ economics it is generally referred to as the Bradford
+ distribution. Pareto developed the distribution to describe
+ the distribution of wealth in an economy. It has also found
+ use in insurance, web page access statistics, oil field sizes,
+ and many other problems, including the download frequency for
+ projects in Sourceforge [1]_. It is one of the so-called
+ "fat-tailed" distributions.
+
+
+ References
+ ----------
+ .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
+ Sourceforge projects.
+ .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
+ .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
+ Values, Birkhauser Verlag, Basel, pp 23-30.
+ .. [4] Wikipedia, "Pareto distribution",
+ https://en.wikipedia.org/wiki/Pareto_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a, m = 3., 2. # shape and mode
+ >>> s = (np.random.default_rng().pareto(a, 1000) + 1) * m
+
+ Display the histogram of the samples, along with the probability
+ density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, _ = plt.hist(s, 100, density=True)
+ >>> fit = a*m**a / bins**(a+1)
+ >>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ return cont(&random_pareto, &self._bitgen, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def weibull(self, a, size=None):
+ """
+ weibull(a, size=None)
+
+ Draw samples from a Weibull distribution.
+
+ Draw samples from a 1-parameter Weibull distribution with the given
+ shape parameter `a`.
+
+ .. math:: X = (-ln(U))^{1/a}
+
+ Here, U is drawn from the uniform distribution over (0,1].
+
+ The more common 2-parameter Weibull, including a scale parameter
+ :math:`\\lambda` is just :math:`X = \\lambda(-ln(U))^{1/a}`.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Shape parameter of the distribution. Must be nonnegative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Weibull distribution.
+
+ See Also
+ --------
+ scipy.stats.weibull_max
+ scipy.stats.weibull_min
+ scipy.stats.genextreme
+ gumbel
+
+ Notes
+ -----
+ The Weibull (or Type III asymptotic extreme value distribution
+ for smallest values, SEV Type III, or Rosin-Rammler
+ distribution) is one of a class of Generalized Extreme Value
+ (GEV) distributions used in modeling extreme value problems.
+ This class includes the Gumbel and Frechet distributions.
+
+ The probability density for the Weibull distribution is
+
+ .. math:: p(x) = \\frac{a}
+ {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a},
+
+ where :math:`a` is the shape and :math:`\\lambda` the scale.
+
+ The function has its peak (the mode) at
+ :math:`\\lambda(\\frac{a-1}{a})^{1/a}`.
+
+ When ``a = 1``, the Weibull distribution reduces to the exponential
+ distribution.
+
+ References
+ ----------
+ .. [1] Waloddi Weibull, Royal Technical University, Stockholm,
+ 1939 "A Statistical Theory Of The Strength Of Materials",
+ Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,
+ Generalstabens Litografiska Anstalts Forlag, Stockholm.
+ .. [2] Waloddi Weibull, "A Statistical Distribution Function of
+ Wide Applicability", Journal Of Applied Mechanics ASME Paper
+ 1951.
+ .. [3] Wikipedia, "Weibull distribution",
+ https://en.wikipedia.org/wiki/Weibull_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> a = 5. # shape
+ >>> s = rng.weibull(a, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.arange(1,100.)/50.
+ >>> def weib(x,n,a):
+ ... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
+
+ >>> count, bins, ignored = plt.hist(rng.weibull(5.,1000))
+ >>> x = np.arange(1,100.)/50.
+ >>> scale = count.max()/weib(x, 1., 5.).max()
+ >>> plt.plot(x, weib(x, 1., 5.)*scale)
+ >>> plt.show()
+
+ """
+ return cont(&random_weibull, &self._bitgen, size, self.lock, 1,
+ a, 'a', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def power(self, a, size=None):
+ """
+ power(a, size=None)
+
+ Draws samples in [0, 1] from a power distribution with positive
+ exponent a - 1.
+
+ Also known as the power function distribution.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Parameter of the distribution. Must be positive.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized power distribution.
+
+ Raises
+ ------
+ ValueError
+ If a <= 0.
+
+ Notes
+ -----
+ The probability density function is
+
+ .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0.
+
+ The power function distribution is just the inverse of the Pareto
+ distribution. It may also be seen as a special case of the Beta
+ distribution.
+
+ It is used, for example, in modeling the over-reporting of insurance
+ claims.
+
+ References
+ ----------
+ .. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions
+ in economics and actuarial sciences", Wiley, 2003.
+ .. [2] Heckert, N. A. and Filliben, James J. "NIST Handbook 148:
+ Dataplot Reference Manual, Volume 2: Let Subcommands and Library
+ Functions", National Institute of Standards and Technology
+ Handbook Series, June 2003.
+ https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> a = 5. # shape
+ >>> samples = 1000
+ >>> s = rng.power(a, samples)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, bins=30)
+ >>> x = np.linspace(0, 1, 100)
+ >>> y = a*x**(a-1.)
+ >>> normed_y = samples*np.diff(bins)[0]*y
+ >>> plt.plot(x, normed_y)
+ >>> plt.show()
+
+ Compare the power function distribution to the inverse of the Pareto.
+
+ >>> from scipy import stats # doctest: +SKIP
+ >>> rvs = rng.power(5, 1000000)
+ >>> rvsp = rng.pareto(5, 1000000)
+ >>> xx = np.linspace(0,1,100)
+ >>> powpdf = stats.powerlaw.pdf(xx,5) # doctest: +SKIP
+
+ >>> plt.figure()
+ >>> plt.hist(rvs, bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
+ >>> plt.title('power(5)')
+
+ >>> plt.figure()
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
+ >>> plt.title('inverse of 1 + Generator.pareto(5)')
+
+ >>> plt.figure()
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
+ >>> plt.title('inverse of stats.pareto(5)')
+
+ """
+ # `a` is checked as CONS_POSITIVE inside `cont`, so a <= 0 (or NaN)
+ # raises ValueError; the two trailing parameter slots are unused.
+ return cont(&random_power, &self._bitgen, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def laplace(self, loc=0.0, scale=1.0, size=None):
+ """
+ laplace(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from the Laplace or double exponential distribution with
+ specified location (or mean) and scale (decay).
+
+ The Laplace distribution is similar to the Gaussian/normal distribution,
+ but is sharper at the peak and has fatter tails. It represents the
+ difference between two independent, identically distributed exponential
+ random variables.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ The position, :math:`\\mu`, of the distribution peak. Default is 0.
+ scale : float or array_like of floats, optional
+ :math:`\\lambda`, the exponential decay. Default is 1. Must be non-
+ negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Laplace distribution.
+
+ Notes
+ -----
+ It has the probability density function
+
+ .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}
+ \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).
+
+ The first law of Laplace, from 1774, states that the frequency
+ of an error can be expressed as an exponential function of the
+ absolute magnitude of the error, which leads to the Laplace
+ distribution. For many problems in economics and health
+ sciences, this distribution seems to model the data better
+ than the standard Gaussian distribution.
+
+ References
+ ----------
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+ .. [2] Kotz, Samuel, et. al. "The Laplace Distribution and
+ Generalizations, " Birkhauser, 2001.
+ .. [3] Weisstein, Eric W. "Laplace Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/LaplaceDistribution.html
+ .. [4] Wikipedia, "Laplace distribution",
+ https://en.wikipedia.org/wiki/Laplace_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution
+
+ >>> loc, scale = 0., 1.
+ >>> s = np.random.default_rng().laplace(loc, scale, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> x = np.arange(-8., 8., .01)
+ >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
+ >>> plt.plot(x, pdf)
+
+ Plot Gaussian for comparison:
+
+ >>> g = (1/(scale * np.sqrt(2 * np.pi)) *
+ ... np.exp(-(x - loc)**2 / (2 * scale**2)))
+ >>> plt.plot(x,g)
+
+ """
+ # `loc` is unconstrained; `scale` must be non-negative
+ # (CONS_NON_NEGATIVE) or `cont` raises ValueError.
+ return cont(&random_laplace, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def gumbel(self, loc=0.0, scale=1.0, size=None):
+ """
+ gumbel(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from a Gumbel distribution.
+
+ Draw samples from a Gumbel distribution with specified location and
+ scale. For more information on the Gumbel distribution, see
+ Notes and References below.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ The location of the mode of the distribution. Default is 0.
+ scale : float or array_like of floats, optional
+ The scale parameter of the distribution. Default is 1. Must be non-
+ negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Gumbel distribution.
+
+ See Also
+ --------
+ scipy.stats.gumbel_l
+ scipy.stats.gumbel_r
+ scipy.stats.genextreme
+ weibull
+
+ Notes
+ -----
+ The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme
+ Value Type I) distribution is one of a class of Generalized Extreme
+ Value (GEV) distributions used in modeling extreme value problems.
+ The Gumbel is a special case of the Extreme Value Type I distribution
+ for maximums from distributions with "exponential-like" tails.
+
+ The probability density for the Gumbel distribution is
+
+ .. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/
+ \\beta}},
+
+ where :math:`\\mu` is the mode, a location parameter, and
+ :math:`\\beta` is the scale parameter.
+
+ The Gumbel (named for German mathematician Emil Julius Gumbel) was used
+ very early in the hydrology literature, for modeling the occurrence of
+ flood events. It is also used for modeling maximum wind speed and
+ rainfall rates. It is a "fat-tailed" distribution - the probability of
+ an event in the tail of the distribution is larger than if one used a
+ Gaussian, hence the surprisingly frequent occurrence of 100-year
+ floods. Floods were initially modeled as a Gaussian process, which
+ underestimated the frequency of extreme events.
+
+ It is one of a class of extreme value distributions, the Generalized
+ Extreme Value (GEV) distributions, which also includes the Weibull and
+ Frechet.
+
+ The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance
+ of :math:`\\frac{\\pi^2}{6}\\beta^2`.
+
+ References
+ ----------
+ .. [1] Gumbel, E. J., "Statistics of Extremes,"
+ New York: Columbia University Press, 1958.
+ .. [2] Reiss, R.-D. and Thomas, M., "Statistical Analysis of Extreme
+ Values from Insurance, Finance, Hydrology and Other Fields,"
+ Basel: Birkhauser Verlag, 2001.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> mu, beta = 0, 0.1 # location and scale
+ >>> s = rng.gumbel(mu, beta, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
+ ... * np.exp( -np.exp( -(bins - mu) /beta) ),
+ ... linewidth=2, color='r')
+ >>> plt.show()
+
+ Show how an extreme value distribution can arise from a Gaussian process
+ and compare to a Gaussian:
+
+ >>> means = []
+ >>> maxima = []
+ >>> for i in range(0,1000) :
+ ... a = rng.normal(mu, beta, 1000)
+ ... means.append(a.mean())
+ ... maxima.append(a.max())
+ >>> count, bins, ignored = plt.hist(maxima, 30, density=True)
+ >>> beta = np.std(maxima) * np.sqrt(6) / np.pi
+ >>> mu = np.mean(maxima) - 0.57721*beta
+ >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
+ ... * np.exp(-np.exp(-(bins - mu)/beta)),
+ ... linewidth=2, color='r')
+ >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))
+ ... * np.exp(-(bins - mu)**2 / (2 * beta**2)),
+ ... linewidth=2, color='g')
+ >>> plt.show()
+
+ """
+ # `loc` is unconstrained; `scale` must be non-negative
+ # (CONS_NON_NEGATIVE) or `cont` raises ValueError.
+ return cont(&random_gumbel, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def logistic(self, loc=0.0, scale=1.0, size=None):
+ """
+ logistic(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from a logistic distribution.
+
+ Samples are drawn from a logistic distribution with specified
+ parameters, loc (location or mean, also median), and scale (>0).
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ Parameter of the distribution. Default is 0.
+ scale : float or array_like of floats, optional
+ Parameter of the distribution. Must be non-negative.
+ Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized logistic distribution.
+
+ See Also
+ --------
+ scipy.stats.logistic : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Logistic distribution is
+
+ .. math:: P(x) = \\frac{e^{-(x-\\mu)/s}}{s(1+e^{-(x-\\mu)/s})^2},
+
+ where :math:`\\mu` = location and :math:`s` = scale.
+
+ The Logistic distribution is used in Extreme Value problems where it
+ can act as a mixture of Gumbel distributions, in Epidemiology, and by
+ the World Chess Federation (FIDE) where it is used in the Elo ranking
+ system, assuming the performance of each player is a logistically
+ distributed random variable.
+
+ References
+ ----------
+ .. [1] Reiss, R.-D. and Thomas M. (2001), "Statistical Analysis of
+ Extreme Values, from Insurance, Finance, Hydrology and Other
+ Fields," Birkhauser Verlag, Basel, pp 132-133.
+ .. [2] Weisstein, Eric W. "Logistic Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/LogisticDistribution.html
+ .. [3] Wikipedia, "Logistic-distribution",
+ https://en.wikipedia.org/wiki/Logistic_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> loc, scale = 10, 1
+ >>> s = np.random.default_rng().logistic(loc, scale, 10000)
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, bins=50)
+
+ # plot against distribution
+
+ >>> def logist(x, loc, scale):
+ ... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
+ >>> lgst_val = logist(bins, loc, scale)
+ >>> plt.plot(bins, lgst_val * count.max() / lgst_val.max())
+ >>> plt.show()
+
+ """
+ # `loc` is unconstrained; `scale` must be non-negative
+ # (CONS_NON_NEGATIVE) or `cont` raises ValueError.
+ return cont(&random_logistic, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def lognormal(self, mean=0.0, sigma=1.0, size=None):
+ """
+ lognormal(mean=0.0, sigma=1.0, size=None)
+
+ Draw samples from a log-normal distribution.
+
+ Draw samples from a log-normal distribution with specified mean,
+ standard deviation, and array shape. Note that the mean and standard
+ deviation are not the values for the distribution itself, but of the
+ underlying normal distribution it is derived from.
+
+ Parameters
+ ----------
+ mean : float or array_like of floats, optional
+ Mean value of the underlying normal distribution. Default is 0.
+ sigma : float or array_like of floats, optional
+ Standard deviation of the underlying normal distribution. Must be
+ non-negative. Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mean`` and ``sigma`` are both scalars.
+ Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized log-normal distribution.
+
+ See Also
+ --------
+ scipy.stats.lognorm : probability density function, distribution,
+ cumulative density function, etc.
+
+ Notes
+ -----
+ A variable `x` has a log-normal distribution if `log(x)` is normally
+ distributed. The probability density function for the log-normal
+ distribution is:
+
+ .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}}
+ e^{(-\\frac{(ln(x)-\\mu)^2}{2\\sigma^2})}
+
+ where :math:`\\mu` is the mean and :math:`\\sigma` is the standard
+ deviation of the normally distributed logarithm of the variable.
+ A log-normal distribution results if a random variable is the *product*
+ of a large number of independent, identically-distributed variables in
+ the same way that a normal distribution results if the variable is the
+ *sum* of a large number of independent, identically-distributed
+ variables.
+
+ References
+ ----------
+ .. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
+ Distributions across the Sciences: Keys and Clues,"
+ BioScience, Vol. 51, No. 5, May, 2001.
+ https://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
+ .. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
+ Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> mu, sigma = 3., 1. # mean and standard deviation
+ >>> s = rng.lognormal(mu, sigma, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 100, density=True, align='mid')
+
+ >>> x = np.linspace(min(bins), max(bins), 10000)
+ >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
+ ... / (x * sigma * np.sqrt(2 * np.pi)))
+
+ >>> plt.plot(x, pdf, linewidth=2, color='r')
+ >>> plt.axis('tight')
+ >>> plt.show()
+
+ Demonstrate that taking the products of random samples from a normal
+ distribution can be fit well by a log-normal probability density
+ function.
+
+ >>> # Generate a thousand samples: each is the product of 100 random
+ >>> # values, drawn from a normal distribution.
+ >>> b = []
+ >>> for i in range(1000):
+ ... a = 10. + rng.standard_normal(100)
+ ... b.append(np.product(a))
+
+ >>> b = np.array(b) / np.min(b) # scale values to be positive
+ >>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
+ >>> sigma = np.std(np.log(b))
+ >>> mu = np.mean(np.log(b))
+
+ >>> x = np.linspace(min(bins), max(bins), 10000)
+ >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
+ ... / (x * sigma * np.sqrt(2 * np.pi)))
+
+ >>> plt.plot(x, pdf, color='r', linewidth=2)
+ >>> plt.show()
+
+ """
+ # `mean` is unconstrained; `sigma` must be non-negative
+ # (CONS_NON_NEGATIVE) or `cont` raises ValueError.
+ return cont(&random_lognormal, &self._bitgen, size, self.lock, 2,
+ mean, 'mean', CONS_NONE,
+ sigma, 'sigma', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def rayleigh(self, scale=1.0, size=None):
+ """
+ rayleigh(scale=1.0, size=None)
+
+ Draw samples from a Rayleigh distribution.
+
+ The :math:`\\chi` and Weibull distributions are generalizations of the
+ Rayleigh.
+
+ Parameters
+ ----------
+ scale : float or array_like of floats, optional
+ Scale, also equals the mode. Must be non-negative. Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``scale`` is a scalar. Otherwise,
+ ``np.array(scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Rayleigh distribution.
+
+ Notes
+ -----
+ The probability density function for the Rayleigh distribution is
+
+ .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}
+
+ The Rayleigh distribution would arise, for example, if the East
+ and North components of the wind velocity had identical zero-mean
+ Gaussian distributions. Then the wind speed would have a Rayleigh
+ distribution.
+
+ References
+ ----------
+ .. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
+ https://web.archive.org/web/20090514091424/http://brighton-webs.co.uk:80/distributions/rayleigh.asp
+ .. [2] Wikipedia, "Rayleigh distribution"
+ https://en.wikipedia.org/wiki/Rayleigh_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram
+
+ >>> from matplotlib.pyplot import hist
+ >>> rng = np.random.default_rng()
+ >>> values = hist(rng.rayleigh(3, 100000), bins=200, density=True)
+
+ Wave heights tend to follow a Rayleigh distribution. If the mean wave
+ height is 1 meter, what fraction of waves are likely to be larger than 3
+ meters?
+
+ >>> meanvalue = 1
+ >>> modevalue = np.sqrt(2 / np.pi) * meanvalue
+ >>> s = rng.rayleigh(modevalue, 1000000)
+
+ The percentage of waves larger than 3 meters is:
+
+ >>> 100.*sum(s>3)/1000000.
+ 0.087300000000000003 # random
+
+ """
+ # Single-parameter distribution: only `scale` is used and it must be
+ # non-negative (CONS_NON_NEGATIVE) or `cont` raises ValueError.
+ return cont(&random_rayleigh, &self._bitgen, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def wald(self, mean, scale, size=None):
+ """
+ wald(mean, scale, size=None)
+
+ Draw samples from a Wald, or inverse Gaussian, distribution.
+
+ As the scale approaches infinity, the distribution becomes more like a
+ Gaussian. Some references claim that the Wald is an inverse Gaussian
+ with mean equal to 1, but this is by no means universal.
+
+ The inverse Gaussian distribution was first studied in relationship to
+ Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
+ because there is an inverse relationship between the time to cover a
+ unit distance and distance covered in unit time.
+
+ Parameters
+ ----------
+ mean : float or array_like of floats
+ Distribution mean, must be > 0.
+ scale : float or array_like of floats
+ Scale parameter, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mean`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Wald distribution.
+
+ Notes
+ -----
+ The probability density function for the Wald distribution is
+
+ .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^
+ \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}
+
+ As noted above the inverse Gaussian distribution first arise
+ from attempts to model Brownian motion. It is also a
+ competitor to the Weibull for use in reliability modeling and
+ modeling stock returns and interest rate processes.
+
+ References
+ ----------
+ .. [1] Brighton Webs Ltd., Wald Distribution,
+ https://web.archive.org/web/20090423014010/http://www.brighton-webs.co.uk:80/distributions/wald.asp
+ .. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
+ Distribution: Theory : Methodology, and Applications", CRC Press,
+ 1988.
+ .. [3] Wikipedia, "Inverse Gaussian distribution"
+ https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram:
+
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(np.random.default_rng().wald(3, 2, 100000), bins=200, density=True)
+ >>> plt.show()
+
+ """
+ # Both parameters are checked as strictly positive (CONS_POSITIVE);
+ # zero, negative, or NaN values raise ValueError inside `cont`.
+ return cont(&random_wald, &self._bitgen, size, self.lock, 2,
+ mean, 'mean', CONS_POSITIVE,
+ scale, 'scale', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def triangular(self, left, mode, right, size=None):
+ """
+ triangular(left, mode, right, size=None)
+
+ Draw samples from the triangular distribution over the
+ interval ``[left, right]``.
+
+ The triangular distribution is a continuous probability
+ distribution with lower limit left, peak at mode, and upper
+ limit right. Unlike the other distributions, these parameters
+ directly define the shape of the pdf.
+
+ Parameters
+ ----------
+ left : float or array_like of floats
+ Lower limit.
+ mode : float or array_like of floats
+ The value where the peak of the distribution occurs.
+ The value must fulfill the condition ``left <= mode <= right``.
+ right : float or array_like of floats
+ Upper limit, must be larger than `left`.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``left``, ``mode``, and ``right``
+ are all scalars. Otherwise, ``np.broadcast(left, mode, right).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized triangular distribution.
+
+ Notes
+ -----
+ The probability density function for the triangular distribution is
+
+ .. math:: P(x;l, m, r) = \\begin{cases}
+ \\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\
+ \\frac{2(r-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
+ 0& \\text{otherwise}.
+ \\end{cases}
+
+ The triangular distribution is often used in ill-defined
+ problems where the underlying distribution is not known, but
+ some knowledge of the limits and mode exists. Often it is used
+ in simulations.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Triangular distribution"
+ https://en.wikipedia.org/wiki/Triangular_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram:
+
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(np.random.default_rng().triangular(-3, 0, 8, 100000), bins=200,
+ ... density=True)
+ >>> plt.show()
+
+ """
+ # NOTE(review): `is_scalar` is declared but never referenced below;
+ # the scalar path is detected directly via PyArray_NDIM == 0.
+ cdef bint is_scalar = True
+ cdef double fleft, fmode, fright
+ cdef np.ndarray oleft, omode, oright
+
+ # Coerce all three parameters to aligned double arrays so the
+ # ordering constraints can be checked uniformly.
+ oleft = <np.ndarray>np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ omode = <np.ndarray>np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ oright = <np.ndarray>np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ALIGNED)
+
+ # Fast path: all three parameters are 0-d (scalars) -> validate the
+ # plain doubles and dispatch without broadcasting machinery.
+ if np.PyArray_NDIM(oleft) == np.PyArray_NDIM(omode) == np.PyArray_NDIM(oright) == 0:
+ fleft = PyFloat_AsDouble(left)
+ fright = PyFloat_AsDouble(right)
+ fmode = PyFloat_AsDouble(mode)
+
+ if fleft > fmode:
+ raise ValueError("left > mode")
+ if fmode > fright:
+ raise ValueError("mode > right")
+ if fleft == fright:
+ raise ValueError("left == right")
+ return cont(&random_triangular, &self._bitgen, size, self.lock, 3,
+ fleft, '', CONS_NONE,
+ fmode, '', CONS_NONE,
+ fright, '', CONS_NONE, None)
+
+ # Array path: validate the ordering constraints elementwise, then
+ # broadcast the three parameter arrays against each other.
+ if np.any(np.greater(oleft, omode)):
+ raise ValueError("left > mode")
+ if np.any(np.greater(omode, oright)):
+ raise ValueError("mode > right")
+ if np.any(np.equal(oleft, oright)):
+ raise ValueError("left == right")
+
+ return cont_broadcast_3(&random_triangular, &self._bitgen, size, self.lock,
+ oleft, '', CONS_NONE,
+ omode, '', CONS_NONE,
+ oright, '', CONS_NONE)
+
+ # Complicated, discrete distributions:
+ def binomial(self, n, p, size=None):
+ """
+ binomial(n, p, size=None)
+
+ Draw samples from a binomial distribution.
+
+ Samples are drawn from a binomial distribution with specified
+ parameters, n trials and p probability of success where
+ n an integer >= 0 and p is in the interval [0,1]. (n may be
+ input as a float, but it is truncated to an integer in use)
+
+ Parameters
+ ----------
+ n : int or array_like of ints
+ Parameter of the distribution, >= 0. Floats are also accepted,
+ but they will be truncated to integers.
+ p : float or array_like of floats
+ Parameter of the distribution, >= 0 and <=1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``n`` and ``p`` are both scalars.
+ Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized binomial distribution, where
+ each sample is equal to the number of successes over the n trials.
+
+ See Also
+ --------
+ scipy.stats.binom : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the binomial distribution is
+
+ .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},
+
+ where :math:`n` is the number of trials, :math:`p` is the probability
+ of success, and :math:`N` is the number of successes.
+
+ When estimating the standard error of a proportion in a population by
+ using a random sample, the normal distribution works well unless the
+ product p*n <=5, where p = population proportion estimate, and n =
+ number of samples, in which case the binomial distribution is used
+ instead. For example, a sample of 15 people shows 4 who are left
+ handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,
+ so the binomial distribution should be used in this case.
+
+ References
+ ----------
+ .. [1] Dalgaard, Peter, "Introductory Statistics with R",
+ Springer-Verlag, 2002.
+ .. [2] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
+ Fifth Edition, 2002.
+ .. [3] Lentner, Marvin, "Elementary Applied Statistics", Bogden
+ and Quigley, 1972.
+ .. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/BinomialDistribution.html
+ .. [5] Wikipedia, "Binomial distribution",
+ https://en.wikipedia.org/wiki/Binomial_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> n, p = 10, .5 # number of trials, probability of each trial
+ >>> s = rng.binomial(n, p, 1000)
+ # result of flipping a coin 10 times, tested 1000 times.
+
+ A real world example. A company drills 9 wild-cat oil exploration
+ wells, each with an estimated probability of success of 0.1. All nine
+ wells fail. What is the probability of that happening?
+
+ Let's do 20,000 trials of the model, and count the number that
+ generate zero positive results.
+
+ >>> sum(rng.binomial(9, 0.1, 20000) == 0)/20000.
+ # answer = 0.38885, or 38%.
+
+ """
+
+ # Uses a custom implementation since self._binomial is required
+ cdef double _dp = 0
+ cdef int64_t _in = 0
+ cdef bint is_scalar = True
+ cdef np.npy_intp i, cnt
+ cdef np.ndarray randoms
+ cdef np.int64_t *randoms_data
+ cdef np.broadcast it
+
+ # Coerce parameters; `is_scalar` stays True only if both `p` and `n`
+ # are 0-d, which selects the scalar fast path below.
+ p_arr = <np.ndarray>np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0
+ n_arr = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0
+
+ if not is_scalar:
+ # Broadcast path: validate whole arrays up front, then iterate
+ # (output, p, n) in lockstep with a NumPy multi-iterator.
+ check_array_constraint(p_arr, 'p', CONS_BOUNDED_0_1)
+ check_array_constraint(n_arr, 'n', CONS_NON_NEGATIVE)
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ # No explicit size: output shape is the broadcast of p and n.
+ it = np.PyArray_MultiIterNew2(p_arr, n_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+ cnt = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
+ # Hold the lock for the whole fill; the generation loop runs
+ # without the GIL.
+ with self.lock, nogil:
+ for i in range(cnt):
+ _dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ _in = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] = random_binomial(&self._bitgen, _dp, _in, &self._binomial)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+ # Scalar path: truncate `n` to int64 (floats are accepted per the
+ # docstring) and validate both scalars.
+ _dp = PyFloat_AsDouble(p)
+ _in = <int64_t>n
+ check_constraint(_dp, 'p', CONS_BOUNDED_0_1)
+ check_constraint(<double>_in, 'n', CONS_NON_NEGATIVE)
+
+ if size is None:
+ # Single draw; return a plain scalar.
+ with self.lock:
+ return random_binomial(&self._bitgen, _dp, _in, &self._binomial)
+
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ cnt = np.PyArray_SIZE(randoms)
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+
+ with self.lock, nogil:
+ for i in range(cnt):
+ randoms_data[i] = random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
+
+ return randoms
+
+ def negative_binomial(self, n, p, size=None):
+ """
+ negative_binomial(n, p, size=None)
+
+ Draw samples from a negative binomial distribution.
+
+ Samples are drawn from a negative binomial distribution with specified
+ parameters, `n` successes and `p` probability of success where `n`
+ is > 0 and `p` is in the interval [0, 1].
+
+ Parameters
+ ----------
+ n : float or array_like of floats
+ Parameter of the distribution, > 0.
+ p : float or array_like of floats
+ Parameter of the distribution, >= 0 and <=1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``n`` and ``p`` are both scalars.
+ Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized negative binomial distribution,
+ where each sample is equal to N, the number of failures that
+ occurred before a total of n successes was reached.
+
+ Notes
+ -----
+ The probability mass function of the negative binomial distribution is
+
+ .. math:: P(N;n,p) = \\frac{\\Gamma(N+n)}{N!\\Gamma(n)}p^{n}(1-p)^{N},
+
+ where :math:`n` is the number of successes, :math:`p` is the
+ probability of success, :math:`N+n` is the number of trials, and
+ :math:`\\Gamma` is the gamma function. When :math:`n` is an integer,
+ :math:`\\frac{\\Gamma(N+n)}{N!\\Gamma(n)} = \\binom{N+n-1}{N}`, which is
+ the more common form of this term in the pmf. The negative
+ binomial distribution gives the probability of N failures given n
+ successes, with a success on the last trial.
+
+ If one throws a die repeatedly until the third time a "1" appears,
+ then the probability distribution of the number of non-"1"s that
+ appear before the third "1" is a negative binomial distribution.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Negative Binomial Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/NegativeBinomialDistribution.html
+ .. [2] Wikipedia, "Negative binomial distribution",
+ https://en.wikipedia.org/wiki/Negative_binomial_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ A real world example. A company drills wild-cat oil
+ exploration wells, each with an estimated probability of
+ success of 0.1. What is the probability of having one success
+ for each successive well, that is what is the probability of a
+ single success after drilling 5 wells, after 6 wells, etc.?
+
+ >>> s = np.random.default_rng().negative_binomial(1, 0.1, 100000)
+ >>> for i in range(1, 11): # doctest: +SKIP
+ ... probability = sum(s<i) / 100000.
+ ... print(i, "wells drilled, probability of one success =", probability)
+
+ """
+ # Discrete dispatcher: `n` must be positive and not NaN
+ # (CONS_POSITIVE_NOT_NAN), `p` must lie in [0, 1] (CONS_BOUNDED_0_1).
+ return disc(&random_negative_binomial, &self._bitgen, size, self.lock, 2, 0,
+ n, 'n', CONS_POSITIVE_NOT_NAN,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE)
+
+ def poisson(self, lam=1.0, size=None):
+ """
+ poisson(lam=1.0, size=None)
+
+ Draw samples from a Poisson distribution.
+
+ The Poisson distribution is the limit of the binomial distribution
+ for large N.
+
+ Parameters
+ ----------
+ lam : float or array_like of floats
+ Expectation of interval, must be >= 0. A sequence of expectation
+ intervals must be broadcastable over the requested size.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``lam`` is a scalar. Otherwise,
+ ``np.array(lam).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Poisson distribution.
+
+ Notes
+ -----
+ The Poisson distribution
+
+ .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}
+
+ For events with an expected separation :math:`\\lambda` the Poisson
+ distribution :math:`f(k; \\lambda)` describes the probability of
+ :math:`k` events occurring within the observed
+ interval :math:`\\lambda`.
+
+ Because the output is limited to the range of the C int64 type, a
+ ValueError is raised when `lam` is within 10 sigma of the maximum
+ representable value.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Poisson Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/PoissonDistribution.html
+ .. [2] Wikipedia, "Poisson distribution",
+ https://en.wikipedia.org/wiki/Poisson_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> import numpy as np
+ >>> rng = np.random.default_rng()
+ >>> s = rng.poisson(5, 10000)
+
+ Display histogram of the sample:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 14, density=True)
+ >>> plt.show()
+
+ Draw each 100 values for lambda 100 and 500:
+
+ >>> s = rng.poisson(lam=(100., 500.), size=(100, 2))
+
+ """
+ # CONS_POISSON enforces lam >= 0 and rejects values near the int64
+ # limit (the ValueError documented in Notes); broadcasting over
+ # `size` and allocation are handled by the shared `disc` helper.
+ return disc(&random_poisson, &self._bitgen, size, self.lock, 1, 0,
+ lam, 'lam', CONS_POISSON,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def zipf(self, a, size=None):
+ """
+ zipf(a, size=None)
+
+ Draw samples from a Zipf distribution.
+
+ Samples are drawn from a Zipf distribution with specified parameter
+ `a` > 1.
+
+ The Zipf distribution (also known as the zeta distribution) is a
+ continuous probability distribution that satisfies Zipf's law: the
+ frequency of an item is inversely proportional to its rank in a
+ frequency table.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Distribution parameter. Must be greater than 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Zipf distribution.
+
+ See Also
+ --------
+ scipy.stats.zipf : probability density function, distribution, or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Zipf distribution is
+
+ .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)},
+
+ where :math:`\\zeta` is the Riemann Zeta function.
+
+ It is named for the American linguist George Kingsley Zipf, who noted
+ that the frequency of any word in a sample of a language is inversely
+ proportional to its rank in the frequency table.
+
+ References
+ ----------
+ .. [1] Zipf, G. K., "Selected Studies of the Principle of Relative
+ Frequency in Language," Cambridge, MA: Harvard Univ. Press,
+ 1932.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = 2. # parameter
+ >>> s = np.random.default_rng().zipf(a, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy import special # doctest: +SKIP
+
+ Truncate s values at 50 so plot is interesting:
+
+ >>> count, bins, ignored = plt.hist(s[s<50],
+ ... 50, density=True)
+ >>> x = np.arange(1., 50.)
+ >>> y = x**(-a) / special.zetac(a) # doctest: +SKIP
+ >>> plt.plot(x, y/max(y), linewidth=2, color='r') # doctest: +SKIP
+ >>> plt.show()
+
+ """
+ # CONS_GT_1 enforces the a > 1 requirement stated above; the shared
+ # `disc` helper handles broadcasting of `a` against `size`.
+ return disc(&random_zipf, &self._bitgen, size, self.lock, 1, 0,
+ a, 'a', CONS_GT_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def geometric(self, p, size=None):
+ """
+ geometric(p, size=None)
+
+ Draw samples from the geometric distribution.
+
+ Bernoulli trials are experiments with one of two outcomes:
+ success or failure (an example of such an experiment is flipping
+ a coin). The geometric distribution models the number of trials
+ that must be run in order to achieve success. It is therefore
+ supported on the positive integers, ``k = 1, 2, ...``.
+
+ The probability mass function of the geometric distribution is
+
+ .. math:: f(k) = (1 - p)^{k - 1} p
+
+ where `p` is the probability of success of an individual trial.
+
+ Parameters
+ ----------
+ p : float or array_like of floats
+ The probability of success of an individual trial.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``p`` is a scalar. Otherwise,
+ ``np.array(p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized geometric distribution.
+
+ Examples
+ --------
+ Draw ten thousand values from the geometric distribution,
+ with the probability of an individual success equal to 0.35:
+
+ >>> z = np.random.default_rng().geometric(p=0.35, size=10000)
+
+ >>> (z == 1).sum() / 10000.
+ 0.34889999999999999 #random
+
+ """
+ # CONS_BOUNDED_GT_0_1 constrains p to be strictly positive and at
+ # most 1; broadcasting and allocation are handled by `disc`.
+ return disc(&random_geometric, &self._bitgen, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_GT_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def hypergeometric(self, ngood, nbad, nsample, size=None):
+ """
+ hypergeometric(ngood, nbad, nsample, size=None)
+
+ Draw samples from a Hypergeometric distribution.
+
+ Samples are drawn from a hypergeometric distribution with specified
+ parameters, `ngood` (ways to make a good selection), `nbad` (ways to make
+ a bad selection), and `nsample` (number of items sampled, which is less
+ than or equal to the sum ``ngood + nbad``).
+
+ Parameters
+ ----------
+ ngood : int or array_like of ints
+ Number of ways to make a good selection. Must be nonnegative and
+ less than 10**9.
+ nbad : int or array_like of ints
+ Number of ways to make a bad selection. Must be nonnegative and
+ less than 10**9.
+ nsample : int or array_like of ints
+ Number of items sampled. Must be nonnegative and less than
+ ``ngood + nbad``.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if `ngood`, `nbad`, and `nsample`
+ are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized hypergeometric distribution. Each
+ sample is the number of good items within a randomly selected subset of
+ size `nsample` taken from a set of `ngood` good items and `nbad` bad items.
+
+ See Also
+ --------
+ scipy.stats.hypergeom : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Hypergeometric distribution is
+
+ .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}},
+
+ where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g`
+
+ for P(x) the probability of ``x`` good results in the drawn sample,
+ g = `ngood`, b = `nbad`, and n = `nsample`.
+
+ Consider an urn with black and white marbles in it, `ngood` of them
+ are black and `nbad` are white. If you draw `nsample` balls without
+ replacement, then the hypergeometric distribution describes the
+ distribution of black balls in the drawn sample.
+
+ Note that this distribution is very similar to the binomial
+ distribution, except that in this case, samples are drawn without
+ replacement, whereas in the Binomial case samples are drawn with
+ replacement (or the sample space is infinite). As the sample space
+ becomes large, this distribution approaches the binomial.
+
+ The arguments `ngood` and `nbad` each must be less than `10**9`. For
+ extremely large arguments, the algorithm that is used to compute the
+ samples [4]_ breaks down because of loss of precision in floating point
+ calculations. For such large values, if `nsample` is not also large,
+ the distribution can be approximated with the binomial distribution,
+ `binomial(n=nsample, p=ngood/(ngood + nbad))`.
+
+ References
+ ----------
+ .. [1] Lentner, Marvin, "Elementary Applied Statistics", Bogden
+ and Quigley, 1972.
+ .. [2] Weisstein, Eric W. "Hypergeometric Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/HypergeometricDistribution.html
+ .. [3] Wikipedia, "Hypergeometric distribution",
+ https://en.wikipedia.org/wiki/Hypergeometric_distribution
+ .. [4] Stadlober, Ernst, "The ratio of uniforms approach for generating
+ discrete random variates", Journal of Computational and Applied
+ Mathematics, 31, pp. 181-189 (1990).
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> rng = np.random.default_rng()
+ >>> ngood, nbad, nsamp = 100, 2, 10
+ # number of good, number of bad, and number of samples
+ >>> s = rng.hypergeometric(ngood, nbad, nsamp, 1000)
+ >>> from matplotlib.pyplot import hist
+ >>> hist(s)
+ # note that it is very unlikely to grab both bad items
+
+ Suppose you have an urn with 15 white and 15 black marbles.
+ If you pull 15 marbles at random, how likely is it that
+ 12 or more of them are one color?
+
+ >>> s = rng.hypergeometric(15, 15, 15, 100000)
+ >>> sum(s>=12)/100000. + sum(s<=3)/100000.
+ # answer = 0.003 ... pretty unlikely!
+
+ """
+ # Cython compile-time constant: the precision limit on ngood/nbad
+ # discussed in the Notes section above.
+ DEF HYPERGEOM_MAX = 10**9
+ cdef np.ndarray ongood, onbad, onsample
+ cdef int64_t lngood, lnbad, lnsample
+
+ # Coerce all three parameters to aligned int64 arrays so they can be
+ # range-checked uniformly in both the scalar and broadcast paths.
+ ongood = <np.ndarray>np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ALIGNED)
+ onbad = <np.ndarray>np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ALIGNED)
+ onsample = <np.ndarray>np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ALIGNED)
+
+ # Fast path: all three parameters are 0-d (scalars) -> validate once
+ # and dispatch through the shared `disc` helper (0 doubles, 3 ints).
+ if np.PyArray_NDIM(ongood) == np.PyArray_NDIM(onbad) == np.PyArray_NDIM(onsample) == 0:
+
+ lngood = <int64_t>ngood
+ lnbad = <int64_t>nbad
+ lnsample = <int64_t>nsample
+
+ if lngood >= HYPERGEOM_MAX or lnbad >= HYPERGEOM_MAX:
+ raise ValueError("both ngood and nbad must be less than %d" %
+ HYPERGEOM_MAX)
+ if lngood + lnbad < lnsample:
+ raise ValueError("ngood + nbad < nsample")
+ return disc(&random_hypergeometric, &self._bitgen, size, self.lock, 0, 3,
+ lngood, 'ngood', CONS_NON_NEGATIVE,
+ lnbad, 'nbad', CONS_NON_NEGATIVE,
+ lnsample, 'nsample', CONS_NON_NEGATIVE)
+
+ # Broadcast path: validate elementwise before sampling.
+ if np.any(ongood >= HYPERGEOM_MAX) or np.any(onbad >= HYPERGEOM_MAX):
+ raise ValueError("both ngood and nbad must be less than %d" %
+ HYPERGEOM_MAX)
+
+ if np.any(np.less(np.add(ongood, onbad), onsample)):
+ raise ValueError("ngood + nbad < nsample")
+
+ return discrete_broadcast_iii(&random_hypergeometric, &self._bitgen, size, self.lock,
+ ongood, 'ngood', CONS_NON_NEGATIVE,
+ onbad, 'nbad', CONS_NON_NEGATIVE,
+ onsample, 'nsample', CONS_NON_NEGATIVE)
+
+ def logseries(self, p, size=None):
+ """
+ logseries(p, size=None)
+
+ Draw samples from a logarithmic series distribution.
+
+ Samples are drawn from a log series distribution with specified
+ shape parameter, 0 < ``p`` < 1.
+
+ Parameters
+ ----------
+ p : float or array_like of floats
+ Shape parameter for the distribution. Must be in the range (0, 1).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``p`` is a scalar. Otherwise,
+ ``np.array(p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized logarithmic series distribution.
+
+ See Also
+ --------
+ scipy.stats.logser : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability mass function for the Log Series distribution is
+
+ .. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)},
+
+ where p = probability.
+
+ The log series distribution is frequently used to represent species
+ richness and occurrence, first proposed by Fisher, Corbet, and
+ Williams in 1943 [2]_. It may also be used to model the numbers of
+ occupants seen in cars [3]_.
+
+ References
+ ----------
+ .. [1] Buzas, Martin A.; Culver, Stephen J., Understanding regional
+ species diversity through the log series distribution of
+ occurrences: BIODIVERSITY RESEARCH Diversity & Distributions,
+ Volume 5, Number 5, September 1999 , pp. 187-195(9).
+ .. [2] Fisher, R.A,, A.S. Corbet, and C.B. Williams. 1943. The
+ relation between the number of species and the number of
+ individuals in a random sample of an animal population.
+ Journal of Animal Ecology, 12:42-58.
+ .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small
+ Data Sets, CRC Press, 1994.
+ .. [4] Wikipedia, "Logarithmic distribution",
+ https://en.wikipedia.org/wiki/Logarithmic_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = .6
+ >>> s = np.random.default_rng().logseries(a, 10000)
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s)
+
+ # plot against distribution
+
+ >>> def logseries(k, p):
+ ... return -p**k/(k*np.log(1-p))
+ >>> plt.plot(bins, logseries(bins, a) * count.max()/
+ ... logseries(bins, a).max(), 'r')
+ >>> plt.show()
+
+ """
+ # CONS_BOUNDED_0_1 validates the p parameter; `disc` performs
+ # broadcasting of `p` against `size` and output allocation.
+ return disc(&random_logseries, &self._bitgen, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ # Multivariate distributions:
+ def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
+ tol=1e-8):
+ """
+ multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
+
+ Draw random samples from a multivariate normal distribution.
+
+ The multivariate normal, multinormal or Gaussian distribution is a
+ generalization of the one-dimensional normal distribution to higher
+ dimensions. Such a distribution is specified by its mean and
+ covariance matrix. These parameters are analogous to the mean
+ (average or "center") and variance (standard deviation, or "width,"
+ squared) of the one-dimensional normal distribution.
+
+ Parameters
+ ----------
+ mean : 1-D array_like, of length N
+ Mean of the N-dimensional distribution.
+ cov : 2-D array_like, of shape (N, N)
+ Covariance matrix of the distribution. It must be symmetric and
+ positive-semidefinite for proper sampling.
+ size : int or tuple of ints, optional
+ Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
+ generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
+ each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
+ If no shape is specified, a single (`N`-D) sample is returned.
+ check_valid : { 'warn', 'raise', 'ignore' }, optional
+ Behavior when the covariance matrix is not positive semidefinite.
+ tol : float, optional
+ Tolerance when checking the singular values in covariance matrix.
+ cov is cast to double before the check.
+
+ Returns
+ -------
+ out : ndarray
+ The drawn samples, of shape *size*, if that was provided. If not,
+ the shape is ``(N,)``.
+
+ In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+ value drawn from the distribution.
+
+ Notes
+ -----
+ The mean is a coordinate in N-dimensional space, which represents the
+ location where samples are most likely to be generated. This is
+ analogous to the peak of the bell curve for the one-dimensional or
+ univariate normal distribution.
+
+ Covariance indicates the level to which two variables vary together.
+ From the multivariate normal distribution, we draw N-dimensional
+ samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix
+ element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.
+ The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its
+ "spread").
+
+ Instead of specifying the full covariance matrix, popular
+ approximations include:
+
+ - Spherical covariance (`cov` is a multiple of the identity matrix)
+ - Diagonal covariance (`cov` has non-negative elements, and only on
+ the diagonal)
+
+ This geometrical property can be seen in two dimensions by plotting
+ generated data-points:
+
+ >>> mean = [0, 0]
+ >>> cov = [[1, 0], [0, 100]] # diagonal covariance
+
+ Diagonal covariance means that points are oriented along x or y-axis:
+
+ >>> import matplotlib.pyplot as plt
+ >>> x, y = np.random.default_rng().multivariate_normal(mean, cov, 5000).T
+ >>> plt.plot(x, y, 'x')
+ >>> plt.axis('equal')
+ >>> plt.show()
+
+ Note that the covariance matrix must be positive semidefinite (a.k.a.
+ nonnegative-definite). Otherwise, the behavior of this method is
+ undefined and backwards compatibility is not guaranteed.
+
+ References
+ ----------
+ .. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
+ Processes," 3rd ed., New York: McGraw-Hill, 1991.
+ .. [2] Duda, R. O., Hart, P. E., and Stork, D. G., "Pattern
+ Classification," 2nd ed., New York: Wiley, 2001.
+
+ Examples
+ --------
+ >>> mean = (1, 2)
+ >>> cov = [[1, 0], [0, 1]]
+ >>> x = np.random.default_rng().multivariate_normal(mean, cov, (3, 3))
+ >>> x.shape
+ (3, 3, 2)
+
+ The following is probably true, given that 0.6 is roughly twice the
+ standard deviation:
+
+ >>> list((x[0,0,:] - mean) < 0.6)
+ [True, True] # random
+
+ """
+ # numpy.dual dispatches to the scipy implementation when scipy is
+ # installed, otherwise numpy.linalg — either way the SVD contract
+ # used below is the same.
+ from numpy.dual import svd
+
+ # Check preconditions on arguments
+ mean = np.array(mean)
+ cov = np.array(cov)
+ # Normalize `size` to a list of leading output dimensions; an int
+ # becomes a 1-element shape, None becomes no leading dimensions.
+ if size is None:
+ shape = []
+ elif isinstance(size, (int, long, np.integer)):
+ shape = [size]
+ else:
+ shape = size
+
+ if len(mean.shape) != 1:
+ raise ValueError("mean must be 1 dimensional")
+ if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
+ raise ValueError("cov must be 2 dimensional and square")
+ if mean.shape[0] != cov.shape[0]:
+ raise ValueError("mean and cov must have same length")
+
+ # Compute shape of output and create a matrix of independent
+ # standard normally distributed random numbers. The matrix has rows
+ # with the same length as mean and as many rows are necessary to
+ # form a matrix of shape final_shape.
+ final_shape = list(shape[:])
+ final_shape.append(mean.shape[0])
+ x = self.standard_normal(final_shape).reshape(-1, mean.shape[0])
+
+ # Transform matrix of standard normals into matrix where each row
+ # contains multivariate normals with the desired covariance.
+ # Compute A such that dot(transpose(A),A) == cov.
+ # Then the matrix products of the rows of x and A has the desired
+ # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value
+ # decomposition of cov is such an A.
+ #
+ # Also check that cov is positive-semidefinite. If so, the u.T and v
+ # matrices should be equal up to roundoff error if cov is
+ # symmetric and the singular value of the corresponding row is
+ # not zero. We continue to use the SVD rather than Cholesky in
+ # order to preserve current outputs. Note that symmetry has not
+ # been checked.
+
+ # GH10839, ensure double to make tol meaningful
+ cov = cov.astype(np.double)
+ (u, s, v) = svd(cov)
+
+ if check_valid != 'ignore':
+ if check_valid != 'warn' and check_valid != 'raise':
+ raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'")
+
+ # PSD check: cov should be reconstructible from its singular
+ # vectors/values to within `tol`.
+ psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
+ if not psd:
+ if check_valid == 'warn':
+ warnings.warn("covariance is not positive-semidefinite.",
+ RuntimeWarning)
+ else:
+ raise ValueError("covariance is not positive-semidefinite.")
+
+ # Color the standard normals with sqrt(s) * v and shift by the mean.
+ x = np.dot(x, np.sqrt(s)[:, None] * v)
+ x += mean
+ x.shape = tuple(final_shape)
+ return x
+
+ def multinomial(self, object n, object pvals, size=None):
+ """
+ multinomial(n, pvals, size=None)
+
+ Draw samples from a multinomial distribution.
+
+ The multinomial distribution is a multivariate generalization of the
+ binomial distribution. Take an experiment with one of ``p``
+ possible outcomes. An example of such an experiment is throwing a dice,
+ where the outcome can be 1 through 6. Each sample drawn from the
+ distribution represents `n` such experiments. Its values,
+ ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the
+ outcome was ``i``.
+
+ Parameters
+ ----------
+ n : int or array-like of ints
+ Number of experiments.
+ pvals : sequence of floats, length p
+ Probabilities of each of the ``p`` different outcomes. These
+ must sum to 1 (however, the last element is always assumed to
+ account for the remaining probability, as long as
+ ``sum(pvals[:-1]) <= 1)``.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The drawn samples, of shape *size*, if that was provided. If not,
+ the shape is ``(N,)``.
+
+ In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+ value drawn from the distribution.
+
+ Examples
+ --------
+ Throw a dice 20 times:
+
+ >>> rng = np.random.default_rng()
+ >>> rng.multinomial(20, [1/6.]*6, size=1)
+ array([[4, 1, 7, 5, 2, 1]]) # random
+
+ It landed 4 times on 1, once on 2, etc.
+
+ Now, throw the dice 20 times, and 20 times again:
+
+ >>> rng.multinomial(20, [1/6.]*6, size=2)
+ array([[3, 4, 3, 3, 4, 3],
+ [2, 4, 3, 4, 0, 7]]) # random
+
+ For the first run, we threw 3 times 1, 4 times 2, etc. For the second,
+ we threw 2 times 1, 4 times 2, etc.
+
+ Now, do one experiment throwing the dice 10 time, and 10 times again,
+ and another throwing the dice 20 times, and 20 times again:
+
+ >>> rng.multinomial([[10], [20]], [1/6.]*6, size=2)
+ array([[[2, 4, 0, 1, 2, 1],
+ [1, 3, 0, 3, 1, 2]],
+ [[1, 4, 4, 4, 4, 3],
+ [3, 3, 2, 5, 5, 2]]]) # random
+
+ The first array shows the outcomes of throwing the dice 10 times, and
+ the second shows the outcomes from throwing the dice 20 times.
+
+ A loaded die is more likely to land on number 6:
+
+ >>> rng.multinomial(100, [1/7.]*5 + [2/7.])
+ array([11, 16, 14, 17, 16, 26]) # random
+
+ The probability inputs should be normalized. As an implementation
+ detail, the value of the last entry is ignored and assumed to take
+ up any leftover probability mass, but this should not be relied on.
+ A biased coin which has twice as much weight on one side as on the
+ other should be sampled like so:
+
+ >>> rng.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
+ array([38, 62]) # random
+
+ not like:
+
+ >>> rng.multinomial(100, [1.0, 2.0]) # WRONG
+ Traceback (most recent call last):
+ ValueError: pvals < 0, pvals > 1 or pvals contains NaNs
+
+ """
+
+ cdef np.npy_intp d, i, sz, offset
+ cdef np.ndarray parr, mnarr, on, temp_arr
+ cdef double *pix
+ cdef int64_t *mnix
+ cdef int64_t ni
+ cdef np.broadcast it
+
+ d = len(pvals)
+ on = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
+ parr = <np.ndarray>np.PyArray_FROM_OTF(
+ pvals, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pix = <double*>np.PyArray_DATA(parr)
+ check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
+ # Kahan summation bounds float rounding error while checking that the
+ # first d-1 probabilities do not exceed 1 (small 1e-12 slack).
+ if kahan_sum(pix, d-1) > (1.0 + 1e-12):
+ raise ValueError("sum(pvals[:-1]) > 1.0")
+
+ if np.PyArray_NDIM(on) != 0: # vector
+ check_array_constraint(on, 'n', CONS_NON_NEGATIVE)
+ # Broadcast `n` against `size` (if given) to derive the leading
+ # output shape, then append the category dimension d.
+ if size is None:
+ it = np.PyArray_MultiIterNew1(on)
+ else:
+ temp = np.empty(size, dtype=np.int8)
+ temp_arr = <np.ndarray>temp
+ it = np.PyArray_MultiIterNew2(on, temp_arr)
+ shape = it.shape + (d,)
+ multin = np.zeros(shape, dtype=np.int64)
+ mnarr = <np.ndarray>multin
+ mnix = <int64_t*>np.PyArray_DATA(mnarr)
+ offset = 0
+ sz = it.size
+ with self.lock, nogil:
+ for i in range(sz):
+ ni = (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
+ random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
+ offset += d
+ np.PyArray_MultiIter_NEXT(it)
+ return multin
+
+ # Scalar `n`: derive the output shape directly from `size`.
+ if size is None:
+ shape = (d,)
+ else:
+ try:
+ shape = (operator.index(size), d)
+ except TypeError:
+ # `size` is not a single integer index; treat it as a shape tuple.
+ shape = tuple(size) + (d,)
+
+ multin = np.zeros(shape, dtype=np.int64)
+ mnarr = <np.ndarray>multin
+ mnix = <int64_t*>np.PyArray_DATA(mnarr)
+ sz = np.PyArray_SIZE(mnarr)
+ ni = n
+ check_constraint(ni, 'n', CONS_NON_NEGATIVE)
+ offset = 0
+ with self.lock, nogil:
+ for i in range(sz // d):
+ random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
+ offset += d
+
+ return multin
+
+ def dirichlet(self, object alpha, size=None):
+ """
+ dirichlet(alpha, size=None)
+
+ Draw samples from the Dirichlet distribution.
+
+ Draw `size` samples of dimension k from a Dirichlet distribution. A
+ Dirichlet-distributed random variable can be seen as a multivariate
+ generalization of a Beta distribution. The Dirichlet distribution
+ is a conjugate prior of a multinomial distribution in Bayesian
+ inference.
+
+ Parameters
+ ----------
+ alpha : array
+ Parameter of the distribution (k dimension for sample of
+ dimension k).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ samples : ndarray,
+ The drawn samples, of shape (size, alpha.ndim).
+
+ Raises
+ -------
+ ValueError
+ If any value in alpha is less than or equal to zero
+
+ Notes
+ -----
+ The Dirichlet distribution is a distribution over vectors
+ :math:`x` that fulfil the conditions :math:`x_i>0` and
+ :math:`\\sum_{i=1}^k x_i = 1`.
+
+ The probability density function :math:`p` of a
+ Dirichlet-distributed random vector :math:`X` is
+ proportional to
+
+ .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i},
+
+ where :math:`\\alpha` is a vector containing the positive
+ concentration parameters.
+
+ The method uses the following property for computation: let :math:`Y`
+ be a random vector which has components that follow a standard gamma
+ distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y`
+ is Dirichlet-distributed
+
+ References
+ ----------
+ .. [1] David McKay, "Information Theory, Inference and Learning
+ Algorithms," chapter 23,
+ http://www.inference.org.uk/mackay/itila/
+ .. [2] Wikipedia, "Dirichlet distribution",
+ https://en.wikipedia.org/wiki/Dirichlet_distribution
+
+ Examples
+ --------
+ Taking an example cited in Wikipedia, this distribution can be used if
+ one wanted to cut strings (each of initial length 1.0) into K pieces
+ with different lengths, where each piece had, on average, a designated
+ average length, but allowing some variation in the relative sizes of
+ the pieces.
+
+ >>> s = np.random.default_rng().dirichlet((10, 5, 3), 20).transpose()
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.barh(range(20), s[0])
+ >>> plt.barh(range(20), s[1], left=s[0], color='g')
+ >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
+ >>> plt.title("Lengths of Strings")
+
+ """
+
+ # =================
+ # Pure python algo
+ # =================
+ # alpha = N.atleast_1d(alpha)
+ # k = alpha.size
+
+ # if n == 1:
+ # val = N.zeros(k)
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val)
+ # else:
+ # val = N.zeros((k, n))
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val, axis = 0)
+ # val = val.T
+ # return val
+
+ cdef np.npy_intp k, totsize, i, j
+ cdef np.ndarray alpha_arr, val_arr
+ cdef double *alpha_data
+ cdef double *val_data
+ cdef double acc, invacc
+
+ k = len(alpha)
+ alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(
+ alpha, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ if np.any(np.less_equal(alpha_arr, 0)):
+ raise ValueError('alpha <= 0')
+ alpha_data = <double*>np.PyArray_DATA(alpha_arr)
+
+ # Derive the output shape: an int `size` adds one leading dimension,
+ # anything else is treated as a shape tuple; the trailing dimension
+ # is always k (the length of alpha).
+ if size is None:
+ shape = (k,)
+ else:
+ try:
+ shape = (operator.index(size), k)
+ except TypeError:
+ # `size` is not a single integer index; treat it as a shape tuple.
+ shape = tuple(size) + (k,)
+
+ diric = np.zeros(shape, np.float64)
+ val_arr = <np.ndarray>diric
+ val_data= <double*>np.PyArray_DATA(val_arr)
+
+ i = 0
+ totsize = np.PyArray_SIZE(val_arr)
+ # For each output row: draw k standard gamma variates with shapes
+ # alpha[j] and normalize them so they sum to 1 (the gamma-ratio
+ # construction described in Notes).
+ with self.lock, nogil:
+ while i < totsize:
+ acc = 0.0
+ for j in range(k):
+ val_data[i+j] = random_standard_gamma_zig(&self._bitgen,
+ alpha_data[j])
+ acc = acc + val_data[i + j]
+ invacc = 1/acc
+ for j in range(k):
+ val_data[i + j] = val_data[i + j] * invacc
+ i = i + k
+
+ return diric
+
+ # Shuffling and permutations:
+ def shuffle(self, object x, axis=0):
+ """
+ shuffle(x, axis=0)
+
+ Modify a sequence in-place by shuffling its contents.
+
+ The order of sub-arrays is changed but their contents remains the same.
+
+ Parameters
+ ----------
+ x : array_like
+ The array or list to be shuffled.
+ axis : int, optional
+ The axis which `x` is shuffled along. Default is 0.
+ It is only supported on `ndarray` objects.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> arr = np.arange(10)
+ >>> rng.shuffle(arr)
+ >>> arr
+ [1 7 5 2 9 4 3 6 0 8] # random
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.shuffle(arr)
+ >>> arr
+ array([[3, 4, 5], # random
+ [6, 7, 8],
+ [0, 1, 2]])
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.shuffle(arr, axis=1)
+ >>> arr
+ array([[2, 0, 1], # random
+ [5, 3, 4],
+ [8, 6, 7]])
+ """
+ cdef:
+ np.npy_intp i, j, n = len(x), stride, itemsize
+ char* x_ptr
+ char* buf_ptr
+
+ # Validates `axis` against x's dimensionality and maps negative
+ # values; raises for out-of-range axes even on the list path.
+ axis = normalize_axis_index(axis, np.ndim(x))
+
+ if type(x) is np.ndarray and x.ndim == 1 and x.size:
+ # Fast, statically typed path: shuffle the underlying buffer.
+ # Only for non-empty, 1d objects of class ndarray (subclasses such
+ # as MaskedArrays may not support this approach).
+ x_ptr = <char*><size_t>np.PyArray_DATA(x)
+ stride = x.strides[0]
+ itemsize = x.dtype.itemsize
+ # As the array x could contain python objects we use a buffer
+ # of bytes for the swaps to avoid leaving one of the objects
+ # within the buffer and erroneously decrementing it's refcount
+ # when the function exits.
+ buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
+ buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
+ with self.lock:
+ # We trick gcc into providing a specialized implementation for
+ # the most common case, yielding a ~33% performance improvement.
+ # Note that apparently, only one branch can ever be specialized.
+ if itemsize == sizeof(np.npy_intp):
+ self._shuffle_raw(n, 1, sizeof(np.npy_intp), stride, x_ptr, buf_ptr)
+ else:
+ self._shuffle_raw(n, 1, itemsize, stride, x_ptr, buf_ptr)
+ elif isinstance(x, np.ndarray) and x.ndim and x.size:
+ # Multidimensional ndarray (or 1-d subclass): swap whole
+ # sub-arrays along `axis` via a temporary buffer slice.
+ x = np.swapaxes(x, 0, axis)
+ buf = np.empty_like(x[0, ...])
+ with self.lock:
+ for i in reversed(range(1, len(x))):
+ j = random_interval(&self._bitgen, i)
+ if i == j:
+ # i == j is not needed and memcpy is undefined.
+ continue
+ buf[...] = x[j]
+ x[j] = x[i]
+ x[i] = buf
+ else:
+ # Untyped path.
+ if axis != 0:
+ raise NotImplementedError("Axis argument is only supported "
+ "on ndarray objects")
+ # Fisher-Yates on an arbitrary mutable sequence via item swaps.
+ with self.lock:
+ for i in reversed(range(1, n)):
+ j = random_interval(&self._bitgen, i)
+ x[i], x[j] = x[j], x[i]
+
+ cdef inline _shuffle_raw(self, np.npy_intp n, np.npy_intp first,
+ np.npy_intp itemsize, np.npy_intp stride,
+ char* data, char* buf):
+ """
+ Parameters
+ ----------
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ itemsize
+ Size in bytes of item
+ stride
+ Array stride
+ data
+ Location of data
+ buf
+ Location of buffer (itemsize)
+ """
+ cdef np.npy_intp i, j
+ for i in reversed(range(first, n)):
+ j = random_interval(&self._bitgen, i)
+ string.memcpy(buf, data + j * stride, itemsize)
+ string.memcpy(data + j * stride, data + i * stride, itemsize)
+ string.memcpy(data + i * stride, buf, itemsize)
+
+ cdef inline void _shuffle_int(self, np.npy_intp n, np.npy_intp first,
+ int64_t* data) nogil:
+ """
+ Parameters
+ ----------
+ n
+ Number of elements in data
+ first
+ First observation to shuffle. Shuffles n-1,
+ n-2, ..., first, so that when first=1 the entire
+ array is shuffled
+ data
+ Location of data
+ """
+ cdef np.npy_intp i, j
+ cdef int64_t temp
+ for i in reversed(range(first, n)):
+ j = random_bounded_uint64(&self._bitgen, 0, i, 0, 0)
+ temp = data[j]
+ data[j] = data[i]
+ data[i] = temp
+
+ def permutation(self, object x, axis=0):
+ """
+ permutation(x, axis=0)
+
+ Randomly permute a sequence, or return a permuted range.
+
+ Parameters
+ ----------
+ x : int or array_like
+ If `x` is an integer, randomly permute ``np.arange(x)``.
+ If `x` is an array, make a copy and shuffle the elements
+ randomly.
+ axis : int, optional
+ The axis which `x` is shuffled along. Default is 0.
+
+ Returns
+ -------
+ out : ndarray
+ Permuted sequence or array range.
+
+ Examples
+ --------
+ >>> rng = np.random.default_rng()
+ >>> rng.permutation(10)
+ array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random
+
+ >>> rng.permutation([1, 4, 9, 12, 15])
+ array([15, 1, 9, 4, 12]) # random
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.permutation(arr)
+ array([[6, 7, 8], # random
+ [0, 1, 2],
+ [3, 4, 5]])
+
+ >>> rng.permutation("abc")
+ Traceback (most recent call last):
+ ...
+ numpy.AxisError: x must be an integer or at least 1-dimensional
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> rng.permutation(arr, axis=1)
+ array([[0, 2, 1], # random
+ [3, 5, 4],
+ [6, 8, 7]])
+
+ """
+ if isinstance(x, (int, np.integer)):
+ arr = np.arange(x)
+ self.shuffle(arr)
+ return arr
+
+ arr = np.asarray(x)
+
+ axis = normalize_axis_index(axis, arr.ndim)
+
+ # shuffle has fast-path for 1-d
+ if arr.ndim == 1:
+ # Return a copy if same memory
+ if np.may_share_memory(arr, x):
+ arr = np.array(arr)
+ self.shuffle(arr)
+ return arr
+
+ # Shuffle index array, dtype to ensure fast path
+ idx = np.arange(arr.shape[axis], dtype=np.intp)
+ self.shuffle(idx)
+ slices = [slice(None)]*arr.ndim
+ slices[axis] = idx
+ return arr[tuple(slices)]
+
+
+def default_rng(seed=None):
+ """Construct a new Generator with the default BitGenerator (PCG64).
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], ISeedSequence, BitGenerator, Generator}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in an implementor of the `ISeedSequence` interface like
+ `SeedSequence`.
+ Additionally, when passed a `BitGenerator`, it will be wrapped by
+ `Generator`. If passed a `Generator`, it will be returned unaltered.
+
+ Notes
+ -----
+ When `seed` is omitted or ``None``, a new `BitGenerator` and `Generator` will
+ be instantiated each time. This function does not manage a default global
+ instance.
+ """
+ if _check_bit_generator(seed):
+ # We were passed a BitGenerator, so just wrap it up.
+ return Generator(seed)
+ elif isinstance(seed, Generator):
+ # Pass through a Generator.
+ return seed
+ # Otherwise we need to instantiate a new BitGenerator and Generator as
+ # normal.
+ return Generator(PCG64(seed))
diff --git a/numpy/random/info.py b/numpy/random/info.py
deleted file mode 100644
index b9fd7f26a..000000000
--- a/numpy/random/info.py
+++ /dev/null
@@ -1,5 +0,0 @@
-from __future__ import division, absolute_import, print_function
-
-from .. import __doc__
-
-depends = ['core']
diff --git a/numpy/random/legacy_distributions.pxd b/numpy/random/legacy_distributions.pxd
new file mode 100644
index 000000000..c681388db
--- /dev/null
+++ b/numpy/random/legacy_distributions.pxd
@@ -0,0 +1,50 @@
+#cython: language_level=3
+
+from libc.stdint cimport int64_t
+
+import numpy as np
+cimport numpy as np
+
+from .distributions cimport bitgen_t, binomial_t
+
+cdef extern from "legacy-distributions.h":
+
+ struct aug_bitgen:
+ bitgen_t *bit_generator
+ int has_gauss
+ double gauss
+
+ ctypedef aug_bitgen aug_bitgen_t
+
+ double legacy_gauss(aug_bitgen_t *aug_state) nogil
+ double legacy_pareto(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_weibull(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) nogil
+ double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) nogil
+ double legacy_standard_t(aug_bitgen_t *aug_state, double df) nogil
+
+ double legacy_standard_exponential(aug_bitgen_t *aug_state) nogil
+ double legacy_power(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) nogil
+ double legacy_power(aug_bitgen_t *aug_state, double a) nogil
+ double legacy_chisquare(aug_bitgen_t *aug_state, double df) nogil
+ double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
+ double nonc) nogil
+ double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden,
+ double nonc) nogil
+ double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) nogil
+ double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) nogil
+ int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial) nogil
+ int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) nogil
+ int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good, int64_t bad, int64_t sample) nogil
+ int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) nogil
+ int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) nogil
+ int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) nogil
+ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) nogil
+ void legacy_random_multinomial(bitgen_t *bitgen_state, long n, long *mnix, double *pix, np.npy_intp d, binomial_t *binomial) nogil
+ double legacy_standard_cauchy(aug_bitgen_t *state) nogil
+ double legacy_beta(aug_bitgen_t *aug_state, double a, double b) nogil
+ double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) nogil
+ double legacy_exponential(aug_bitgen_t *aug_state, double scale) nogil
+ double legacy_power(aug_bitgen_t *state, double a) nogil
diff --git a/numpy/random/mt19937.pyx b/numpy/random/mt19937.pyx
new file mode 100644
index 000000000..7d0f6cd22
--- /dev/null
+++ b/numpy/random/mt19937.pyx
@@ -0,0 +1,274 @@
+import operator
+
+import numpy as np
+cimport numpy as np
+
+from .common cimport *
+from .bit_generator cimport BitGenerator, SeedSequence
+
+__all__ = ['MT19937']
+
+np.import_array()
+
+cdef extern from "src/mt19937/mt19937.h":
+
+ struct s_mt19937_state:
+ uint32_t key[624]
+ int pos
+
+ ctypedef s_mt19937_state mt19937_state
+
+ uint64_t mt19937_next64(mt19937_state *state) nogil
+ uint32_t mt19937_next32(mt19937_state *state) nogil
+ double mt19937_next_double(mt19937_state *state) nogil
+ void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key, int key_length)
+ void mt19937_seed(mt19937_state *state, uint32_t seed)
+ void mt19937_jump(mt19937_state *state)
+
+ enum:
+ RK_STATE_LEN
+
+cdef uint64_t mt19937_uint64(void *st) nogil:
+ return mt19937_next64(<mt19937_state *> st)
+
+cdef uint32_t mt19937_uint32(void *st) nogil:
+ return mt19937_next32(<mt19937_state *> st)
+
+cdef double mt19937_double(void *st) nogil:
+ return mt19937_next_double(<mt19937_state *> st)
+
+cdef uint64_t mt19937_raw(void *st) nogil:
+ return <uint64_t>mt19937_next32(<mt19937_state *> st)
+
+cdef class MT19937(BitGenerator):
+ """
+ MT19937(seed=None)
+
+ Container for the Mersenne Twister pseudo-random number generator.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], ISeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in an implementor of the `ISeedSequence` interface like
+ `SeedSequence`.
+
+ Attributes
+ ----------
+ lock: threading.Lock
+ Lock instance that is shared so that the same bit generator can
+ be used in multiple Generators without corrupting the state. Code that
+ generates values from a bit generator should hold the bit generator's
+ lock.
+
+ Notes
+ -----
+ ``MT19937`` provides a capsule containing function pointers that produce
+ doubles, and unsigned 32- and 64-bit integers [1]_. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ The Python stdlib module "random" also contains a Mersenne Twister
+ pseudo-random number generator.
+
+ **State and Seeding**
+
+ The ``MT19937`` state vector consists of a 624-element array of
+ 32-bit unsigned integers plus a single integer value between 0 and 624
+ that indexes the current position within the main array.
+
+ The input seed is processed by `SeedSequence` to fill the whole state. The
+ first element is reset such that only its most significant bit is set.
+
+ **Parallel Features**
+
+ The preferred way to use a BitGenerator in parallel applications is to use
+ the `SeedSequence.spawn` method to obtain entropy values, and to use these
+ to generate new BitGenerators:
+
+ >>> from numpy.random import Generator, MT19937, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> rg = [Generator(MT19937(s)) for s in sg.spawn(10)]
+
+ Another method is to use `MT19937.jumped` which advances the state as-if
+ :math:`2^{128}` random numbers have been generated ([1]_, [2]_). This
+ allows the original sequence to be split so that distinct segments can be
+ used in each worker process. All generators should be chained to ensure
+ that the segments come from the same sequence.
+
+ >>> from numpy.random import Generator, MT19937, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> bit_generator = MT19937(sg)
+ >>> rg = []
+ >>> for _ in range(10):
+ ... rg.append(Generator(bit_generator))
+ ... # Chain the BitGenerators
+ ... bit_generator = bit_generator.jumped()
+
+ **Compatibility Guarantee**
+
+ ``MT19937`` makes a guarantee that a fixed seed will always produce
+ the same random integer stream.
+
+ References
+ ----------
+ .. [1] Hiroshi Haramoto, Makoto Matsumoto, and Pierre L\'Ecuyer, "A Fast
+ Jump Ahead Algorithm for Linear Recurrences in a Polynomial Space",
+ Sequences and Their Applications - SETA, 290--298, 2008.
+ .. [2] Hiroshi Haramoto, Makoto Matsumoto, Takuji Nishimura, François
+ Panneton, Pierre L\'Ecuyer, "Efficient Jump Ahead for F2-Linear
+ Random Number Generators", INFORMS JOURNAL ON COMPUTING, Vol. 20,
+ No. 3, Summer 2008, pp. 385-390.
+
+ """
+ cdef mt19937_state rng_state
+
+ def __init__(self, seed=None):
+ BitGenerator.__init__(self, seed)
+ val = self._seed_seq.generate_state(RK_STATE_LEN, np.uint32)
+ # MSB is 1; assuring non-zero initial array
+ self.rng_state.key[0] = 0x80000000UL
+ for i in range(1, RK_STATE_LEN):
+ self.rng_state.key[i] = val[i]
+ self.rng_state.pos = i
+
+ self._bitgen.state = &self.rng_state
+ self._bitgen.next_uint64 = &mt19937_uint64
+ self._bitgen.next_uint32 = &mt19937_uint32
+ self._bitgen.next_double = &mt19937_double
+ self._bitgen.next_raw = &mt19937_raw
+
+ def _legacy_seeding(self, seed):
+ """
+ _legacy_seeding(seed)
+
+ Seed the generator in a backward compatible way. For modern
+ applications, creating a new instance is preferable. Calling this
+ overrides self._seed_seq
+
+ Parameters
+ ----------
+ seed : {None, int, array_like}
+ Random seed initializing the pseudo-random number generator.
+ Can be an integer in [0, 2**32-1], array of integers in
+ [0, 2**32-1], a `SeedSequence`, or ``None``. If `seed`
+ is ``None``, then fresh, unpredictable entropy will be pulled from
+ the OS.
+
+ Raises
+ ------
+ ValueError
+ If seed values are out of range for the PRNG.
+ """
+ cdef np.ndarray obj
+ with self.lock:
+ try:
+ if seed is None:
+ seed = SeedSequence()
+ val = seed.generate_state(RK_STATE_LEN)
+ # MSB is 1; assuring non-zero initial array
+ self.rng_state.key[0] = 0x80000000UL
+ for i in range(1, RK_STATE_LEN):
+ self.rng_state.key[i] = val[i]
+ else:
+ if hasattr(seed, 'squeeze'):
+ seed = seed.squeeze()
+ idx = operator.index(seed)
+ if idx > int(2**32 - 1) or idx < 0:
+ raise ValueError("Seed must be between 0 and 2**32 - 1")
+ mt19937_seed(&self.rng_state, seed)
+ except TypeError:
+ obj = np.asarray(seed)
+ if obj.size == 0:
+ raise ValueError("Seed must be non-empty")
+ obj = obj.astype(np.int64, casting='safe')
+ if obj.ndim != 1:
+ raise ValueError("Seed array must be 1-d")
+ if ((obj > int(2**32 - 1)) | (obj < 0)).any():
+ raise ValueError("Seed must be between 0 and 2**32 - 1")
+ obj = obj.astype(np.uint32, casting='unsafe', order='C')
+ mt19937_init_by_array(&self.rng_state, <uint32_t*> obj.data, np.PyArray_DIM(obj, 0))
+ self._seed_seq = None
+
+ cdef jump_inplace(self, iter):
+ """
+ Jump state in-place
+
+ Not part of public API
+
+ Parameters
+ ----------
+ iter : integer, positive
+ Number of times to jump the state of the rng.
+ """
+ cdef np.npy_intp i
+ for i in range(iter):
+ mt19937_jump(&self.rng_state)
+
+
+ def jumped(self, np.npy_intp jumps=1):
+ """
+ jumped(jumps=1)
+
+ Returns a new bit generator with the state jumped
+
+ The state of the returned bit generator is jumped as-if
+ 2**(128 * jumps) random numbers have been generated.
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the bit generator returned
+
+ Returns
+ -------
+ bit_generator : MT19937
+ New instance of generator jumped iter times
+ """
+ cdef MT19937 bit_generator
+
+ bit_generator = self.__class__()
+ bit_generator.state = self.state
+ bit_generator.jump_inplace(jumps)
+
+ return bit_generator
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ key = np.zeros(624, dtype=np.uint32)
+ for i in range(624):
+ key[i] = self.rng_state.key[i]
+
+ return {'bit_generator': self.__class__.__name__,
+ 'state': {'key': key, 'pos': self.rng_state.pos}}
+
+ @state.setter
+ def state(self, value):
+ if isinstance(value, tuple):
+ if value[0] != 'MT19937' or len(value) not in (3, 5):
+ raise ValueError('state is not a legacy MT19937 state')
+ value ={'bit_generator': 'MT19937',
+ 'state': {'key': value[1], 'pos': value[2]}}
+
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'PRNG'.format(self.__class__.__name__))
+ key = value['state']['key']
+ for i in range(624):
+ self.rng_state.key[i] = key[i]
+ self.rng_state.pos = value['state']['pos']
diff --git a/numpy/random/mtrand/mtrand.pyx b/numpy/random/mtrand.pyx
index ab5f64336..c469a4645 100644
--- a/numpy/random/mtrand/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -1,627 +1,76 @@
-# mtrand.pyx -- A Pyrex wrapper of Jean-Sebastien Roy's RandomKit
-#
-# Copyright 2005 Robert Kern (robert.kern@gmail.com)
-#
-# Permission is hereby granted, free of charge, to any person obtaining a
-# copy of this software and associated documentation files (the
-# "Software"), to deal in the Software without restriction, including
-# without limitation the rights to use, copy, modify, merge, publish,
-# distribute, sublicense, and/or sell copies of the Software, and to
-# permit persons to whom the Software is furnished to do so, subject to
-# the following conditions:
-#
-# The above copyright notice and this permission notice shall be included
-# in all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
-# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-include "Python.pxi"
-include "numpy.pxd"
-include "randint_helpers.pxi"
-include "cpython/pycapsule.pxd"
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+import operator
+import warnings
-from libc cimport string
+import numpy as np
-cdef extern from "math.h":
- double exp(double x)
- double log(double x)
- double floor(double x)
- double sin(double x)
- double cos(double x)
-
-cdef extern from "numpy/npy_math.h":
- int npy_isfinite(double x)
-
-cdef extern from "mtrand_py_helper.h":
- object empty_py_bytes(npy_intp length, void **bytes)
-
-cdef extern from "randomkit.h":
-
- ctypedef struct rk_state:
- unsigned long key[624]
- int pos
- int has_gauss
- double gauss
-
- ctypedef enum rk_error:
- RK_NOERR = 0
- RK_ENODEV = 1
- RK_ERR_MAX = 2
-
- char *rk_strerror[2]
-
- # 0xFFFFFFFFUL
- unsigned long RK_MAX
-
- void rk_seed(unsigned long seed, rk_state *state)
- rk_error rk_randomseed(rk_state *state)
- unsigned long rk_random(rk_state *state)
- long rk_long(rk_state *state) nogil
- unsigned long rk_ulong(rk_state *state) nogil
- unsigned long rk_interval(unsigned long max, rk_state *state) nogil
- double rk_double(rk_state *state) nogil
- void rk_fill(void *buffer, size_t size, rk_state *state) nogil
- rk_error rk_devfill(void *buffer, size_t size, int strong)
- rk_error rk_altfill(void *buffer, size_t size, int strong,
- rk_state *state) nogil
- double rk_gauss(rk_state *state) nogil
- void rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt,
- npy_uint64 *out, rk_state *state) nogil
- void rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt,
- npy_uint32 *out, rk_state *state) nogil
- void rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt,
- npy_uint16 *out, rk_state *state) nogil
- void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt,
- npy_uint8 *out, rk_state *state) nogil
- void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt,
- npy_bool *out, rk_state *state) nogil
-
-
-cdef extern from "distributions.h":
- # do not need the GIL, but they do need a lock on the state !! */
-
- double rk_normal(rk_state *state, double loc, double scale) nogil
- double rk_standard_exponential(rk_state *state) nogil
- double rk_exponential(rk_state *state, double scale) nogil
- double rk_uniform(rk_state *state, double loc, double scale) nogil
- double rk_standard_gamma(rk_state *state, double shape) nogil
- double rk_gamma(rk_state *state, double shape, double scale) nogil
- double rk_beta(rk_state *state, double a, double b) nogil
- double rk_chisquare(rk_state *state, double df) nogil
- double rk_noncentral_chisquare(rk_state *state, double df, double nonc) nogil
- double rk_f(rk_state *state, double dfnum, double dfden) nogil
- double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc) nogil
- double rk_standard_cauchy(rk_state *state) nogil
- double rk_standard_t(rk_state *state, double df) nogil
- double rk_vonmises(rk_state *state, double mu, double kappa) nogil
- double rk_pareto(rk_state *state, double a) nogil
- double rk_weibull(rk_state *state, double a) nogil
- double rk_power(rk_state *state, double a) nogil
- double rk_laplace(rk_state *state, double loc, double scale) nogil
- double rk_gumbel(rk_state *state, double loc, double scale) nogil
- double rk_logistic(rk_state *state, double loc, double scale) nogil
- double rk_lognormal(rk_state *state, double mode, double sigma) nogil
- double rk_rayleigh(rk_state *state, double mode) nogil
- double rk_wald(rk_state *state, double mean, double scale) nogil
- double rk_triangular(rk_state *state, double left, double mode, double right) nogil
-
- long rk_binomial(rk_state *state, long n, double p) nogil
- long rk_binomial_btpe(rk_state *state, long n, double p) nogil
- long rk_binomial_inversion(rk_state *state, long n, double p) nogil
- long rk_negative_binomial(rk_state *state, double n, double p) nogil
- long rk_poisson(rk_state *state, double lam) nogil
- long rk_poisson_mult(rk_state *state, double lam) nogil
- long rk_poisson_ptrs(rk_state *state, double lam) nogil
- long rk_zipf(rk_state *state, double a) nogil
- long rk_geometric(rk_state *state, double p) nogil
- long rk_hypergeometric(rk_state *state, long good, long bad, long sample) nogil
- long rk_logseries(rk_state *state, double p) nogil
-
-ctypedef double (* rk_cont0)(rk_state *state) nogil
-ctypedef double (* rk_cont1)(rk_state *state, double a) nogil
-ctypedef double (* rk_cont2)(rk_state *state, double a, double b) nogil
-ctypedef double (* rk_cont3)(rk_state *state, double a, double b, double c) nogil
-
-ctypedef long (* rk_disc0)(rk_state *state) nogil
-ctypedef long (* rk_discnp)(rk_state *state, long n, double p) nogil
-ctypedef long (* rk_discdd)(rk_state *state, double n, double p) nogil
-ctypedef long (* rk_discnmN)(rk_state *state, long n, long m, long N) nogil
-ctypedef long (* rk_discd)(rk_state *state, double a) nogil
-
-
-cdef extern from "initarray.h":
- void init_by_array(rk_state *self, unsigned long *init_key,
- npy_intp key_length)
-
-# Initialize numpy
-import_array()
+from .bounded_integers import _integers_types
+from .mt19937 import MT19937 as _MT19937
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from cpython cimport (Py_INCREF, PyFloat_AsDouble)
+from libc cimport string
cimport cython
-import numpy as np
-import operator
-import warnings
+cimport numpy as np
-try:
- from threading import Lock
-except ImportError:
- from dummy_threading import Lock
-
-cdef object cont0_array(rk_state *state, rk_cont0 func, object size,
- object lock):
- cdef double *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state)
- return rv
- else:
- array = <ndarray>np.empty(size, np.float64)
- length = PyArray_SIZE(array)
- array_data = <double *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state)
- return array
-
-
-cdef object cont1_array_sc(rk_state *state, rk_cont1 func, object size, double a,
- object lock):
- cdef double *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state, a)
- return rv
- else:
- array = <ndarray>np.empty(size, np.float64)
- length = PyArray_SIZE(array)
- array_data = <double *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, a)
- return array
-
-cdef object cont1_array(rk_state *state, rk_cont1 func, object size,
- ndarray oa, object lock):
- cdef double *array_data
- cdef double *oa_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
- cdef flatiter itera
- cdef broadcast multi
-
- if size is None:
- array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa),
- PyArray_DIMS(oa) , NPY_DOUBLE)
- length = PyArray_SIZE(array)
- array_data = <double *>PyArray_DATA(array)
- itera = <flatiter>PyArray_IterNew(<object>oa)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, (<double *>PyArray_ITER_DATA(itera))[0])
- PyArray_ITER_NEXT(itera)
- else:
- array = <ndarray>np.empty(size, np.float64)
- array_data = <double *>PyArray_DATA(array)
- multi = <broadcast>PyArray_MultiIterNew(2, <void *>array,
- <void *>oa)
- if (multi.size != PyArray_SIZE(array)):
- raise ValueError("size is not compatible with inputs")
- with lock, nogil:
- for i from 0 <= i < multi.size:
- oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
- array_data[i] = func(state, oa_data[0])
- PyArray_MultiIter_NEXTi(multi, 1)
- return array
-
-cdef object cont2_array_sc(rk_state *state, rk_cont2 func, object size, double a,
- double b, object lock):
- cdef double *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state, a, b)
- return rv
- else:
- array = <ndarray>np.empty(size, np.float64)
- length = PyArray_SIZE(array)
- array_data = <double *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, a, b)
- return array
-
-
-cdef object cont2_array(rk_state *state, rk_cont2 func, object size,
- ndarray oa, ndarray ob, object lock):
- cdef double *array_data
- cdef double *oa_data
- cdef double *ob_data
- cdef ndarray array "arrayObject"
- cdef npy_intp i
- cdef broadcast multi
-
- if size is None:
- multi = <broadcast>np.broadcast(oa, ob)
- array = <ndarray>np.empty(multi.shape, dtype=np.float64)
- else:
- array = <ndarray>np.empty(size, dtype=np.float64)
- multi = <broadcast>np.broadcast(oa, ob, array)
- if multi.shape != array.shape:
- raise ValueError("size is not compatible with inputs")
-
- array_data = <double *>PyArray_DATA(array)
-
- with lock, nogil:
- for i in range(multi.size):
- oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
- ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
- array_data[i] = func(state, oa_data[0], ob_data[0])
- PyArray_MultiIter_NEXT(multi)
-
- return array
-
-cdef object cont3_array_sc(rk_state *state, rk_cont3 func, object size, double a,
- double b, double c, object lock):
-
- cdef double *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state, a, b, c)
- return rv
- else:
- array = <ndarray>np.empty(size, np.float64)
- length = PyArray_SIZE(array)
- array_data = <double *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, a, b, c)
- return array
-
-cdef object cont3_array(rk_state *state, rk_cont3 func, object size,
- ndarray oa, ndarray ob, ndarray oc, object lock):
-
- cdef double *array_data
- cdef double *oa_data
- cdef double *ob_data
- cdef double *oc_data
- cdef ndarray array "arrayObject"
- cdef npy_intp i
- cdef broadcast multi
-
- if size is None:
- multi = <broadcast>np.broadcast(oa, ob, oc)
- array = <ndarray>np.empty(multi.shape, dtype=np.float64)
- else:
- array = <ndarray>np.empty(size, dtype=np.float64)
- multi = <broadcast>np.broadcast(oa, ob, oc, array)
- if multi.shape != array.shape:
- raise ValueError("size is not compatible with inputs")
-
- array_data = <double *>PyArray_DATA(array)
-
- with lock, nogil:
- for i in range(multi.size):
- oa_data = <double *>PyArray_MultiIter_DATA(multi, 0)
- ob_data = <double *>PyArray_MultiIter_DATA(multi, 1)
- oc_data = <double *>PyArray_MultiIter_DATA(multi, 2)
- array_data[i] = func(state, oa_data[0], ob_data[0], oc_data[0])
- PyArray_MultiIter_NEXT(multi)
-
- return array
-
-cdef object disc0_array(rk_state *state, rk_disc0 func, object size, object lock):
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state)
- return rv
- else:
- array = <ndarray>np.empty(size, int)
- length = PyArray_SIZE(array)
- array_data = <long *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state)
- return array
-
-cdef object discnp_array_sc(rk_state *state, rk_discnp func, object size,
- long n, double p, object lock):
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state, n, p)
- return rv
- else:
- array = <ndarray>np.empty(size, int)
- length = PyArray_SIZE(array)
- array_data = <long *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, n, p)
- return array
-
-cdef object discnp_array(rk_state *state, rk_discnp func, object size,
- ndarray on, ndarray op, object lock):
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp i
- cdef double *op_data
- cdef long *on_data
- cdef broadcast multi
-
- if size is None:
- multi = <broadcast>np.broadcast(on, op)
- array = <ndarray>np.empty(multi.shape, dtype=int)
- else:
- array = <ndarray>np.empty(size, dtype=int)
- multi = <broadcast>np.broadcast(on, op, array)
- if multi.shape != array.shape:
- raise ValueError("size is not compatible with inputs")
-
- array_data = <long *>PyArray_DATA(array)
-
- with lock, nogil:
- for i in range(multi.size):
- on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
- op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
- array_data[i] = func(state, on_data[0], op_data[0])
- PyArray_MultiIter_NEXT(multi)
-
- return array
-
-cdef object discdd_array_sc(rk_state *state, rk_discdd func, object size,
- double n, double p, object lock):
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state, n, p)
- return rv
- else:
- array = <ndarray>np.empty(size, int)
- length = PyArray_SIZE(array)
- array_data = <long *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, n, p)
- return array
-
-cdef object discdd_array(rk_state *state, rk_discdd func, object size,
- ndarray on, ndarray op, object lock):
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp i
- cdef double *op_data
- cdef double *on_data
- cdef broadcast multi
-
- if size is None:
- multi = <broadcast>np.broadcast(on, op)
- array = <ndarray>np.empty(multi.shape, dtype=int)
- else:
- array = <ndarray>np.empty(size, dtype=int)
- multi = <broadcast>np.broadcast(on, op, array)
- if multi.shape != array.shape:
- raise ValueError("size is not compatible with inputs")
-
- array_data = <long *>PyArray_DATA(array)
-
- with lock, nogil:
- for i in range(multi.size):
- on_data = <double *>PyArray_MultiIter_DATA(multi, 0)
- op_data = <double *>PyArray_MultiIter_DATA(multi, 1)
- array_data[i] = func(state, on_data[0], op_data[0])
- PyArray_MultiIter_NEXT(multi)
-
- return array
-
-cdef object discnmN_array_sc(rk_state *state, rk_discnmN func, object size,
- long n, long m, long N, object lock):
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state, n, m, N)
- return rv
- else:
- array = <ndarray>np.empty(size, int)
- length = PyArray_SIZE(array)
- array_data = <long *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, n, m, N)
- return array
-
-cdef object discnmN_array(rk_state *state, rk_discnmN func, object size,
- ndarray on, ndarray om, ndarray oN, object lock):
- cdef long *array_data
- cdef long *on_data
- cdef long *om_data
- cdef long *oN_data
- cdef ndarray array "arrayObject"
- cdef npy_intp i
- cdef broadcast multi
-
- if size is None:
- multi = <broadcast>np.broadcast(on, om, oN)
- array = <ndarray>np.empty(multi.shape, dtype=int)
- else:
- array = <ndarray>np.empty(size, dtype=int)
- multi = <broadcast>np.broadcast(on, om, oN, array)
- if multi.shape != array.shape:
- raise ValueError("size is not compatible with inputs")
-
- array_data = <long *>PyArray_DATA(array)
-
- with lock, nogil:
- for i in range(multi.size):
- on_data = <long *>PyArray_MultiIter_DATA(multi, 0)
- om_data = <long *>PyArray_MultiIter_DATA(multi, 1)
- oN_data = <long *>PyArray_MultiIter_DATA(multi, 2)
- array_data[i] = func(state, on_data[0], om_data[0], oN_data[0])
- PyArray_MultiIter_NEXT(multi)
-
- return array
-
-cdef object discd_array_sc(rk_state *state, rk_discd func, object size,
- double a, object lock):
- cdef long *array_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
-
- if size is None:
- with lock, nogil:
- rv = func(state, a)
- return rv
- else:
- array = <ndarray>np.empty(size, int)
- length = PyArray_SIZE(array)
- array_data = <long *>PyArray_DATA(array)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, a)
- return array
-
-cdef object discd_array(rk_state *state, rk_discd func, object size, ndarray oa,
- object lock):
- cdef long *array_data
- cdef double *oa_data
- cdef ndarray array "arrayObject"
- cdef npy_intp length
- cdef npy_intp i
- cdef broadcast multi
- cdef flatiter itera
-
- if size is None:
- array = <ndarray>PyArray_SimpleNew(PyArray_NDIM(oa),
- PyArray_DIMS(oa), NPY_LONG)
- length = PyArray_SIZE(array)
- array_data = <long *>PyArray_DATA(array)
- itera = <flatiter>PyArray_IterNew(<object>oa)
- with lock, nogil:
- for i from 0 <= i < length:
- array_data[i] = func(state, (<double *>PyArray_ITER_DATA(itera))[0])
- PyArray_ITER_NEXT(itera)
- else:
- array = <ndarray>np.empty(size, int)
- array_data = <long *>PyArray_DATA(array)
- multi = <broadcast>PyArray_MultiIterNew(2, <void *>array, <void *>oa)
- if (multi.size != PyArray_SIZE(array)):
- raise ValueError("size is not compatible with inputs")
- with lock, nogil:
- for i from 0 <= i < multi.size:
- oa_data = <double *>PyArray_MultiIter_DATA(multi, 1)
- array_data[i] = func(state, oa_data[0])
- PyArray_MultiIter_NEXTi(multi, 1)
- return array
-
-cdef double kahan_sum(double *darr, npy_intp n):
- cdef double c, y, t, sum
- cdef npy_intp i
- sum = darr[0]
- c = 0.0
- for i from 1 <= i < n:
- y = darr[i] - c
- t = sum + y
- c = (t-sum) - y
- sum = t
- return sum
-
-def _shape_from_size(size, d):
- if size is None:
- shape = (d,)
- else:
- try:
- shape = (operator.index(size), d)
- except TypeError:
- shape = tuple(size) + (d,)
- return shape
-
-# Look up table for randint functions keyed by dtype.
-# The stored data is a tuple (lbnd, ubnd, func), where lbnd is the smallest
-# value for the type, ubnd is one greater than the largest value, and func is the
-# function to call.
-_randint_type = {
- np.dtype(np.bool_): (0, 2, _rand_bool),
- np.dtype(np.int8): (-2**7, 2**7, _rand_int8),
- np.dtype(np.int16): (-2**15, 2**15, _rand_int16),
- np.dtype(np.int32): (-2**31, 2**31, _rand_int32),
- np.dtype(np.int64): (-2**63, 2**63, _rand_int64),
- np.dtype(np.uint8): (0, 2**8, _rand_uint8),
- np.dtype(np.uint16): (0, 2**16, _rand_uint16),
- np.dtype(np.uint32): (0, 2**32, _rand_uint32),
- np.dtype(np.uint64): (0, 2**64, _rand_uint64)
-}
+from .bounded_integers cimport *
+from .common cimport *
+from .distributions cimport *
+from .legacy_distributions cimport *
+
+np.import_array()
+
+cdef object int64_to_long(object x):
+ """
+ Convert int64 to long for legacy compatibility, which used long for integer
+ distributions
+ """
+ cdef int64_t x64
+
+ if np.isscalar(x):
+ x64 = x
+ return <long>x64
+ return x.astype('l', casting='unsafe')
cdef class RandomState:
"""
RandomState(seed=None)
- Container for the Mersenne Twister pseudo-random number generator.
-
- `RandomState` exposes a number of methods for generating random numbers
- drawn from a variety of probability distributions. In addition to the
- distribution-specific arguments, each method takes a keyword argument
- `size` that defaults to ``None``. If `size` is ``None``, then a single
- value is generated and returned. If `size` is an integer, then a 1-D
- array filled with generated values is returned. If `size` is a tuple,
- then an array with that shape is filled and returned.
-
- *Compatibility Guarantee*
- A fixed seed and a fixed series of calls to 'RandomState' methods using
- the same parameters will always produce the same results up to roundoff
- error except when the values were incorrect. Incorrect values will be
- fixed and the NumPy version in which the fix was made will be noted in
- the relevant docstring. Extension of existing parameter ranges and the
- addition of new parameters is allowed as long the previous behavior
- remains unchanged.
+ Container for the slow Mersenne Twister pseudo-random number generator.
+ Consider using a different BitGenerator with the Generator container
+ instead.
+
+ `RandomState` and `Generator` expose a number of methods for generating
+ random numbers drawn from a variety of probability distributions. In
+ addition to the distribution-specific arguments, each method takes a
+ keyword argument `size` that defaults to ``None``. If `size` is ``None``,
+ then a single value is generated and returned. If `size` is an integer,
+ then a 1-D array filled with generated values is returned. If `size` is a
+ tuple, then an array with that shape is filled and returned.
+
+ **Compatibility Guarantee**
+
+ A fixed bit generator using a fixed seed and a fixed series of calls to
+ 'RandomState' methods using the same parameters will always produce the
+ same results up to roundoff error except when the values were incorrect.
+ `RandomState` is effectively frozen and will only receive updates that
+ are required by changes in the the internals of Numpy. More substantial
+ changes, including algorithmic improvements, are reserved for
+ `Generator`.
Parameters
----------
- seed : {None, int, array_like}, optional
- Random seed used to initialize the pseudo-random number generator. Can
- be any integer between 0 and 2**32 - 1 inclusive, an array (or other
- sequence) of such integers, or ``None`` (the default). If `seed` is
- ``None``, then `RandomState` will try to read data from
- ``/dev/urandom`` (or the Windows analogue) if available or seed from
- the clock otherwise.
+ seed : {None, int, array_like, BitGenerator}, optional
+ Random seed used to initialize the pseudo-random number generator or
+ an instantized BitGenerator. If an integer or array, used as a seed for
+ the MT19937 BitGenerator. Values can be any integer between 0 and
+ 2**32 - 1 inclusive, an array (or other sequence) of such integers,
+ or ``None`` (the default). If `seed` is ``None``, then the `MT19937`
+ BitGenerator is initialized by reading data from ``/dev/urandom``
+ (or the Windows analogue) if available or seed from the clock
+ otherwise.
Notes
-----
@@ -631,70 +80,90 @@ cdef class RandomState:
NumPy-aware, has the advantage that it provides a much larger number
of probability distributions to choose from.
+ See Also
+ --------
+ Generator
+ MT19937
+ :ref:`bit_generator`
+
"""
- cdef rk_state *internal_state
+ cdef public object _bit_generator
+ cdef bitgen_t _bitgen
+ cdef aug_bitgen_t _aug_state
+ cdef binomial_t _binomial
cdef object lock
- cdef object state_address
- poisson_lam_max = np.iinfo('l').max - np.sqrt(np.iinfo('l').max)*10
+ _poisson_lam_max = POISSON_LAM_MAX
def __init__(self, seed=None):
- self.internal_state = <rk_state*>PyMem_Malloc(sizeof(rk_state))
- self.state_address = PyCapsule_New(self.internal_state, NULL, NULL)
- self.lock = Lock()
- self.seed(seed)
+ if seed is None:
+ bit_generator = _MT19937()
+ elif not hasattr(seed, 'capsule'):
+ bit_generator = _MT19937()
+ bit_generator._legacy_seeding(seed)
+ else:
+ bit_generator = seed
+
+ self._bit_generator = bit_generator
+ capsule = bit_generator.capsule
+ cdef const char *name = "BitGenerator"
+ if not PyCapsule_IsValid(capsule, name):
+ raise ValueError("Invalid bit generator. The bit generator must "
+ "be instantized.")
+ self._bitgen = (<bitgen_t *> PyCapsule_GetPointer(capsule, name))[0]
+ self._aug_state.bit_generator = &self._bitgen
+ self._reset_gauss()
+ self.lock = bit_generator.lock
+
+ def __repr__(self):
+ return self.__str__() + ' at 0x{:X}'.format(id(self))
+
+ def __str__(self):
+ _str = self.__class__.__name__
+ _str += '(' + self._bit_generator.__class__.__name__ + ')'
+ return _str
+
+ # Pickling support:
+ def __getstate__(self):
+ return self.get_state(legacy=False)
+
+ def __setstate__(self, state):
+ self.set_state(state)
- def __dealloc__(self):
- if self.internal_state != NULL:
- PyMem_Free(self.internal_state)
- self.internal_state = NULL
+ def __reduce__(self):
+ state = self.get_state(legacy=False)
+ from ._pickle import __randomstate_ctor
+ return __randomstate_ctor, (state['bit_generator'],), state
+
+ cdef _reset_gauss(self):
+ self._aug_state.has_gauss = 0
+ self._aug_state.gauss = 0.0
def seed(self, seed=None):
"""
- seed(seed=None)
+ seed(self, seed=None)
- Seed the generator.
+ Reseed a legacy MT19937 BitGenerator
- This method is called when `RandomState` is initialized. It can be
- called again to re-seed the generator. For details, see `RandomState`.
-
- Parameters
- ----------
- seed : int or 1-d array_like, optional
- Seed for `RandomState`.
- Must be convertible to 32 bit unsigned integers.
+ Notes
+ -----
+ This is a convenience, legacy function.
- See Also
- --------
- RandomState
+ The best practice is to **not** reseed a BitGenerator, rather to
+ recreate a new one. This method is here for legacy reasons.
+ This example demonstrates best practice.
+ >>> from numpy.random import MT19937
+ >>> from numpy.random import RandomState, SeedSequence
+ >>> rs = RandomState(MT19937(SeedSequence(123456789)))
+ # Later, you want to restart the stream
+ >>> rs = RandomState(MT19937(SeedSequence(987654321)))
"""
- cdef rk_error errcode
- cdef ndarray obj "arrayObject_obj"
- try:
- if seed is None:
- with self.lock:
- errcode = rk_randomseed(self.internal_state)
- else:
- idx = operator.index(seed)
- if (idx >= 2**32) or (idx < 0):
- raise ValueError("Seed must be between 0 and 2**32 - 1")
- with self.lock:
- rk_seed(idx, self.internal_state)
- except TypeError:
- obj = np.asarray(seed)
- if obj.size == 0:
- raise ValueError("Seed must be non-empty")
- obj = obj.astype(np.int64, casting='safe')
- if obj.ndim != 1:
- raise ValueError("Seed array must be 1-d")
- if ((obj >= 2**32) | (obj < 0)).any():
- raise ValueError("Seed values must be between 0 and 2**32 - 1")
- obj = obj.astype('L', casting='unsafe')
- with self.lock:
- init_by_array(self.internal_state, <unsigned long *>PyArray_DATA(obj),
- PyArray_DIM(obj, 0))
+ if not isinstance(self._bit_generator, _MT19937):
+ raise TypeError('can only re-seed a MT19937 BitGenerator')
+ self._bit_generator._legacy_seeding(seed)
+ self._reset_gauss()
- def get_state(self):
+ def get_state(self, legacy=True):
"""
get_state()
@@ -704,7 +173,7 @@ cdef class RandomState:
Returns
-------
- out : tuple(str, ndarray of 624 uints, int, int, float)
+ out : {tuple(str, ndarray of 624 uints, int, int, float), dict}
The returned tuple has the following items:
1. the string 'MT19937'.
@@ -713,6 +182,13 @@ cdef class RandomState:
4. an integer ``has_gauss``.
5. a float ``cached_gaussian``.
+ If `legacy` is False, or the BitGenerator is not NT19937, then
+ state is returned as a dictionary.
+
+ legacy : bool
+ Flag indicating the return a legacy tuple state when the BitGenerator
+ is MT19937.
+
See Also
--------
set_state
@@ -724,15 +200,18 @@ cdef class RandomState:
the user should know exactly what he/she is doing.
"""
- cdef ndarray state "arrayObject_state"
- state = <ndarray>np.empty(624, np.uint)
- with self.lock:
- memcpy(<void*>PyArray_DATA(state), <void*>(self.internal_state.key), 624*sizeof(long))
- has_gauss = self.internal_state.has_gauss
- gauss = self.internal_state.gauss
- pos = self.internal_state.pos
- state = <ndarray>np.asarray(state, np.uint32)
- return ('MT19937', state, pos, has_gauss, gauss)
+ st = self._bit_generator.state
+ if st['bit_generator'] != 'MT19937' and legacy:
+ warnings.warn('get_state and legacy can only be used with the '
+ 'MT19937 BitGenerator. To silence this warning, '
+ 'set `legacy` to False.', RuntimeWarning)
+ legacy = False
+ st['has_gauss'] = self._aug_state.has_gauss
+ st['gauss'] = self._aug_state.gauss
+ if legacy:
+ return (st['bit_generator'], st['state']['key'], st['state']['pos'],
+ st['has_gauss'], st['gauss'])
+ return st
def set_state(self, state):
"""
@@ -740,12 +219,14 @@ cdef class RandomState:
Set the internal state of the generator from a tuple.
- For use if one has reason to manually (re-)set the internal state of the
- "Mersenne Twister"[1]_ pseudo-random number generating algorithm.
+ For use if one has reason to manually (re-)set the internal state of
+ the bit generator used by the RandomState instance. By default,
+ RandomState uses the "Mersenne Twister"[1]_ pseudo-random number
+ generating algorithm.
Parameters
----------
- state : tuple(str, ndarray of 624 uints, int, int, float)
+ state : {tuple(str, ndarray of 624 uints, int, int, float), dict}
The `state` tuple has the following items:
1. the string 'MT19937', specifying the Mersenne Twister algorithm.
@@ -754,6 +235,9 @@ cdef class RandomState:
4. an integer ``has_gauss``.
5. a float ``cached_gaussian``.
+ If state is a dictionary, it is directly set using the BitGenerators
+ `state` property.
+
Returns
-------
out : None
@@ -781,41 +265,27 @@ cdef class RandomState:
Vol. 8, No. 1, pp. 3-30, Jan. 1998.
"""
- cdef ndarray obj "arrayObject_obj"
- cdef int pos
- algorithm_name = state[0]
- if algorithm_name != 'MT19937':
- raise ValueError("algorithm must be 'MT19937'")
- key, pos = state[1:3]
- if len(state) == 3:
- has_gauss = 0
- cached_gaussian = 0.0
+ if isinstance(state, dict):
+ if 'bit_generator' not in state or 'state' not in state:
+ raise ValueError('state dictionary is not valid.')
+ st = state
else:
- has_gauss, cached_gaussian = state[3:5]
- try:
- obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_ULONG, 1, 1)
- except TypeError:
- # compatibility -- could be an older pickle
- obj = <ndarray>PyArray_ContiguousFromObject(key, NPY_LONG, 1, 1)
- if PyArray_DIM(obj, 0) != 624:
- raise ValueError("state must be 624 longs")
- with self.lock:
- memcpy(<void*>(self.internal_state.key), <void*>PyArray_DATA(obj), 624*sizeof(long))
- self.internal_state.pos = pos
- self.internal_state.has_gauss = has_gauss
- self.internal_state.gauss = cached_gaussian
-
- # Pickling support:
- def __getstate__(self):
- return self.get_state()
-
- def __setstate__(self, state):
- self.set_state(state)
+ if not isinstance(state, (tuple, list)):
+ raise TypeError('state must be a dict or a tuple.')
+ if state[0] != 'MT19937':
+ raise ValueError('set_state can only be used with legacy MT19937'
+ 'state instances.')
+ st = {'bit_generator': state[0],
+ 'state': {'key': state[1], 'pos': state[2]}}
+ if len(state) > 3:
+ st['has_gauss'] = state[3]
+ st['gauss'] = state[4]
+ value = st
+
+ self._aug_state.gauss = st.get('gauss', 0.0)
+ self._aug_state.has_gauss = st.get('has_gauss', 0)
+ self._bit_generator.state = st
- def __reduce__(self):
- return (np.random.__RandomState_ctor, (), self.get_state())
-
- # Basic distributions:
def random_sample(self, size=None):
"""
random_sample(size=None)
@@ -844,30 +314,168 @@ cdef class RandomState:
Examples
--------
>>> np.random.random_sample()
- 0.47108547995356098
+ 0.47108547995356098 # random
>>> type(np.random.random_sample())
- <type 'float'>
+ <class 'float'>
>>> np.random.random_sample((5,))
- array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428])
+ array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) # random
Three-by-two array of random numbers from [-5, 0):
>>> 5 * np.random.random_sample((3, 2)) - 5
- array([[-3.99149989, -0.52338984],
+ array([[-3.99149989, -0.52338984], # random
[-2.99091858, -0.79479508],
[-1.23204345, -1.75224494]])
"""
- return cont0_array(self.internal_state, rk_double, size, self.lock)
+ cdef double temp
+ return double_fill(&random_double_fill, &self._bitgen, size, self.lock, None)
+
+ def random(self, size=None):
+ """
+ random(size=None)
+
+ Return random floats in the half-open interval [0.0, 1.0). Alias for
+ `random_sample` to ease forward-porting to the new random API.
+ """
+ return self.random_sample(size=size)
+
+ def beta(self, a, b, size=None):
+ """
+ beta(a, b, size=None)
+
+ Draw samples from a Beta distribution.
+
+ The Beta distribution is a special case of the Dirichlet distribution,
+ and is related to the Gamma distribution. It has the probability
+ distribution function
+
+ .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}
+ (1 - x)^{\\beta - 1},
+
+ where the normalization, B, is the beta function,
+
+ .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}
+ (1 - t)^{\\beta - 1} dt.
+
+ It is often seen in Bayesian inference and order statistics.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Alpha, positive (>0).
+ b : float or array_like of floats
+ Beta, positive (>0).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` and ``b`` are both scalars.
+ Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized beta distribution.
+
+ """
+ return cont(&legacy_beta, &self._aug_state, size, self.lock, 2,
+ a, 'a', CONS_POSITIVE,
+ b, 'b', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def exponential(self, scale=1.0, size=None):
+ """
+ exponential(scale=1.0, size=None)
+
+ Draw samples from an exponential distribution.
+
+ Its probability density function is
+
+ .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
+
+ for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
+ which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
+ The rate parameter is an alternative, widely used parameterization
+ of the exponential distribution [3]_.
+
+ The exponential distribution is a continuous analogue of the
+ geometric distribution. It describes many common situations, such as
+ the size of raindrops measured over many rainstorms [1]_, or the time
+ between page requests to Wikipedia [2]_.
+
+ Parameters
+ ----------
+ scale : float or array_like of floats
+ The scale parameter, :math:`\\beta = 1/\\lambda`. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``scale`` is a scalar. Otherwise,
+ ``np.array(scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized exponential distribution.
+
+ References
+ ----------
+ .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
+ Random Signal Principles", 4th ed, 2001, p. 57.
+ .. [2] Wikipedia, "Poisson process",
+ https://en.wikipedia.org/wiki/Poisson_process
+ .. [3] Wikipedia, "Exponential distribution",
+ https://en.wikipedia.org/wiki/Exponential_distribution
+
+ """
+ return cont(&legacy_exponential, &self._aug_state, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def standard_exponential(self, size=None):
+ """
+ standard_exponential(size=None)
+
+ Draw samples from the standard exponential distribution.
+
+ `standard_exponential` is identical to the exponential distribution
+ with a scale parameter of 1.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : float or ndarray
+ Drawn samples.
+
+ Examples
+ --------
+ Output a 3x8000 array:
+
+ >>> n = np.random.standard_exponential((3, 8000))
+
+ """
+ return cont(&legacy_standard_exponential, &self._aug_state, size, self.lock, 0,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None)
def tomaxint(self, size=None):
"""
tomaxint(size=None)
- Random integers between 0 and ``sys.maxint``, inclusive.
-
Return a sample of uniformly distributed random integers in the interval
- [0, ``sys.maxint``].
+ [0, ``np.iinfo(np.int).max``]. The np.int type translates to the C long
+ integer type and its precision is platform dependent.
Parameters
----------
@@ -889,23 +497,35 @@ cdef class RandomState:
Examples
--------
- >>> RS = np.random.mtrand.RandomState() # need a RandomState object
- >>> RS.tomaxint((2,2,2))
- array([[[1170048599, 1600360186],
+ >>> rs = np.random.RandomState() # need a RandomState object
+ >>> rs.tomaxint((2,2,2))
+ array([[[1170048599, 1600360186], # random
[ 739731006, 1947757578]],
[[1871712945, 752307660],
[1601631370, 1479324245]]])
- >>> import sys
- >>> sys.maxint
- 2147483647
- >>> RS.tomaxint((2,2,2)) < sys.maxint
+ >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int).max
array([[[ True, True],
[ True, True]],
[[ True, True],
[ True, True]]])
"""
- return disc0_array(self.internal_state, rk_long, size, self.lock)
+ cdef np.npy_intp n
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+
+ if size is None:
+ with self.lock:
+ return random_positive_int(&self._bitgen)
+
+ randoms = <np.ndarray>np.empty(size, dtype=np.int64)
+ randoms_data = <int64_t*>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ for i in range(n):
+ with self.lock, nogil:
+ randoms_data[i] = random_positive_int(&self._bitgen)
+ return randoms
def randint(self, low, high=None, size=None, dtype=int):
"""
@@ -919,13 +539,14 @@ cdef class RandomState:
Parameters
----------
- low : int
- Lowest (signed) integer to be drawn from the distribution (unless
+ low : int or array-like of ints
+ Lowest (signed) integers to be drawn from the distribution (unless
``high=None``, in which case this parameter is one above the
*highest* such integer).
- high : int, optional
+ high : int or array-like of ints, optional
If provided, one above the largest (signed) integer to be drawn
from the distribution (see above for behavior if ``high=None``).
+ If array-like, must contain integer values
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. Default is None, in which case a
@@ -948,59 +569,86 @@ cdef class RandomState:
--------
random.random_integers : similar to `randint`, only for the closed
interval [`low`, `high`], and 1 is the lowest value if `high` is
- omitted. In particular, this other one is the one to use to generate
- uniformly distributed discrete non-integers.
+ omitted.
Examples
--------
>>> np.random.randint(2, size=10)
- array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0])
+ array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random
>>> np.random.randint(1, size=10)
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Generate a 2 x 4 array of ints between 0 and 4, inclusive:
>>> np.random.randint(5, size=(2, 4))
- array([[4, 0, 2, 1],
+ array([[4, 0, 2, 1], # random
[3, 2, 2, 0]])
+ Generate a 1 x 3 array with 3 different upper bounds
+
+ >>> np.random.randint(1, [3, 5, 10])
+ array([2, 2, 9]) # random
+
+ Generate a 1 by 3 array with 3 different lower bounds
+
+ >>> np.random.randint([1, 5, 7], 10)
+ array([9, 8, 7]) # random
+
+ Generate a 2 by 4 array using broadcasting with dtype of uint8
+
+ >>> np.random.randint([1, 3, 5, 7], [[10], [20]], dtype=np.uint8)
+ array([[ 8, 6, 9, 7], # random
+ [ 1, 16, 9, 12]], dtype=uint8)
"""
+
if high is None:
high = low
low = 0
- raw_dtype = dtype
- dtype = np.dtype(dtype)
- try:
- lowbnd, highbnd, randfunc = _randint_type[dtype]
- except KeyError:
- raise TypeError('Unsupported dtype "%s" for randint' % dtype)
-
- # TODO: Do not cast these inputs to Python int
- #
- # This is a workaround until gh-8851 is resolved (bug in NumPy
- # integer comparison and subtraction involving uint64 and non-
- # uint64). Afterwards, remove these two lines.
- ilow = int(low)
- ihigh = int(high)
-
- if ilow < lowbnd:
- raise ValueError("low is out of bounds for %s" % dtype)
- if ihigh > highbnd:
- raise ValueError("high is out of bounds for %s" % dtype)
- if ilow >= ihigh and np.prod(size) != 0:
- raise ValueError("Range cannot be empty (low >= high) unless no samples are taken")
-
- with self.lock:
- ret = randfunc(ilow, ihigh - 1, size, self.state_address)
-
- # back-compat: keep python scalars when a python type is passed
- if size is None and raw_dtype in (bool, int, np.long):
- return raw_dtype(ret)
-
+ dt = np.dtype(dtype)
+ key = dt.name
+ if key not in _integers_types:
+ raise TypeError('Unsupported dtype "%s" for randint' % key)
+ if not dt.isnative:
+ # numpy 1.17.0, 2019-05-28
+ warnings.warn('Providing a dtype with a non-native byteorder is '
+ 'not supported. If you require platform-independent '
+ 'byteorder, call byteswap when required.\nIn future '
+ 'version, providing byteorder will raise a '
+ 'ValueError', DeprecationWarning)
+
+ # Implementation detail: the use a masked method to generate
+ # bounded uniform integers. Lemire's method is preferable since it is
+ # faster. randomgen allows a choice, we will always use the slower but
+ # backward compatible one.
+ cdef bint _masked = True
+ cdef bint _endpoint = False
+
+ if key == 'int32':
+ ret = _rand_int32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'int64':
+ ret = _rand_int64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'int16':
+ ret = _rand_int16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'int8':
+ ret = _rand_int8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'uint64':
+ ret = _rand_uint64(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'uint32':
+ ret = _rand_uint32(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'uint16':
+ ret = _rand_uint16(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'uint8':
+ ret = _rand_uint8(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+ elif key == 'bool':
+ ret = _rand_bool(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
+
+ if size is None and dtype in (np.bool, np.int, np.long):
+ if np.array(ret).shape == ():
+ return dtype(ret)
return ret
- def bytes(self, npy_intp length):
+ def bytes(self, np.npy_intp length):
"""
bytes(length)
@@ -1022,13 +670,13 @@ cdef class RandomState:
' eh\\x85\\x022SZ\\xbf\\xa4' #random
"""
- cdef void *bytes
- bytestring = empty_py_bytes(length, &bytes)
- with self.lock, nogil:
- rk_fill(bytes, length, self.internal_state)
- return bytestring
-
+ cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
+ # Interpret the uint32s as little-endian to convert them to bytes
+ # consistently.
+ return self.randint(0, 4294967296, size=n_uint32,
+ dtype=np.uint32).astype('<u4').tobytes()[:length]
+ @cython.wraparound(True)
def choice(self, a, size=None, replace=True, p=None):
"""
choice(a, size=None, replace=True, p=None)
@@ -1038,7 +686,7 @@ cdef class RandomState:
.. versionadded:: 1.7.0
Parameters
- -----------
+ ----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a were np.arange(a)
@@ -1054,12 +702,12 @@ cdef class RandomState:
entries in a.
Returns
- --------
+ -------
samples : single item or ndarray
The generated random samples
Raises
- -------
+ ------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
@@ -1068,42 +716,42 @@ cdef class RandomState:
size
See Also
- ---------
+ --------
randint, shuffle, permutation
Examples
- ---------
+ --------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3)
- array([0, 3, 4])
+ array([0, 3, 4]) # random
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
- array([3, 3, 0])
+ array([3, 3, 0]) # random
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False)
- array([3,1,0])
+ array([3,1,0]) # random
>>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
- array([2, 3, 0])
+ array([2, 3, 0]) # random
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
- array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
- dtype='|S11')
+ array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], # random
+ dtype='<U11')
"""
@@ -1114,11 +762,11 @@ cdef class RandomState:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
- raise ValueError("'a' must be 1-dimensional or an integer")
+ raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0 and np.prod(size) != 0:
- raise ValueError("'a' must be greater than 0 unless no samples are taken")
+ raise ValueError("a must be greater than 0 unless no samples are taken")
elif a.ndim != 1:
- raise ValueError("'a' must be 1-dimensional")
+ raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
if pop_size is 0 and np.prod(size) != 0:
@@ -1132,16 +780,20 @@ cdef class RandomState:
if np.issubdtype(p.dtype, np.floating):
atol = max(atol, np.sqrt(np.finfo(p.dtype).eps))
- p = <ndarray>PyArray_ContiguousFromObject(p, NPY_DOUBLE, 1, 1)
- pix = <double*>PyArray_DATA(p)
+ p = <np.ndarray>np.PyArray_FROM_OTF(
+ p, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pix = <double*>np.PyArray_DATA(p)
if p.ndim != 1:
raise ValueError("'p' must be 1-dimensional")
if p.size != pop_size:
raise ValueError("'a' and 'p' must have same size")
+ p_sum = kahan_sum(pix, d)
+ if np.isnan(p_sum):
+ raise ValueError("probabilities contain NaN")
if np.logical_or.reduce(p < 0):
raise ValueError("probabilities are not non-negative")
- if abs(kahan_sum(pix, d) - 1.) > atol:
+ if abs(p_sum - 1.) > atol:
raise ValueError("probabilities do not sum to 1")
shape = size
@@ -1157,20 +809,24 @@ cdef class RandomState:
cdf /= cdf[-1]
uniform_samples = self.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
- idx = np.array(idx, copy=False) # searchsorted returns a scalar
+ # searchsorted returns a scalar
+ # force cast to int for LLP64
+ idx = np.array(idx, copy=False).astype(int, casting='unsafe')
else:
idx = self.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
+ elif size < 0:
+ raise ValueError("negative dimensions are not allowed")
if p is not None:
if np.count_nonzero(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
- found = np.zeros(shape, dtype=np.int)
+ found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = self.rand(size - n_uniq)
@@ -1194,7 +850,7 @@ cdef class RandomState:
# In most cases a scalar will have been made an array
idx = idx.item(0)
- #Use samples as indices for a if a is array-like
+ # Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
@@ -1210,7 +866,6 @@ cdef class RandomState:
return a[idx]
-
def uniform(self, low=0.0, high=1.0, size=None):
"""
uniform(low=0.0, high=1.0, size=None)
@@ -1288,34 +943,39 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray olow, ohigh, odiff
- cdef double flow, fhigh, fscale
+ cdef bint is_scalar = True
+ cdef np.ndarray alow, ahigh, arange
+ cdef double _low, _high, range
cdef object temp
- olow = <ndarray>PyArray_FROM_OTF(low, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- ohigh = <ndarray>PyArray_FROM_OTF(high, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if olow.shape == ohigh.shape == ():
- flow = PyFloat_AsDouble(low)
- fhigh = PyFloat_AsDouble(high)
- fscale = fhigh - flow
+ alow = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ ahigh = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ALIGNED)
- if not npy_isfinite(fscale):
+ if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0:
+ _low = PyFloat_AsDouble(low)
+ _high = PyFloat_AsDouble(high)
+ range = _high - _low
+ if not np.isfinite(range):
raise OverflowError('Range exceeds valid bounds')
- return cont2_array_sc(self.internal_state, rk_uniform, size, flow,
- fscale, self.lock)
-
- temp = np.subtract(ohigh, olow)
- Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting
- # rules because EnsureArray steals a reference
- odiff = <ndarray>PyArray_EnsureArray(temp)
-
- if not np.all(np.isfinite(odiff)):
+ return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
+ _low, '', CONS_NONE,
+ range, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ temp = np.subtract(ahigh, alow)
+ Py_INCREF(temp)
+ # needed to get around Pyrex's automatic reference-counting
+ # rules because EnsureArray steals a reference
+ arange = <np.ndarray>np.PyArray_EnsureArray(temp)
+ if not np.all(np.isfinite(arange)):
raise OverflowError('Range exceeds valid bounds')
-
- return cont2_array(self.internal_state, rk_uniform, size, olow, odiff,
- self.lock)
+ return cont(&random_uniform, &self._bitgen, size, self.lock, 2,
+ alow, '', CONS_NONE,
+ arange, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
def rand(self, *args):
"""
@@ -1323,6 +983,12 @@ cdef class RandomState:
Random values in a given shape.
+ .. note::
+ This is a convenience function for users porting code from Matlab,
+ and wraps `numpy.random.random_sample`. That function takes a
+ tuple to specify the size of the output, which is consistent with
+ other NumPy functions like `numpy.zeros` and `numpy.ones`.
+
Create an array of the given shape and populate it with
random samples from a uniform distribution
over ``[0, 1)``.
@@ -1330,7 +996,7 @@ cdef class RandomState:
Parameters
----------
d0, d1, ..., dn : int, optional
- The dimensions of the returned array, should all be positive.
+ The dimensions of the returned array, must be non-negative.
If no argument is given a single Python float is returned.
Returns
@@ -1342,12 +1008,6 @@ cdef class RandomState:
--------
random
- Notes
- -----
- This is a convenience function. If you want an interface that
- takes a shape-tuple as the first argument, refer to
- np.random.random_sample .
-
Examples
--------
>>> np.random.rand(3,2)
@@ -1367,21 +1027,22 @@ cdef class RandomState:
Return a sample (or samples) from the "standard normal" distribution.
- If positive, int_like or int-convertible arguments are provided,
- `randn` generates an array of shape ``(d0, d1, ..., dn)``, filled
- with random floats sampled from a univariate "normal" (Gaussian)
- distribution of mean 0 and variance 1 (if any of the :math:`d_i` are
- floats, they are first converted to integers by truncation). A single
- float randomly sampled from the distribution is returned if no
- argument is provided.
+ .. note::
+ This is a convenience function for users porting code from Matlab,
+ and wraps `numpy.random.standard_normal`. That function takes a
+ tuple to specify the size of the output, which is consistent with
+ other NumPy functions like `numpy.zeros` and `numpy.ones`.
- This is a convenience function. If you want an interface that takes a
- tuple as the first argument, use `numpy.random.standard_normal` instead.
+ If positive int_like arguments are provided, `randn` generates an array
+ of shape ``(d0, d1, ..., dn)``, filled
+ with random floats sampled from a univariate "normal" (Gaussian)
+ distribution of mean 0 and variance 1. A single float randomly sampled
+ from the distribution is returned if no argument is provided.
Parameters
----------
d0, d1, ..., dn : int, optional
- The dimensions of the returned array, should be all positive.
+ The dimensions of the returned array, must be non-negative.
If no argument is given a single Python float is returned.
Returns
@@ -1394,6 +1055,7 @@ cdef class RandomState:
See Also
--------
standard_normal : Similar, but takes a tuple as its argument.
+ normal : Also accepts mu and sigma arguments.
Notes
-----
@@ -1404,19 +1066,19 @@ cdef class RandomState:
Examples
--------
>>> np.random.randn()
- 2.1923875335537315 #random
+ 2.1923875335537315 # random
Two-by-four array of samples from N(3, 6.25):
- >>> 2.5 * np.random.randn(2, 4) + 3
- array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random
- [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random
+ >>> 3 + 2.5 * np.random.randn(2, 4)
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
"""
if len(args) == 0:
return self.standard_normal()
else:
- return self.standard_normal(args)
+ return self.standard_normal(size=args)
def random_integers(self, low, high=None, size=None):
"""
@@ -1427,8 +1089,8 @@ cdef class RandomState:
Return random integers of type np.int from the "discrete uniform"
distribution in the closed interval [`low`, `high`]. If `high` is
None (the default), then results are from [1, `low`]. The np.int
- type translates to the C long type used by Python 2 for "short"
- integers and its precision is platform dependent.
+ type translates to the C long integer type and its precision
+ is platform dependent.
This function has been deprecated. Use randint instead.
@@ -1470,11 +1132,11 @@ cdef class RandomState:
Examples
--------
>>> np.random.random_integers(5)
- 4
+ 4 # random
>>> type(np.random.random_integers(5))
- <type 'int'>
+ <class 'numpy.int64'>
>>> np.random.random_integers(5, size=(3,2))
- array([[5, 4],
+ array([[5, 4], # random
[3, 3],
[4, 5]])
@@ -1483,7 +1145,7 @@ cdef class RandomState:
:math:`{0, 5/8, 10/8, 15/8, 20/8}`):
>>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4.
- array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ])
+ array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ]) # random
Roll two six sided dice 1000 times and sum the results:
@@ -1507,12 +1169,11 @@ cdef class RandomState:
else:
warnings.warn(("This function is deprecated. Please call "
- "randint({low}, {high} + 1) instead".format(
- low=low, high=high)), DeprecationWarning)
-
- return self.randint(low, high + 1, size=size, dtype='l')
-
+ "randint({low}, {high} + 1) "
+ "instead".format(low=low, high=high)),
+ DeprecationWarning)
+ return self.randint(low, int(high) + 1, size=size, dtype='l')
# Complicated, continuous distributions:
def standard_normal(self, size=None):
@@ -1531,22 +1192,49 @@ cdef class RandomState:
Returns
-------
out : float or ndarray
- Drawn samples.
+ A floating-point array of shape ``size`` of drawn samples, or a
+ single sample if ``size`` was not specified.
+
+ Notes
+ -----
+ For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
+
+ mu + sigma * np.random.standard_normal(size=...)
+ np.random.normal(mu, sigma, size=...)
+
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
Examples
--------
+ >>> np.random.standard_normal()
+ 2.1923875335537315 #random
+
>>> s = np.random.standard_normal(8000)
>>> s
- array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random
- -0.38672696, -0.4685006 ]) #random
+ array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, # random
+ -0.38672696, -0.4685006 ]) # random
>>> s.shape
(8000,)
>>> s = np.random.standard_normal(size=(3, 4, 2))
>>> s.shape
(3, 4, 2)
+ Two-by-four array of samples from :math:`N(3, 6.25)`:
+
+ >>> 3 + 2.5 * np.random.standard_normal(size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
"""
- return cont0_array(self.internal_state, rk_gauss, size, self.lock)
+ return cont(&legacy_gauss, &self._aug_state, size, self.lock, 0,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None)
def normal(self, loc=0.0, scale=1.0, size=None):
"""
@@ -1569,7 +1257,8 @@ cdef class RandomState:
loc : float or array_like of floats
Mean ("centre") of the distribution.
scale : float or array_like of floats
- Standard deviation (spread or "width") of the distribution.
+ Standard deviation (spread or "width") of the distribution. Must be
+ non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1620,11 +1309,11 @@ cdef class RandomState:
Verify the mean and the variance:
- >>> abs(mu - np.mean(s)) < 0.01
- True
+ >>> abs(mu - np.mean(s))
+ 0.0 # may vary
- >>> abs(sigma - np.std(s, ddof=1)) < 0.01
- True
+ >>> abs(sigma - np.std(s, ddof=1))
+ 0.1 # may vary
Display the histogram of the samples, along with
the probability density function:
@@ -1636,180 +1325,18 @@ cdef class RandomState:
... linewidth=2, color='r')
>>> plt.show()
- """
- cdef ndarray oloc, oscale
- cdef double floc, fscale
-
- oloc = <ndarray>PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oloc.shape == oscale.shape == ():
- floc = PyFloat_AsDouble(loc)
- fscale = PyFloat_AsDouble(scale)
- if np.signbit(fscale):
- raise ValueError("scale < 0")
- return cont2_array_sc(self.internal_state, rk_normal, size, floc,
- fscale, self.lock)
-
- if np.any(np.signbit(oscale)):
- raise ValueError("scale < 0")
- return cont2_array(self.internal_state, rk_normal, size, oloc, oscale,
- self.lock)
-
- def beta(self, a, b, size=None):
- """
- beta(a, b, size=None)
-
- Draw samples from a Beta distribution.
-
- The Beta distribution is a special case of the Dirichlet distribution,
- and is related to the Gamma distribution. It has the probability
- distribution function
-
- .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}
- (1 - x)^{\\beta - 1},
-
- where the normalisation, B, is the beta function,
-
- .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}
- (1 - t)^{\\beta - 1} dt.
-
- It is often seen in Bayesian inference and order statistics.
-
- Parameters
- ----------
- a : float or array_like of floats
- Alpha, non-negative.
- b : float or array_like of floats
- Beta, non-negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` and ``b`` are both scalars.
- Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized beta distribution.
-
- """
- cdef ndarray oa, ob
- cdef double fa, fb
-
- oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- ob = <ndarray>PyArray_FROM_OTF(b, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oa.shape == ob.shape == ():
- fa = PyFloat_AsDouble(a)
- fb = PyFloat_AsDouble(b)
-
- if fa <= 0:
- raise ValueError("a <= 0")
- if fb <= 0:
- raise ValueError("b <= 0")
- return cont2_array_sc(self.internal_state, rk_beta, size, fa, fb,
- self.lock)
-
- if np.any(np.less_equal(oa, 0)):
- raise ValueError("a <= 0")
- if np.any(np.less_equal(ob, 0)):
- raise ValueError("b <= 0")
- return cont2_array(self.internal_state, rk_beta, size, oa, ob,
- self.lock)
-
- def exponential(self, scale=1.0, size=None):
- """
- exponential(scale=1.0, size=None)
-
- Draw samples from an exponential distribution.
-
- Its probability density function is
-
- .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
-
- for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
- which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
- The rate parameter is an alternative, widely used parameterization
- of the exponential distribution [3]_.
-
- The exponential distribution is a continuous analogue of the
- geometric distribution. It describes many common situations, such as
- the size of raindrops measured over many rainstorms [1]_, or the time
- between page requests to Wikipedia [2]_.
-
- Parameters
- ----------
- scale : float or array_like of floats
- The scale parameter, :math:`\\beta = 1/\\lambda`.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``scale`` is a scalar. Otherwise,
- ``np.array(scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized exponential distribution.
-
- References
- ----------
- .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
- Random Signal Principles", 4th ed, 2001, p. 57.
- .. [2] Wikipedia, "Poisson process",
- https://en.wikipedia.org/wiki/Poisson_process
- .. [3] Wikipedia, "Exponential distribution",
- https://en.wikipedia.org/wiki/Exponential_distribution
-
- """
- cdef ndarray oscale
- cdef double fscale
-
- oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oscale.shape == ():
- fscale = PyFloat_AsDouble(scale)
- if np.signbit(fscale):
- raise ValueError("scale < 0")
- return cont1_array_sc(self.internal_state, rk_exponential, size,
- fscale, self.lock)
-
- if np.any(np.signbit(oscale)):
- raise ValueError("scale < 0")
- return cont1_array(self.internal_state, rk_exponential, size, oscale,
- self.lock)
-
- def standard_exponential(self, size=None):
- """
- standard_exponential(size=None)
-
- Draw samples from the standard exponential distribution.
-
- `standard_exponential` is identical to the exponential distribution
- with a scale parameter of 1.
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- out : float or ndarray
- Drawn samples.
-
- Examples
- --------
- Output a 3x8000 array:
+ Two-by-four array of samples from N(3, 6.25):
- >>> n = np.random.standard_exponential((3, 8000))
+ >>> np.random.normal(3, 2.5, size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
"""
- return cont0_array(self.internal_state, rk_standard_exponential, size,
- self.lock)
+ return cont(&legacy_normal, &self._aug_state, size, self.lock, 2,
+ loc, '', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ None)
def standard_gamma(self, shape, size=None):
"""
@@ -1823,7 +1350,7 @@ cdef class RandomState:
Parameters
----------
shape : float or array_like of floats
- Parameter, should be > 0.
+ Parameter, must be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1872,30 +1399,19 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> import scipy.special as sps
+ >>> import scipy.special as sps # doctest: +SKIP
>>> count, bins, ignored = plt.hist(s, 50, density=True)
- >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\
+ >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ # doctest: +SKIP
... (sps.gamma(shape) * scale**shape))
- >>> plt.plot(bins, y, linewidth=2, color='r')
+ >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
>>> plt.show()
"""
- cdef ndarray oshape
- cdef double fshape
-
- oshape = <ndarray>PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oshape.shape == ():
- fshape = PyFloat_AsDouble(shape)
- if np.signbit(fshape):
- raise ValueError("shape < 0")
- return cont1_array_sc(self.internal_state, rk_standard_gamma,
- size, fshape, self.lock)
-
- if np.any(np.signbit(oshape)):
- raise ValueError("shape < 0")
- return cont1_array(self.internal_state, rk_standard_gamma, size,
- oshape, self.lock)
+ return cont(&legacy_standard_gamma, &self._aug_state, size, self.lock, 1,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
def gamma(self, shape, scale=1.0, size=None):
"""
@@ -1910,9 +1426,9 @@ cdef class RandomState:
Parameters
----------
shape : float or array_like of floats
- The shape of the gamma distribution. Should be greater than zero.
+ The shape of the gamma distribution. Must be non-negative.
scale : float or array_like of floats, optional
- The scale of the gamma distribution. Should be greater than zero.
+ The scale of the gamma distribution. Must be non-negative.
Default is equal to 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
@@ -1962,36 +1478,18 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> import scipy.special as sps
+ >>> import scipy.special as sps # doctest: +SKIP
>>> count, bins, ignored = plt.hist(s, 50, density=True)
- >>> y = bins**(shape-1)*(np.exp(-bins/scale) /
+ >>> y = bins**(shape-1)*(np.exp(-bins/scale) / # doctest: +SKIP
... (sps.gamma(shape)*scale**shape))
- >>> plt.plot(bins, y, linewidth=2, color='r')
+ >>> plt.plot(bins, y, linewidth=2, color='r') # doctest: +SKIP
>>> plt.show()
"""
- cdef ndarray oshape, oscale
- cdef double fshape, fscale
-
- oshape = <ndarray>PyArray_FROM_OTF(shape, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oshape.shape == oscale.shape == ():
- fshape = PyFloat_AsDouble(shape)
- fscale = PyFloat_AsDouble(scale)
- if np.signbit(fshape):
- raise ValueError("shape < 0")
- if np.signbit(fscale):
- raise ValueError("scale < 0")
- return cont2_array_sc(self.internal_state, rk_gamma, size, fshape,
- fscale, self.lock)
-
- if np.any(np.signbit(oshape)):
- raise ValueError("shape < 0")
- if np.any(np.signbit(oscale)):
- raise ValueError("scale < 0")
- return cont2_array(self.internal_state, rk_gamma, size, oshape, oscale,
- self.lock)
+ return cont(&legacy_gamma, &self._aug_state, size, self.lock, 2,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
def f(self, dfnum, dfden, size=None):
"""
@@ -2001,7 +1499,7 @@ cdef class RandomState:
Samples are drawn from an F distribution with specified parameters,
`dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
- freedom in denominator), where both parameters should be greater than
+ freedom in denominator), where both parameters must be greater than
zero.
The random variate of the F distribution (also known as the
@@ -2012,9 +1510,9 @@ cdef class RandomState:
Parameters
----------
dfnum : float or array_like of floats
- Degrees of freedom in numerator, should be > 0.
+ Degrees of freedom in numerator, must be > 0.
dfden : float or array_like of float
- Degrees of freedom in denominator, should be > 0.
+ Degrees of freedom in denominator, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2068,37 +1566,18 @@ cdef class RandomState:
The lower bound for the top 1% of the samples is :
- >>> sort(s)[-10]
- 7.61988120985
+ >>> np.sort(s)[-10]
+ 7.61988120985 # random
So there is about a 1% chance that the F statistic will exceed 7.62,
the measured value is 36, so the null hypothesis is rejected at the 1%
level.
"""
- cdef ndarray odfnum, odfden
- cdef double fdfnum, fdfden
-
- odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if odfnum.shape == odfden.shape == ():
- fdfnum = PyFloat_AsDouble(dfnum)
- fdfden = PyFloat_AsDouble(dfden)
-
- if fdfnum <= 0:
- raise ValueError("dfnum <= 0")
- if fdfden <= 0:
- raise ValueError("dfden <= 0")
- return cont2_array_sc(self.internal_state, rk_f, size, fdfnum,
- fdfden, self.lock)
-
- if np.any(np.less_equal(odfnum, 0.0)):
- raise ValueError("dfnum <= 0")
- if np.any(np.less_equal(odfden, 0.0)):
- raise ValueError("dfden <= 0")
- return cont2_array(self.internal_state, rk_f, size, odfnum, odfden,
- self.lock)
+ return cont(&legacy_f, &self._aug_state, size, self.lock, 2,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
def noncentral_f(self, dfnum, dfden, nonc, size=None):
"""
@@ -2114,15 +1593,15 @@ cdef class RandomState:
Parameters
----------
dfnum : float or array_like of floats
- Numerator degrees of freedom, should be > 0.
+ Numerator degrees of freedom, must be > 0.
.. versionchanged:: 1.14.0
Earlier NumPy versions required dfnum > 1.
dfden : float or array_like of floats
- Denominator degrees of freedom, should be > 0.
+ Denominator degrees of freedom, must be > 0.
nonc : float or array_like of floats
Non-centrality parameter, the sum of the squares of the numerator
- means, should be >= 0.
+ means, must be >= 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2166,40 +1645,16 @@ cdef class RandomState:
>>> NF = np.histogram(nc_vals, bins=50, density=True)
>>> c_vals = np.random.f(dfnum, dfden, 1000000)
>>> F = np.histogram(c_vals, bins=50, density=True)
+ >>> import matplotlib.pyplot as plt
>>> plt.plot(F[1][1:], F[0])
>>> plt.plot(NF[1][1:], NF[0])
>>> plt.show()
"""
- cdef ndarray odfnum, odfden, ononc
- cdef double fdfnum, fdfden, fnonc
-
- odfnum = <ndarray>PyArray_FROM_OTF(dfnum, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- odfden = <ndarray>PyArray_FROM_OTF(dfden, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if odfnum.shape == odfden.shape == ononc.shape == ():
- fdfnum = PyFloat_AsDouble(dfnum)
- fdfden = PyFloat_AsDouble(dfden)
- fnonc = PyFloat_AsDouble(nonc)
-
- if fdfnum <= 0:
- raise ValueError("dfnum <= 0")
- if fdfden <= 0:
- raise ValueError("dfden <= 0")
- if fnonc < 0:
- raise ValueError("nonc < 0")
- return cont3_array_sc(self.internal_state, rk_noncentral_f, size,
- fdfnum, fdfden, fnonc, self.lock)
-
- if np.any(np.less_equal(odfnum, 0.0)):
- raise ValueError("dfnum <= 0")
- if np.any(np.less_equal(odfden, 0.0)):
- raise ValueError("dfden <= 0")
- if np.any(np.less(ononc, 0.0)):
- raise ValueError("nonc < 0")
- return cont3_array(self.internal_state, rk_noncentral_f, size, odfnum,
- odfden, ononc, self.lock)
+ return cont(&legacy_noncentral_f, &self._aug_state, size, self.lock, 3,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE, None)
def chisquare(self, df, size=None):
"""
@@ -2215,7 +1670,7 @@ cdef class RandomState:
Parameters
----------
df : float or array_like of floats
- Number of degrees of freedom, should be > 0.
+ Number of degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2261,26 +1716,13 @@ cdef class RandomState:
Examples
--------
>>> np.random.chisquare(2,4)
- array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272])
+ array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
"""
- cdef ndarray odf
- cdef double fdf
-
- odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if odf.shape == ():
- fdf = PyFloat_AsDouble(df)
-
- if fdf <= 0:
- raise ValueError("df <= 0")
- return cont1_array_sc(self.internal_state, rk_chisquare, size, fdf,
- self.lock)
-
- if np.any(np.less_equal(odf, 0.0)):
- raise ValueError("df <= 0")
- return cont1_array(self.internal_state, rk_chisquare, size, odf,
- self.lock)
+ return cont(&legacy_chisquare, &self._aug_state, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
def noncentral_chisquare(self, df, nonc, size=None):
"""
@@ -2288,18 +1730,18 @@ cdef class RandomState:
Draw samples from a noncentral chi-square distribution.
- The noncentral :math:`\\chi^2` distribution is a generalisation of
+ The noncentral :math:`\\chi^2` distribution is a generalization of
the :math:`\\chi^2` distribution.
Parameters
----------
df : float or array_like of floats
- Degrees of freedom, should be > 0.
+ Degrees of freedom, must be > 0.
.. versionchanged:: 1.10.0
Earlier NumPy versions required dfnum > 1.
nonc : float or array_like of floats
- Non-centrality, should be non-negative.
+ Non-centrality, must be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2318,21 +1760,13 @@ cdef class RandomState:
.. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
\\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
- \\P_{Y_{df+2i}}(x),
+ P_{Y_{df+2i}}(x),
where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
- In Delhi (2007), it is noted that the noncentral chi-square is
- useful in bombing and coverage problems, the probability of
- killing the point target given by the noncentral chi-squared
- distribution.
-
References
----------
- .. [1] Delhi, M.S. Holla, "On a noncentral chi-square distribution in
- the analysis of weapon systems effectiveness", Metrika,
- Volume 15, Number 1 / December, 1970.
- .. [2] Wikipedia, "Noncentral chi-squared distribution"
+ .. [1] Wikipedia, "Noncentral chi-squared distribution"
https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution
Examples
@@ -2364,29 +1798,10 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray odf, ononc
- cdef double fdf, fnonc
-
- odf = <ndarray>PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- ononc = <ndarray>PyArray_FROM_OTF(nonc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if odf.shape == ononc.shape == ():
- fdf = PyFloat_AsDouble(df)
- fnonc = PyFloat_AsDouble(nonc)
-
- if fdf <= 0:
- raise ValueError("df <= 0")
- if fnonc < 0:
- raise ValueError("nonc < 0")
- return cont2_array_sc(self.internal_state, rk_noncentral_chisquare,
- size, fdf, fnonc, self.lock)
-
- if np.any(np.less_equal(odf, 0.0)):
- raise ValueError("df <= 0")
- if np.any(np.less(ononc, 0.0)):
- raise ValueError("nonc < 0")
- return cont2_array(self.internal_state, rk_noncentral_chisquare, size,
- odf, ononc, self.lock)
+ return cont(&legacy_noncentral_chisquare, &self._aug_state, size, self.lock, 2,
+ df, 'df', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
def standard_cauchy(self, size=None):
"""
@@ -2443,14 +1858,15 @@ cdef class RandomState:
--------
Draw samples and plot the distribution:
+ >>> import matplotlib.pyplot as plt
>>> s = np.random.standard_cauchy(1000000)
>>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
>>> plt.hist(s, bins=100)
>>> plt.show()
"""
- return cont0_array(self.internal_state, rk_standard_cauchy, size,
- self.lock)
+ return cont(&legacy_standard_cauchy, &self._aug_state, size, self.lock, 0,
+ 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None)
def standard_t(self, df, size=None):
"""
@@ -2466,7 +1882,7 @@ cdef class RandomState:
Parameters
----------
df : float or array_like of floats
- Degrees of freedom, should be > 0.
+ Degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2540,23 +1956,11 @@ cdef class RandomState:
probability of about 99% of being true.
"""
- cdef ndarray odf
- cdef double fdf
-
- odf = <ndarray> PyArray_FROM_OTF(df, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if odf.shape == ():
- fdf = PyFloat_AsDouble(df)
-
- if fdf <= 0:
- raise ValueError("df <= 0")
- return cont1_array_sc(self.internal_state, rk_standard_t, size,
- fdf, self.lock)
-
- if np.any(np.less_equal(odf, 0.0)):
- raise ValueError("df <= 0")
- return cont1_array(self.internal_state, rk_standard_t, size, odf,
- self.lock)
+ return cont(&legacy_standard_t, &self._aug_state, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0, '', CONS_NONE,
+ 0, '', CONS_NONE,
+ None)
def vonmises(self, mu, kappa, size=None):
"""
@@ -2628,33 +2032,18 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> from scipy.special import i0
+ >>> from scipy.special import i0 # doctest: +SKIP
>>> plt.hist(s, 50, density=True)
>>> x = np.linspace(-np.pi, np.pi, num=51)
- >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa))
- >>> plt.plot(x, y, linewidth=2, color='r')
+ >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa)) # doctest: +SKIP
+ >>> plt.plot(x, y, linewidth=2, color='r') # doctest: +SKIP
>>> plt.show()
"""
- cdef ndarray omu, okappa
- cdef double fmu, fkappa
-
- omu = <ndarray> PyArray_FROM_OTF(mu, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- okappa = <ndarray> PyArray_FROM_OTF(kappa, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if omu.shape == okappa.shape == ():
- fmu = PyFloat_AsDouble(mu)
- fkappa = PyFloat_AsDouble(kappa)
-
- if fkappa < 0:
- raise ValueError("kappa < 0")
- return cont2_array_sc(self.internal_state, rk_vonmises, size, fmu,
- fkappa, self.lock)
-
- if np.any(np.less(okappa, 0.0)):
- raise ValueError("kappa < 0")
- return cont2_array(self.internal_state, rk_vonmises, size, omu, okappa,
- self.lock)
+ return cont(&random_vonmises, &self._bitgen, size, self.lock, 2,
+ mu, 'mu', CONS_NONE,
+ kappa, 'kappa', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
def pareto(self, a, size=None):
"""
@@ -2683,7 +2072,7 @@ cdef class RandomState:
Parameters
----------
a : float or array_like of floats
- Shape of the distribution. Should be greater than zero.
+ Shape of the distribution. Must be positive.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2721,7 +2110,6 @@ cdef class RandomState:
projects in Sourceforge [1]_. It is one of the so-called
"fat-tailed" distributions.
-
References
----------
.. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
@@ -2749,22 +2137,10 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray oa
- cdef double fa
-
- oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oa.shape == ():
- fa = PyFloat_AsDouble(a)
-
- if fa <= 0:
- raise ValueError("a <= 0")
- return cont1_array_sc(self.internal_state, rk_pareto, size, fa,
- self.lock)
-
- if np.any(np.less_equal(oa, 0.0)):
- raise ValueError("a <= 0")
- return cont1_array(self.internal_state, rk_pareto, size, oa, self.lock)
+ return cont(&legacy_pareto, &self._aug_state, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
def weibull(self, a, size=None):
"""
@@ -2785,7 +2161,7 @@ cdef class RandomState:
Parameters
----------
a : float or array_like of floats
- Shape of the distribution. Should be greater than zero.
+ Shape parameter of the distribution. Must be nonnegative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2859,22 +2235,10 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray oa
- cdef double fa
-
- oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oa.shape == ():
- fa = PyFloat_AsDouble(a)
- if np.signbit(fa):
- raise ValueError("a < 0")
- return cont1_array_sc(self.internal_state, rk_weibull, size, fa,
- self.lock)
-
- if np.any(np.signbit(oa)):
- raise ValueError("a < 0")
- return cont1_array(self.internal_state, rk_weibull, size, oa,
- self.lock)
+ return cont(&legacy_weibull, &self._aug_state, size, self.lock, 1,
+ a, 'a', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
def power(self, a, size=None):
"""
@@ -2888,7 +2252,7 @@ cdef class RandomState:
Parameters
----------
a : float or array_like of floats
- Parameter of the distribution. Should be greater than zero.
+ Parameter of the distribution. Must be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2949,43 +2313,32 @@ cdef class RandomState:
Compare the power function distribution to the inverse of the Pareto.
- >>> from scipy import stats
+ >>> from scipy import stats # doctest: +SKIP
>>> rvs = np.random.power(5, 1000000)
>>> rvsp = np.random.pareto(5, 1000000)
>>> xx = np.linspace(0,1,100)
- >>> powpdf = stats.powerlaw.pdf(xx,5)
+ >>> powpdf = stats.powerlaw.pdf(xx,5) # doctest: +SKIP
>>> plt.figure()
>>> plt.hist(rvs, bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-')
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
>>> plt.title('np.random.power(5)')
>>> plt.figure()
>>> plt.hist(1./(1.+rvsp), bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-')
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
>>> plt.title('inverse of 1 + np.random.pareto(5)')
>>> plt.figure()
>>> plt.hist(1./(1.+rvsp), bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-')
+ >>> plt.plot(xx,powpdf,'r-') # doctest: +SKIP
>>> plt.title('inverse of stats.pareto(5)')
"""
- cdef ndarray oa
- cdef double fa
-
- oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oa.shape == ():
- fa = PyFloat_AsDouble(a)
- if np.signbit(fa):
- raise ValueError("a < 0")
- return cont1_array_sc(self.internal_state, rk_power, size, fa,
- self.lock)
-
- if np.any(np.signbit(oa)):
- raise ValueError("a < 0")
- return cont1_array(self.internal_state, rk_power, size, oa, self.lock)
+ return cont(&legacy_power, &self._aug_state, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
def laplace(self, loc=0.0, scale=1.0, size=None):
"""
@@ -3004,7 +2357,8 @@ cdef class RandomState:
loc : float or array_like of floats, optional
The position, :math:`\\mu`, of the distribution peak. Default is 0.
scale : float or array_like of floats, optional
- :math:`\\lambda`, the exponential decay. Default is 1.
+ :math:`\\lambda`, the exponential decay. Default is 1. Must be non-
+ negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -3066,24 +2420,10 @@ cdef class RandomState:
>>> plt.plot(x,g)
"""
- cdef ndarray oloc, oscale
- cdef double floc, fscale
-
- oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oloc.shape == oscale.shape == ():
- floc = PyFloat_AsDouble(loc)
- fscale = PyFloat_AsDouble(scale)
- if np.signbit(fscale):
- raise ValueError("scale < 0")
- return cont2_array_sc(self.internal_state, rk_laplace, size, floc,
- fscale, self.lock)
-
- if np.any(np.signbit(oscale)):
- raise ValueError("scale < 0")
- return cont2_array(self.internal_state, rk_laplace, size, oloc, oscale,
- self.lock)
+ return cont(&random_laplace, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
def gumbel(self, loc=0.0, scale=1.0, size=None):
"""
@@ -3100,7 +2440,8 @@ cdef class RandomState:
loc : float or array_like of floats, optional
The location of the mode of the distribution. Default is 0.
scale : float or array_like of floats, optional
- The scale parameter of the distribution. Default is 1.
+ The scale parameter of the distribution. Default is 1. Must be non-
+ negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -3197,24 +2538,10 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray oloc, oscale
- cdef double floc, fscale
-
- oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oloc.shape == oscale.shape == ():
- floc = PyFloat_AsDouble(loc)
- fscale = PyFloat_AsDouble(scale)
- if np.signbit(fscale):
- raise ValueError("scale < 0")
- return cont2_array_sc(self.internal_state, rk_gumbel, size, floc,
- fscale, self.lock)
-
- if np.any(np.signbit(oscale)):
- raise ValueError("scale < 0")
- return cont2_array(self.internal_state, rk_gumbel, size, oloc, oscale,
- self.lock)
+ return cont(&random_gumbel, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
def logistic(self, loc=0.0, scale=1.0, size=None):
"""
@@ -3230,7 +2557,7 @@ cdef class RandomState:
loc : float or array_like of floats, optional
Parameter of the distribution. Default is 0.
scale : float or array_like of floats, optional
- Parameter of the distribution. Should be greater than zero.
+ Parameter of the distribution. Must be non-negative.
Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
@@ -3279,35 +2606,22 @@ cdef class RandomState:
>>> loc, scale = 10, 1
>>> s = np.random.logistic(loc, scale, 10000)
+ >>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, bins=50)
# plot against distribution
>>> def logist(x, loc, scale):
- ... return exp((loc-x)/scale)/(scale*(1+exp((loc-x)/scale))**2)
- >>> plt.plot(bins, logist(bins, loc, scale)*count.max()/\\
- ... logist(bins, loc, scale).max())
+ ... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
+ >>> lgst_val = logist(bins, loc, scale)
+ >>> plt.plot(bins, lgst_val * count.max() / lgst_val.max())
>>> plt.show()
"""
- cdef ndarray oloc, oscale
- cdef double floc, fscale
-
- oloc = PyArray_FROM_OTF(loc, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oloc.shape == oscale.shape == ():
- floc = PyFloat_AsDouble(loc)
- fscale = PyFloat_AsDouble(scale)
- if np.signbit(fscale):
- raise ValueError("scale < 0")
- return cont2_array_sc(self.internal_state, rk_logistic, size, floc,
- fscale, self.lock)
-
- if np.any(np.signbit(oscale)):
- raise ValueError("scale < 0")
- return cont2_array(self.internal_state, rk_logistic, size, oloc,
- oscale, self.lock)
+ return cont(&random_logistic, &self._bitgen, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
def lognormal(self, mean=0.0, sigma=1.0, size=None):
"""
@@ -3325,8 +2639,8 @@ cdef class RandomState:
mean : float or array_like of floats, optional
Mean value of the underlying normal distribution. Default is 0.
sigma : float or array_like of floats, optional
- Standard deviation of the underlying normal distribution. Should
- be greater than zero. Default is 1.
+ Standard deviation of the underlying normal distribution. Must be
+ non-negative. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -3398,7 +2712,7 @@ cdef class RandomState:
>>> # values, drawn from a normal distribution.
>>> b = []
>>> for i in range(1000):
- ... a = 10. + np.random.random(100)
+ ... a = 10. + np.random.standard_normal(100)
... b.append(np.product(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
@@ -3414,24 +2728,10 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray omean, osigma
- cdef double fmean, fsigma
-
- omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- osigma = PyArray_FROM_OTF(sigma, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if omean.shape == osigma.shape == ():
- fmean = PyFloat_AsDouble(mean)
- fsigma = PyFloat_AsDouble(sigma)
- if np.signbit(fsigma):
- raise ValueError("sigma < 0")
- return cont2_array_sc(self.internal_state, rk_lognormal, size,
- fmean, fsigma, self.lock)
-
- if np.any(np.signbit(osigma)):
- raise ValueError("sigma < 0.0")
- return cont2_array(self.internal_state, rk_lognormal, size, omean,
- osigma, self.lock)
+ return cont(&legacy_lognormal, &self._aug_state, size, self.lock, 2,
+ mean, 'mean', CONS_NONE,
+ sigma, 'sigma', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
def rayleigh(self, scale=1.0, size=None):
"""
@@ -3445,7 +2745,7 @@ cdef class RandomState:
Parameters
----------
scale : float or array_like of floats, optional
- Scale, also equals the mode. Should be >= 0. Default is 1.
+ Scale, also equals the mode. Must be non-negative. Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -3479,6 +2779,7 @@ cdef class RandomState:
--------
Draw values from the distribution and plot the histogram
+ >>> from matplotlib.pyplot import hist
>>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True)
Wave heights tend to follow a Rayleigh distribution. If the mean wave
@@ -3492,25 +2793,13 @@ cdef class RandomState:
The percentage of waves larger than 3 meters is:
>>> 100.*sum(s>3)/1000000.
- 0.087300000000000003
+ 0.087300000000000003 # random
"""
- cdef ndarray oscale
- cdef double fscale
-
- oscale = <ndarray>PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oscale.shape == ():
- fscale = PyFloat_AsDouble(scale)
- if np.signbit(fscale):
- raise ValueError("scale < 0")
- return cont1_array_sc(self.internal_state, rk_rayleigh, size,
- fscale, self.lock)
-
- if np.any(np.signbit(oscale)):
- raise ValueError("scale < 0.0")
- return cont1_array(self.internal_state, rk_rayleigh, size, oscale,
- self.lock)
+ return cont(&random_rayleigh, &self._bitgen, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
def wald(self, mean, scale, size=None):
"""
@@ -3530,9 +2819,9 @@ cdef class RandomState:
Parameters
----------
mean : float or array_like of floats
- Distribution mean, should be > 0.
+ Distribution mean, must be > 0.
scale : float or array_like of floats
- Scale parameter, should be >= 0.
+ Scale parameter, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -3575,29 +2864,10 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray omean, oscale
- cdef double fmean, fscale
-
- omean = PyArray_FROM_OTF(mean, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- oscale = PyArray_FROM_OTF(scale, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if omean.shape == oscale.shape == ():
- fmean = PyFloat_AsDouble(mean)
- fscale = PyFloat_AsDouble(scale)
-
- if fmean <= 0:
- raise ValueError("mean <= 0")
- if fscale <= 0:
- raise ValueError("scale <= 0")
- return cont2_array_sc(self.internal_state, rk_wald, size, fmean,
- fscale, self.lock)
-
- if np.any(np.less_equal(omean,0.0)):
- raise ValueError("mean <= 0.0")
- elif np.any(np.less_equal(oscale,0.0)):
- raise ValueError("scale <= 0.0")
- return cont2_array(self.internal_state, rk_wald, size, omean, oscale,
- self.lock)
+ return cont(&legacy_wald, &self._aug_state, size, self.lock, 2,
+ mean, 'mean', CONS_POSITIVE,
+ scale, 'scale', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
def triangular(self, left, mode, right, size=None):
"""
@@ -3617,9 +2887,9 @@ cdef class RandomState:
Lower limit.
mode : float or array_like of floats
The value where the peak of the distribution occurs.
- The value should fulfill the condition ``left <= mode <= right``.
+ The value must fulfill the condition ``left <= mode <= right``.
right : float or array_like of floats
- Upper limit, should be larger than `left`.
+ Upper limit, must be larger than `left`.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -3662,14 +2932,15 @@ cdef class RandomState:
>>> plt.show()
"""
- cdef ndarray oleft, omode, oright
+ cdef bint is_scalar = True
cdef double fleft, fmode, fright
+ cdef np.ndarray oleft, omode, oright
- oleft = <ndarray>PyArray_FROM_OTF(left, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- omode = <ndarray>PyArray_FROM_OTF(mode, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- oright = <ndarray>PyArray_FROM_OTF(right, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
+ oleft = <np.ndarray>np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ omode = <np.ndarray>np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ oright = <np.ndarray>np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ALIGNED)
- if oleft.shape == omode.shape == oright.shape == ():
+ if np.PyArray_NDIM(oleft) == np.PyArray_NDIM(omode) == np.PyArray_NDIM(oright) == 0:
fleft = PyFloat_AsDouble(left)
fright = PyFloat_AsDouble(right)
fmode = PyFloat_AsDouble(mode)
@@ -3680,8 +2951,10 @@ cdef class RandomState:
raise ValueError("mode > right")
if fleft == fright:
raise ValueError("left == right")
- return cont3_array_sc(self.internal_state, rk_triangular, size,
- fleft, fmode, fright, self.lock)
+ return cont(&random_triangular, &self._bitgen, size, self.lock, 3,
+ fleft, '', CONS_NONE,
+ fmode, '', CONS_NONE,
+ fright, '', CONS_NONE, None)
if np.any(np.greater(oleft, omode)):
raise ValueError("left > mode")
@@ -3689,8 +2962,11 @@ cdef class RandomState:
raise ValueError("mode > right")
if np.any(np.equal(oleft, oright)):
raise ValueError("left == right")
- return cont3_array(self.internal_state, rk_triangular, size, oleft,
- omode, oright, self.lock)
+
+ return cont_broadcast_3(&random_triangular, &self._bitgen, size, self.lock,
+ oleft, '', CONS_NONE,
+ omode, '', CONS_NONE,
+ oright, '', CONS_NONE)
# Complicated, discrete distributions:
def binomial(self, n, p, size=None):
@@ -3778,36 +3054,66 @@ cdef class RandomState:
# answer = 0.38885, or 38%.
"""
- cdef ndarray on, op
- cdef long ln
- cdef double fp
-
- on = <ndarray>PyArray_FROM_OTF(n, NPY_LONG, NPY_ARRAY_ALIGNED)
- op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if on.shape == op.shape == ():
- fp = PyFloat_AsDouble(p)
- ln = PyInt_AsLong(n)
-
- if ln < 0:
- raise ValueError("n < 0")
- if fp < 0:
- raise ValueError("p < 0")
- elif fp > 1:
- raise ValueError("p > 1")
- elif np.isnan(fp):
- raise ValueError("p is nan")
- return discnp_array_sc(self.internal_state, rk_binomial, size, ln,
- fp, self.lock)
-
- if np.any(np.less(n, 0)):
- raise ValueError("n < 0")
- if np.any(np.less(p, 0)):
- raise ValueError("p < 0")
- if np.any(np.greater(p, 1)):
- raise ValueError("p > 1")
- return discnp_array(self.internal_state, rk_binomial, size, on, op,
- self.lock)
+
+ # Uses a custom implementation since self._binomial is required
+ cdef double _dp = 0
+ cdef long _in = 0
+ cdef bint is_scalar = True
+ cdef np.npy_intp i, cnt
+ cdef np.ndarray randoms
+ cdef long *randoms_data
+ cdef np.broadcast it
+
+ p_arr = <np.ndarray>np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0
+ n_arr = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_LONG, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0
+
+ if not is_scalar:
+ check_array_constraint(p_arr, 'p', CONS_BOUNDED_0_1)
+ check_array_constraint(n_arr, 'n', CONS_NON_NEGATIVE)
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, int)
+ else:
+ it = np.PyArray_MultiIterNew2(p_arr, n_arr)
+ randoms = <np.ndarray>np.empty(it.shape, int)
+
+ randoms_data = <long *>np.PyArray_DATA(randoms)
+ cnt = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
+ with self.lock, nogil:
+ for i in range(cnt):
+ _dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ _in = (<long*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ (<long*>np.PyArray_MultiIter_DATA(it, 0))[0] = \
+ legacy_random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+ _dp = PyFloat_AsDouble(p)
+ _in = <long>n
+ check_constraint(_dp, 'p', CONS_BOUNDED_0_1)
+ check_constraint(<double>_in, 'n', CONS_NON_NEGATIVE)
+
+ if size is None:
+ with self.lock:
+ return <long>legacy_random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
+
+ randoms = <np.ndarray>np.empty(size, int)
+ cnt = np.PyArray_SIZE(randoms)
+ randoms_data = <long *>np.PyArray_DATA(randoms)
+
+ with self.lock, nogil:
+ for i in range(cnt):
+ randoms_data[i] = legacy_random_binomial(&self._bitgen, _dp, _in,
+ &self._binomial)
+
+ return randoms
def negative_binomial(self, n, p, size=None):
"""
@@ -3816,14 +3122,13 @@ cdef class RandomState:
Draw samples from a negative binomial distribution.
Samples are drawn from a negative binomial distribution with specified
- parameters, `n` successes and `p` probability of success where `n` is an
- integer > 0 and `p` is in the interval [0, 1].
+ parameters, `n` successes and `p` probability of success where `n`
+ is > 0 and `p` is in the interval [0, 1].
Parameters
----------
- n : int or array_like of ints
- Parameter of the distribution, > 0. Floats are also accepted,
- but they will be truncated to integers.
+ n : float or array_like of floats
+ Parameter of the distribution, > 0.
p : float or array_like of floats
Parameter of the distribution, >= 0 and <=1.
size : int or tuple of ints, optional
@@ -3841,14 +3146,17 @@ cdef class RandomState:
Notes
-----
- The probability density for the negative binomial distribution is
+ The probability mass function of the negative binomial distribution is
- .. math:: P(N;n,p) = \\binom{N+n-1}{N}p^{n}(1-p)^{N},
+ .. math:: P(N;n,p) = \\frac{\\Gamma(N+n)}{N!\\Gamma(n)}p^{n}(1-p)^{N},
where :math:`n` is the number of successes, :math:`p` is the
- probability of success, and :math:`N+n` is the number of trials.
- The negative binomial distribution gives the probability of N
- failures given n successes, with a success on the last trial.
+ probability of success, :math:`N+n` is the number of trials, and
+ :math:`\\Gamma` is the gamma function. When :math:`n` is an integer,
+ :math:`\\frac{\\Gamma(N+n)}{N!\\Gamma(n)} = \\binom{N+n-1}{N}`, which is
+ the more common form of this term in the the pmf. The negative
+ binomial distribution gives the probability of N failures given n
+ successes, with a success on the last trial.
If one throws a die repeatedly until the third time a "1" appears,
then the probability distribution of the number of non-"1"s that
@@ -3873,40 +3181,17 @@ cdef class RandomState:
single success after drilling 5 wells, after 6 wells, etc.?
>>> s = np.random.negative_binomial(1, 0.1, 100000)
- >>> for i in range(1, 11):
+ >>> for i in range(1, 11): # doctest: +SKIP
... probability = sum(s<i) / 100000.
- ... print i, "wells drilled, probability of one success =", probability
+ ... print(i, "wells drilled, probability of one success =", probability)
"""
- cdef ndarray on
- cdef ndarray op
- cdef double fn
- cdef double fp
-
- on = <ndarray>PyArray_FROM_OTF(n, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
- op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if on.shape == op.shape == ():
- fp = PyFloat_AsDouble(p)
- fn = PyFloat_AsDouble(n)
-
- if fn <= 0:
- raise ValueError("n <= 0")
- if fp < 0:
- raise ValueError("p < 0")
- elif fp > 1:
- raise ValueError("p > 1")
- return discdd_array_sc(self.internal_state, rk_negative_binomial,
- size, fn, fp, self.lock)
-
- if np.any(np.less_equal(n, 0)):
- raise ValueError("n <= 0")
- if np.any(np.less(p, 0)):
- raise ValueError("p < 0")
- if np.any(np.greater(p, 1)):
- raise ValueError("p > 1")
- return discdd_array(self.internal_state, rk_negative_binomial, size,
- on, op, self.lock)
+ out = disc(&legacy_negative_binomial, &self._aug_state, size, self.lock, 2, 0,
+ n, 'n', CONS_POSITIVE,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE)
+ # Match historical output type
+ return int64_to_long(out)
def poisson(self, lam=1.0, size=None):
"""
@@ -3920,7 +3205,7 @@ cdef class RandomState:
Parameters
----------
lam : float or array_like of floats
- Expectation of interval, should be >= 0. A sequence of expectation
+ Expectation of interval, must be >= 0. A sequence of expectation
intervals must be broadcastable over the requested size.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
@@ -3944,7 +3229,7 @@ cdef class RandomState:
:math:`k` events occurring within the observed
interval :math:`\\lambda`.
- Because the output is limited to the range of the C long type, a
+ Because the output is limited to the range of the C int64 type, a
ValueError is raised when `lam` is within 10 sigma of the maximum
representable value.
@@ -3974,27 +3259,12 @@ cdef class RandomState:
>>> s = np.random.poisson(lam=(100., 500.), size=(100, 2))
"""
- cdef ndarray olam
- cdef double flam
-
- olam = <ndarray>PyArray_FROM_OTF(lam, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if olam.shape == ():
- flam = PyFloat_AsDouble(lam)
-
- if lam < 0:
- raise ValueError("lam < 0")
- if lam > self.poisson_lam_max:
- raise ValueError("lam value too large")
- return discd_array_sc(self.internal_state, rk_poisson, size, flam,
- self.lock)
-
- if np.any(np.less(olam, 0)):
- raise ValueError("lam < 0")
- if np.any(np.greater(olam, self.poisson_lam_max)):
- raise ValueError("lam value too large.")
- return discd_array(self.internal_state, rk_poisson, size, olam,
- self.lock)
+ out = disc(&legacy_random_poisson, &self._bitgen, size, self.lock, 1, 0,
+ lam, 'lam', LEGACY_CONS_POISSON,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+ # Match historical output type
+ return int64_to_long(out)
def zipf(self, a, size=None):
"""
@@ -4013,7 +3283,7 @@ cdef class RandomState:
Parameters
----------
a : float or array_like of floats
- Distribution parameter. Should be greater than 1.
+ Distribution parameter. Must be greater than 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -4059,35 +3329,23 @@ cdef class RandomState:
the probability density function:
>>> import matplotlib.pyplot as plt
- >>> from scipy import special
+ >>> from scipy import special # doctest: +SKIP
Truncate s values at 50 so plot is interesting:
>>> count, bins, ignored = plt.hist(s[s<50], 50, density=True)
>>> x = np.arange(1., 50.)
- >>> y = x**(-a) / special.zetac(a)
- >>> plt.plot(x, y/max(y), linewidth=2, color='r')
+ >>> y = x**(-a) / special.zetac(a) # doctest: +SKIP
+ >>> plt.plot(x, y/max(y), linewidth=2, color='r') # doctest: +SKIP
>>> plt.show()
"""
- cdef ndarray oa
- cdef double fa
-
- oa = <ndarray>PyArray_FROM_OTF(a, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if oa.shape == ():
- fa = PyFloat_AsDouble(a)
-
- # use logic that ensures NaN is rejected.
- if not fa > 1.0:
- raise ValueError("'a' must be a valid float > 1.0")
- return discd_array_sc(self.internal_state, rk_zipf, size, fa,
- self.lock)
-
- # use logic that ensures NaN is rejected.
- if not np.all(np.greater(oa, 1.0)):
- raise ValueError("'a' must contain valid floats > 1.0")
- return discd_array(self.internal_state, rk_zipf, size, oa, self.lock)
+ out = disc(&legacy_random_zipf, &self._bitgen, size, self.lock, 1, 0,
+ a, 'a', CONS_GT_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+ # Match historical output type
+ return int64_to_long(out)
def geometric(self, p, size=None):
"""
@@ -4135,27 +3393,12 @@ cdef class RandomState:
0.34889999999999999 #random
"""
- cdef ndarray op
- cdef double fp
-
- op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if op.shape == ():
- fp = PyFloat_AsDouble(p)
-
- if fp < 0.0:
- raise ValueError("p < 0.0")
- if fp > 1.0:
- raise ValueError("p > 1.0")
- return discd_array_sc(self.internal_state, rk_geometric, size, fp,
- self.lock)
-
- if np.any(np.less(op, 0.0)):
- raise ValueError("p < 0.0")
- if np.any(np.greater(op, 1.0)):
- raise ValueError("p > 1.0")
- return discd_array(self.internal_state, rk_geometric, size, op,
- self.lock)
+ out = disc(&legacy_random_geometric, &self._bitgen, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_GT_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+ # Match historical output type
+ return int64_to_long(out)
def hypergeometric(self, ngood, nbad, nsample, size=None):
"""
@@ -4164,9 +3407,9 @@ cdef class RandomState:
Draw samples from a Hypergeometric distribution.
Samples are drawn from a hypergeometric distribution with specified
- parameters, ngood (ways to make a good selection), nbad (ways to make
- a bad selection), and nsample = number of items sampled, which is less
- than or equal to the sum ngood + nbad.
+ parameters, `ngood` (ways to make a good selection), `nbad` (ways to make
+ a bad selection), and `nsample` (number of items sampled, which is less
+ than or equal to the sum ``ngood + nbad``).
Parameters
----------
@@ -4180,14 +3423,16 @@ cdef class RandomState:
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``ngood``, ``nbad``, and ``nsample``
+ a single value is returned if `ngood`, `nbad`, and `nsample`
are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size``
samples are drawn.
Returns
-------
out : ndarray or scalar
- Drawn samples from the parameterized hypergeometric distribution.
+ Drawn samples from the parameterized hypergeometric distribution. Each
+ sample is the number of good items within a randomly selected subset of
+ size `nsample` taken from a set of `ngood` good items and `nbad` bad items.
See Also
--------
@@ -4202,11 +3447,11 @@ cdef class RandomState:
where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g`
- for P(x) the probability of x successes, g = ngood, b = nbad, and
- n = number of samples.
+ for P(x) the probability of ``x`` good results in the drawn sample,
+ g = `ngood`, b = `nbad`, and n = `nsample`.
- Consider an urn with black and white marbles in it, ngood of them
- black and nbad are white. If you draw nsample balls without
+ Consider an urn with black and white marbles in it, `ngood` of them
+ are black and `nbad` are white. If you draw `nsample` balls without
replacement, then the hypergeometric distribution describes the
distribution of black balls in the drawn sample.
@@ -4233,6 +3478,7 @@ cdef class RandomState:
>>> ngood, nbad, nsamp = 100, 2, 10
# number of good, number of bad, and number of samples
>>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)
+ >>> from matplotlib.pyplot import hist
>>> hist(s)
# note that it is very unlikely to grab both bad items
@@ -4245,39 +3491,42 @@ cdef class RandomState:
# answer = 0.003 ... pretty unlikely!
"""
- cdef ndarray ongood, onbad, onsample
- cdef long lngood, lnbad, lnsample
-
- ongood = <ndarray>PyArray_FROM_OTF(ngood, NPY_LONG, NPY_ARRAY_ALIGNED)
- onbad = <ndarray>PyArray_FROM_OTF(nbad, NPY_LONG, NPY_ARRAY_ALIGNED)
- onsample = <ndarray>PyArray_FROM_OTF(nsample, NPY_LONG, NPY_ARRAY_ALIGNED)
-
- if ongood.shape == onbad.shape == onsample.shape == ():
- lngood = PyInt_AsLong(ngood)
- lnbad = PyInt_AsLong(nbad)
- lnsample = PyInt_AsLong(nsample)
-
- if lngood < 0:
- raise ValueError("ngood < 0")
- if lnbad < 0:
- raise ValueError("nbad < 0")
- if lnsample < 1:
- raise ValueError("nsample < 1")
+ cdef bint is_scalar = True
+ cdef np.ndarray ongood, onbad, onsample
+ cdef int64_t lngood, lnbad, lnsample
+
+ # This cast to long is required to ensure that the values are inbounds
+ ongood = <np.ndarray>np.PyArray_FROM_OTF(ngood, np.NPY_LONG, np.NPY_ALIGNED)
+ onbad = <np.ndarray>np.PyArray_FROM_OTF(nbad, np.NPY_LONG, np.NPY_ALIGNED)
+ onsample = <np.ndarray>np.PyArray_FROM_OTF(nsample, np.NPY_LONG, np.NPY_ALIGNED)
+
+ if np.PyArray_NDIM(ongood) == np.PyArray_NDIM(onbad) == np.PyArray_NDIM(onsample) == 0:
+
+ lngood = <int64_t>ngood
+ lnbad = <int64_t>nbad
+ lnsample = <int64_t>nsample
+
if lngood + lnbad < lnsample:
raise ValueError("ngood + nbad < nsample")
- return discnmN_array_sc(self.internal_state, rk_hypergeometric,
- size, lngood, lnbad, lnsample, self.lock)
-
- if np.any(np.less(ongood, 0)):
- raise ValueError("ngood < 0")
- if np.any(np.less(onbad, 0)):
- raise ValueError("nbad < 0")
- if np.any(np.less(onsample, 1)):
- raise ValueError("nsample < 1")
- if np.any(np.less(np.add(ongood, onbad),onsample)):
+ out = disc(&legacy_random_hypergeometric, &self._bitgen, size, self.lock, 0, 3,
+ lngood, 'ngood', CONS_NON_NEGATIVE,
+ lnbad, 'nbad', CONS_NON_NEGATIVE,
+ lnsample, 'nsample', CONS_GTE_1)
+ # Match historical output type
+ return int64_to_long(out)
+
+ if np.any(np.less(np.add(ongood, onbad), onsample)):
raise ValueError("ngood + nbad < nsample")
- return discnmN_array(self.internal_state, rk_hypergeometric, size,
- ongood, onbad, onsample, self.lock)
+ # Convert to int64, if necessary, to use int64 infrastructure
+ ongood = ongood.astype(np.int64)
+ onbad = onbad.astype(np.int64)
+ onsample = onsample.astype(np.int64)
+ out = discrete_broadcast_iii(&legacy_random_hypergeometric,&self._bitgen, size, self.lock,
+ ongood, 'ngood', CONS_NON_NEGATIVE,
+ onbad, 'nbad', CONS_NON_NEGATIVE,
+ onsample, 'nsample', CONS_GTE_1)
+ # Match historical output type
+ return int64_to_long(out)
def logseries(self, p, size=None):
"""
@@ -4342,44 +3591,30 @@ cdef class RandomState:
>>> a = .6
>>> s = np.random.logseries(a, 10000)
+ >>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s)
# plot against distribution
>>> def logseries(k, p):
- ... return -p**k/(k*log(1-p))
+ ... return -p**k/(k*np.log(1-p))
>>> plt.plot(bins, logseries(bins, a)*count.max()/
- logseries(bins, a).max(), 'r')
+ ... logseries(bins, a).max(), 'r')
>>> plt.show()
"""
- cdef ndarray op
- cdef double fp
-
- op = <ndarray>PyArray_FROM_OTF(p, NPY_DOUBLE, NPY_ARRAY_ALIGNED)
-
- if op.shape == ():
- fp = PyFloat_AsDouble(p)
-
- if fp <= 0.0:
- raise ValueError("p <= 0.0")
- if fp >= 1.0:
- raise ValueError("p >= 1.0")
- return discd_array_sc(self.internal_state, rk_logseries, size, fp,
- self.lock)
-
- if np.any(np.less_equal(op, 0.0)):
- raise ValueError("p <= 0.0")
- if np.any(np.greater_equal(op, 1.0)):
- raise ValueError("p >= 1.0")
- return discd_array(self.internal_state, rk_logseries, size, op,
- self.lock)
+ out = disc(&legacy_random_logseries, &self._bitgen, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+ # Match historical output type
+ return int64_to_long(out)
# Multivariate distributions:
def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
tol=1e-8):
"""
- multivariate_normal(mean, cov[, size, check_valid, tol])
+ multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
Draw random samples from a multivariate normal distribution.
@@ -4406,6 +3641,7 @@ cdef class RandomState:
Behavior when the covariance matrix is not positive semidefinite.
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
+ cov is cast to double before the check.
Returns
-------
@@ -4474,7 +3710,7 @@ cdef class RandomState:
standard deviation:
>>> list((x[0,0,:] - mean) < 0.6)
- [True, True]
+ [True, True] # random
"""
from numpy.dual import svd
@@ -4484,7 +3720,7 @@ cdef class RandomState:
cov = np.array(cov)
if size is None:
shape = []
- elif isinstance(size, (int, long, np.integer)):
+ elif isinstance(size, (int, np.integer)):
shape = [size]
else:
shape = size
@@ -4511,40 +3747,43 @@ cdef class RandomState:
# covariance. Note that sqrt(s)*v where (u,s,v) is the singular value
# decomposition of cov is such an A.
#
- # Also check that cov is symmetric positive-semidefinite. If so, the u.T and v
+ # Also check that cov is positive-semidefinite. If so, the u.T and v
# matrices should be equal up to roundoff error if cov is
# symmetric and the singular value of the corresponding row is
# not zero. We continue to use the SVD rather than Cholesky in
- # order to preserve current outputs.
+ # order to preserve current outputs. Note that symmetry has not
+ # been checked.
+ # GH10839, ensure double to make tol meaningful
+ cov = cov.astype(np.double)
(u, s, v) = svd(cov)
if check_valid != 'ignore':
if check_valid != 'warn' and check_valid != 'raise':
- raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'")
+ raise ValueError(
+ "check_valid must equal 'warn', 'raise', or 'ignore'")
psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
if not psd:
if check_valid == 'warn':
- warnings.warn(
- "covariance is not symmetric positive-semidefinite.",
+ warnings.warn("covariance is not positive-semidefinite.",
RuntimeWarning)
else:
raise ValueError(
- "covariance is not symmetric positive-semidefinite.")
+ "covariance is not positive-semidefinite.")
x = np.dot(x, np.sqrt(s)[:, None] * v)
x += mean
x.shape = tuple(final_shape)
return x
- def multinomial(self, npy_intp n, object pvals, size=None):
+ def multinomial(self, np.npy_intp n, object pvals, size=None):
"""
multinomial(n, pvals, size=None)
Draw samples from a multinomial distribution.
- The multinomial distribution is a multivariate generalisation of the
+ The multinomial distribution is a multivariate generalization of the
binomial distribution. Take an experiment with one of ``p``
possible outcomes. An example of such an experiment is throwing a dice,
where the outcome can be 1 through 6. Each sample drawn from the
@@ -4558,7 +3797,7 @@ cdef class RandomState:
Number of experiments.
pvals : sequence of floats, length p
Probabilities of each of the ``p`` different outcomes. These
- should sum to 1 (however, the last element is always assumed to
+ must sum to 1 (however, the last element is always assumed to
account for the remaining probability, as long as
``sum(pvals[:-1]) <= 1)``.
size : int or tuple of ints, optional
@@ -4580,14 +3819,14 @@ cdef class RandomState:
Throw a dice 20 times:
>>> np.random.multinomial(20, [1/6.]*6, size=1)
- array([[4, 1, 7, 5, 2, 1]])
+ array([[4, 1, 7, 5, 2, 1]]) # random
It landed 4 times on 1, once on 2, etc.
Now, throw the dice 20 times, and 20 times again:
>>> np.random.multinomial(20, [1/6.]*6, size=2)
- array([[3, 4, 3, 3, 4, 3],
+ array([[3, 4, 3, 3, 4, 3], # random
[2, 4, 3, 4, 0, 7]])
For the first run, we threw 3 times 1, 4 times 2, etc. For the second,
@@ -4596,7 +3835,7 @@ cdef class RandomState:
A loaded die is more likely to land on number 6:
>>> np.random.multinomial(100, [1/7.]*5 + [2/7.])
- array([11, 16, 14, 17, 16, 26])
+ array([11, 16, 14, 17, 16, 26]) # random
The probability inputs should be normalized. As an implementation
detail, the value of the last entry is ignored and assumed to take
@@ -4605,49 +3844,48 @@ cdef class RandomState:
other should be sampled like so:
>>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
- array([38, 62])
+ array([38, 62]) # random
not like:
>>> np.random.multinomial(100, [1.0, 2.0]) # WRONG
- array([100, 0])
+ Traceback (most recent call last):
+ ValueError: pvals < 0, pvals > 1 or pvals contains NaNs
"""
- cdef npy_intp d
- cdef ndarray parr "arrayObject_parr", mnarr "arrayObject_mnarr"
+ cdef np.npy_intp d, i, sz, offset
+ cdef np.ndarray parr, mnarr
cdef double *pix
cdef long *mnix
- cdef npy_intp i, j, dn, sz
- cdef double Sum
+ cdef long ni
d = len(pvals)
- parr = <ndarray>PyArray_ContiguousFromObject(pvals, NPY_DOUBLE, 1, 1)
- pix = <double*>PyArray_DATA(parr)
-
+ parr = <np.ndarray>np.PyArray_FROM_OTF(
+ pvals, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
+ pix = <double*>np.PyArray_DATA(parr)
+ check_array_constraint(parr, 'pvals', CONS_BOUNDED_0_1)
if kahan_sum(pix, d-1) > (1.0 + 1e-12):
raise ValueError("sum(pvals[:-1]) > 1.0")
- shape = _shape_from_size(size, d)
-
- multin = np.zeros(shape, int)
- mnarr = <ndarray>multin
- mnix = <long*>PyArray_DATA(mnarr)
- sz = PyArray_SIZE(mnarr)
- with self.lock, nogil, cython.cdivision(True):
- i = 0
- while i < sz:
- Sum = 1.0
- dn = n
- for j from 0 <= j < d-1:
- mnix[i+j] = rk_binomial(self.internal_state, dn, pix[j]/Sum)
- dn = dn - mnix[i+j]
- if dn <= 0:
- break
- Sum = Sum - pix[j]
- if dn > 0:
- mnix[i+d-1] = dn
-
- i = i + d
+ if size is None:
+ shape = (d,)
+ else:
+ try:
+ shape = (operator.index(size), d)
+ except:
+ shape = tuple(size) + (d,)
+
+ multin = np.zeros(shape, dtype=int)
+ mnarr = <np.ndarray>multin
+ mnix = <long*>np.PyArray_DATA(mnarr)
+ sz = np.PyArray_SIZE(mnarr)
+ ni = n
+ check_constraint(ni, 'n', CONS_NON_NEGATIVE)
+ offset = 0
+ with self.lock, nogil:
+ for i in range(sz // d):
+ legacy_random_multinomial(&self._bitgen, ni, &mnix[offset], pix, d, &self._binomial)
+ offset += d
return multin
@@ -4659,8 +3897,9 @@ cdef class RandomState:
Draw `size` samples of dimension k from a Dirichlet distribution. A
Dirichlet-distributed random variable can be seen as a multivariate
- generalization of a Beta distribution. Dirichlet pdf is the conjugate
- prior of a multinomial in Bayesian inference.
+ generalization of a Beta distribution. The Dirichlet distribution
+ is a conjugate prior of a multinomial distribution in Bayesian
+ inference.
Parameters
----------
@@ -4684,13 +3923,23 @@ cdef class RandomState:
Notes
-----
- .. math:: X \\approx \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i}
+ The Dirichlet distribution is a distribution over vectors
+ :math:`x` that fulfil the conditions :math:`x_i>0` and
+ :math:`\\sum_{i=1}^k x_i = 1`.
+
+ The probability density function :math:`p` of a
+ Dirichlet-distributed random vector :math:`X` is
+ proportional to
+
+ .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i},
+
+ where :math:`\\alpha` is a vector containing the positive
+ concentration parameters.
- Uses the following property for computation: for each dimension,
- draw a random sample y_i from a standard gamma generator of shape
- `alpha_i`, then
- :math:`X = \\frac{1}{\\sum_{i=1}^k{y_i}} (y_1, \\ldots, y_n)` is
- Dirichlet distributed.
+ The method uses the following property for computation: let :math:`Y`
+ be a random vector which has components that follow a standard gamma
+ distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y`
+ is Dirichlet-distributed
References
----------
@@ -4710,6 +3959,7 @@ cdef class RandomState:
>>> s = np.random.dirichlet((10, 5, 3), 20).transpose()
+ >>> import matplotlib.pyplot as plt
>>> plt.barh(range(20), s[0])
>>> plt.barh(range(20), s[1], left=s[0], color='g')
>>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
@@ -4717,58 +3967,62 @@ cdef class RandomState:
"""
- #=================
+ # =================
# Pure python algo
- #=================
- #alpha = N.atleast_1d(alpha)
- #k = alpha.size
-
- #if n == 1:
- # val = N.zeros(k)
- # for i in range(k):
- # val[i] = sgamma(alpha[i], n)
- # val /= N.sum(val)
- #else:
- # val = N.zeros((k, n))
- # for i in range(k):
- # val[i] = sgamma(alpha[i], n)
- # val /= N.sum(val, axis = 0)
- # val = val.T
-
- #return val
-
- cdef npy_intp k
- cdef npy_intp totsize
- cdef ndarray alpha_arr, val_arr
- cdef double *alpha_data
- cdef double *val_data
- cdef npy_intp i, j
- cdef double acc, invacc
-
- k = len(alpha)
- alpha_arr = <ndarray>PyArray_ContiguousFromObject(alpha, NPY_DOUBLE, 1, 1)
+ # =================
+ # alpha = N.atleast_1d(alpha)
+ # k = alpha.size
+
+ # if n == 1:
+ # val = N.zeros(k)
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val)
+ # else:
+ # val = N.zeros((k, n))
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val, axis = 0)
+ # val = val.T
+ # return val
+
+ cdef np.npy_intp k, totsize, i, j
+ cdef np.ndarray alpha_arr, val_arr
+ cdef double *alpha_data
+ cdef double *val_data
+ cdef double acc, invacc
+
+ k = len(alpha)
+ alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(
+ alpha, np.NPY_DOUBLE, np.NPY_ALIGNED | np.NPY_ARRAY_C_CONTIGUOUS)
if np.any(np.less_equal(alpha_arr, 0)):
raise ValueError('alpha <= 0')
- alpha_data = <double*>PyArray_DATA(alpha_arr)
+ alpha_data = <double*>np.PyArray_DATA(alpha_arr)
- shape = _shape_from_size(size, k)
+ if size is None:
+ shape = (k,)
+ else:
+ try:
+ shape = (operator.index(size), k)
+ except:
+ shape = tuple(size) + (k,)
- diric = np.zeros(shape, np.float64)
- val_arr = <ndarray>diric
- val_data= <double*>PyArray_DATA(val_arr)
+ diric = np.zeros(shape, np.float64)
+ val_arr = <np.ndarray>diric
+ val_data = <double*>np.PyArray_DATA(val_arr)
i = 0
- totsize = PyArray_SIZE(val_arr)
+ totsize = np.PyArray_SIZE(val_arr)
with self.lock, nogil:
while i < totsize:
acc = 0.0
- for j from 0 <= j < k:
- val_data[i+j] = rk_standard_gamma(self.internal_state,
- alpha_data[j])
- acc = acc + val_data[i+j]
- invacc = 1/acc
- for j from 0 <= j < k:
- val_data[i+j] = val_data[i+j] * invacc
+ for j in range(k):
+ val_data[i+j] = legacy_standard_gamma(&self._aug_state,
+ alpha_data[j])
+ acc = acc + val_data[i + j]
+ invacc = 1/acc
+ for j in range(k):
+ val_data[i + j] = val_data[i + j] * invacc
i = i + k
return diric
@@ -4798,20 +4052,20 @@ cdef class RandomState:
>>> arr = np.arange(10)
>>> np.random.shuffle(arr)
>>> arr
- [1 7 5 2 9 4 3 6 0 8]
+ [1 7 5 2 9 4 3 6 0 8] # random
Multi-dimensional arrays are only shuffled along the first axis:
>>> arr = np.arange(9).reshape((3, 3))
>>> np.random.shuffle(arr)
>>> arr
- array([[3, 4, 5],
+ array([[3, 4, 5], # random
[6, 7, 8],
[0, 1, 2]])
"""
cdef:
- npy_intp i, j, n = len(x), stride, itemsize
+ np.npy_intp i, j, n = len(x), stride, itemsize
char* x_ptr
char* buf_ptr
@@ -4819,29 +4073,30 @@ cdef class RandomState:
# Fast, statically typed path: shuffle the underlying buffer.
# Only for non-empty, 1d objects of class ndarray (subclasses such
# as MaskedArrays may not support this approach).
- x_ptr = <char*><size_t>x.ctypes.data
+ x_ptr = <char*><size_t>np.PyArray_DATA(x)
stride = x.strides[0]
itemsize = x.dtype.itemsize
# As the array x could contain python objects we use a buffer
# of bytes for the swaps to avoid leaving one of the objects
# within the buffer and erroneously decrementing it's refcount
# when the function exits.
- buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
- buf_ptr = <char*><size_t>buf.ctypes.data
+ buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
+ buf_ptr = <char*><size_t>np.PyArray_DATA(buf)
with self.lock:
# We trick gcc into providing a specialized implementation for
# the most common case, yielding a ~33% performance improvement.
# Note that apparently, only one branch can ever be specialized.
- if itemsize == sizeof(npy_intp):
- self._shuffle_raw(n, sizeof(npy_intp), stride, x_ptr, buf_ptr)
+ if itemsize == sizeof(np.npy_intp):
+ self._shuffle_raw(n, sizeof(np.npy_intp), stride, x_ptr, buf_ptr)
else:
self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr)
- elif isinstance(x, np.ndarray) and x.ndim > 1 and x.size:
- # Multidimensional ndarrays require a bounce buffer.
- buf = np.empty_like(x[0])
+ elif isinstance(x, np.ndarray) and x.ndim and x.size:
+ buf = np.empty_like(x[0, ...])
with self.lock:
for i in reversed(range(1, n)):
- j = rk_interval(i, self.internal_state)
+ j = random_interval(&self._bitgen, i)
+ if i == j:
+ continue # i == j is not needed and memcpy is undefined.
buf[...] = x[j]
x[j] = x[i]
x[i] = buf
@@ -4849,15 +4104,14 @@ cdef class RandomState:
# Untyped path.
with self.lock:
for i in reversed(range(1, n)):
- j = rk_interval(i, self.internal_state)
+ j = random_interval(&self._bitgen, i)
x[i], x[j] = x[j], x[i]
- cdef inline _shuffle_raw(self, npy_intp n, npy_intp itemsize,
- npy_intp stride, char* data, char* buf):
- cdef npy_intp i, j
+ cdef inline _shuffle_raw(self, np.npy_intp n, np.npy_intp itemsize,
+ np.npy_intp stride, char* data, char* buf):
+ cdef np.npy_intp i, j
for i in reversed(range(1, n)):
- j = rk_interval(i, self.internal_state)
- if i == j : continue # i == j is not needed and memcpy is undefined.
+ j = random_interval(&self._bitgen, i)
string.memcpy(buf, data + j * stride, itemsize)
string.memcpy(data + j * stride, data + i * stride, itemsize)
string.memcpy(data + i * stride, buf, itemsize)
@@ -4883,28 +4137,32 @@ cdef class RandomState:
out : ndarray
Permuted sequence or array range.
+
Examples
--------
>>> np.random.permutation(10)
- array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6])
+ array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random
>>> np.random.permutation([1, 4, 9, 12, 15])
- array([15, 1, 9, 4, 12])
+ array([15, 1, 9, 4, 12]) # random
>>> arr = np.arange(9).reshape((3, 3))
>>> np.random.permutation(arr)
- array([[6, 7, 8],
+ array([[6, 7, 8], # random
[0, 1, 2],
[3, 4, 5]])
"""
- if isinstance(x, (int, long, np.integer)):
+
+ if isinstance(x, (int, np.integer)):
arr = np.arange(x)
self.shuffle(arr)
return arr
arr = np.asarray(x)
-
+ if arr.ndim < 1:
+ raise IndexError("x must be an integer or at least 1-dimensional")
+
# shuffle has fast-path for 1-d
if arr.ndim == 1:
# Return a copy if same memory
@@ -4917,56 +4175,123 @@ cdef class RandomState:
idx = np.arange(arr.shape[0], dtype=np.intp)
self.shuffle(idx)
return arr[idx]
-
_rand = RandomState()
-seed = _rand.seed
-get_state = _rand.get_state
-set_state = _rand.set_state
-random_sample = _rand.random_sample
-choice = _rand.choice
-randint = _rand.randint
+
+beta = _rand.beta
+binomial = _rand.binomial
bytes = _rand.bytes
-uniform = _rand.uniform
+chisquare = _rand.chisquare
+choice = _rand.choice
+dirichlet = _rand.dirichlet
+exponential = _rand.exponential
+f = _rand.f
+gamma = _rand.gamma
+get_state = _rand.get_state
+geometric = _rand.geometric
+gumbel = _rand.gumbel
+hypergeometric = _rand.hypergeometric
+laplace = _rand.laplace
+logistic = _rand.logistic
+lognormal = _rand.lognormal
+logseries = _rand.logseries
+multinomial = _rand.multinomial
+multivariate_normal = _rand.multivariate_normal
+negative_binomial = _rand.negative_binomial
+noncentral_chisquare = _rand.noncentral_chisquare
+noncentral_f = _rand.noncentral_f
+normal = _rand.normal
+pareto = _rand.pareto
+permutation = _rand.permutation
+poisson = _rand.poisson
+power = _rand.power
rand = _rand.rand
+randint = _rand.randint
randn = _rand.randn
+random = _rand.random
random_integers = _rand.random_integers
-standard_normal = _rand.standard_normal
-normal = _rand.normal
-beta = _rand.beta
-exponential = _rand.exponential
+random_sample = _rand.random_sample
+rayleigh = _rand.rayleigh
+seed = _rand.seed
+set_state = _rand.set_state
+shuffle = _rand.shuffle
+standard_cauchy = _rand.standard_cauchy
standard_exponential = _rand.standard_exponential
standard_gamma = _rand.standard_gamma
-gamma = _rand.gamma
-f = _rand.f
-noncentral_f = _rand.noncentral_f
-chisquare = _rand.chisquare
-noncentral_chisquare = _rand.noncentral_chisquare
-standard_cauchy = _rand.standard_cauchy
+standard_normal = _rand.standard_normal
standard_t = _rand.standard_t
+triangular = _rand.triangular
+uniform = _rand.uniform
vonmises = _rand.vonmises
-pareto = _rand.pareto
-weibull = _rand.weibull
-power = _rand.power
-laplace = _rand.laplace
-gumbel = _rand.gumbel
-logistic = _rand.logistic
-lognormal = _rand.lognormal
-rayleigh = _rand.rayleigh
wald = _rand.wald
-triangular = _rand.triangular
-
-binomial = _rand.binomial
-negative_binomial = _rand.negative_binomial
-poisson = _rand.poisson
+weibull = _rand.weibull
zipf = _rand.zipf
-geometric = _rand.geometric
-hypergeometric = _rand.hypergeometric
-logseries = _rand.logseries
-multivariate_normal = _rand.multivariate_normal
-multinomial = _rand.multinomial
-dirichlet = _rand.dirichlet
+# Old aliases that should not be removed
+def sample(*args, **kwargs):
+ """
+ This is an alias of `random_sample`. See `random_sample` for the complete
+ documentation.
+ """
+ return _rand.random_sample(*args, **kwargs)
-shuffle = _rand.shuffle
-permutation = _rand.permutation
+def ranf(*args, **kwargs):
+ """
+ This is an alias of `random_sample`. See `random_sample` for the complete
+ documentation.
+ """
+ return _rand.random_sample(*args, **kwargs)
+
+__all__ = [
+ 'beta',
+ 'binomial',
+ 'bytes',
+ 'chisquare',
+ 'choice',
+ 'dirichlet',
+ 'exponential',
+ 'f',
+ 'gamma',
+ 'geometric',
+ 'get_state',
+ 'gumbel',
+ 'hypergeometric',
+ 'laplace',
+ 'logistic',
+ 'lognormal',
+ 'logseries',
+ 'multinomial',
+ 'multivariate_normal',
+ 'negative_binomial',
+ 'noncentral_chisquare',
+ 'noncentral_f',
+ 'normal',
+ 'pareto',
+ 'permutation',
+ 'poisson',
+ 'power',
+ 'rand',
+ 'randint',
+ 'randn',
+ 'random',
+ 'random_integers',
+ 'random_sample',
+ 'ranf',
+ 'rayleigh',
+ 'sample',
+ 'seed',
+ 'set_state',
+ 'shuffle',
+ 'standard_cauchy',
+ 'standard_exponential',
+ 'standard_gamma',
+ 'standard_normal',
+ 'standard_t',
+ 'triangular',
+ 'uniform',
+ 'vonmises',
+ 'wald',
+ 'weibull',
+ 'zipf',
+ 'RandomState',
+]
diff --git a/numpy/random/mtrand/Python.pxi b/numpy/random/mtrand/Python.pxi
deleted file mode 100644
index 08aadbaa1..000000000
--- a/numpy/random/mtrand/Python.pxi
+++ /dev/null
@@ -1,43 +0,0 @@
-# :Author: Robert Kern
-# :Copyright: 2004, Enthought, Inc.
-# :License: BSD Style
-
-
-cdef extern from "Python.h":
- # Not part of the Python API, but we might as well define it here.
- # Note that the exact type doesn't actually matter for Pyrex.
- ctypedef int size_t
-
- # String API
- char* PyString_AsString(object string)
- char* PyString_AS_STRING(object string)
- object PyString_FromString(char* c_string)
- object PyString_FromStringAndSize(char* c_string, int length)
-
- # Float API
- double PyFloat_AsDouble(object ob) except? -1.0
- long PyInt_AsLong(object ob) except? -1
-
- # Memory API
- void* PyMem_Malloc(size_t n)
- void* PyMem_Realloc(void* buf, size_t n)
- void PyMem_Free(void* buf)
-
- void Py_DECREF(object obj)
- void Py_XDECREF(object obj)
- void Py_INCREF(object obj)
- void Py_XINCREF(object obj)
-
- # TypeCheck API
- int PyFloat_Check(object obj)
- int PyInt_Check(object obj)
-
- # Error API
- int PyErr_Occurred()
- void PyErr_Clear()
-
-cdef extern from "string.h":
- void *memcpy(void *s1, void *s2, int n)
-
-cdef extern from "math.h":
- double fabs(double x)
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
deleted file mode 100644
index b7e157915..000000000
--- a/numpy/random/mtrand/distributions.c
+++ /dev/null
@@ -1,926 +0,0 @@
-/* Copyright 2005 Robert Kern (robert.kern@gmail.com)
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/* The implementations of rk_hypergeometric_hyp(), rk_hypergeometric_hrua(),
- * and rk_triangular() were adapted from Ivan Frohne's rv.py which has this
- * license:
- *
- * Copyright 1998 by Ivan Frohne; Wasilla, Alaska, U.S.A.
- * All Rights Reserved
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation for any purpose, free of charge, is granted subject to the
- * following conditions:
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the software.
- *
- * THE SOFTWARE AND DOCUMENTATION IS PROVIDED WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO MERCHANTABILITY, FITNESS
- * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHOR
- * OR COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM OR DAMAGES IN A CONTRACT
- * ACTION, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR ITS DOCUMENTATION.
- */
-
-#include "distributions.h"
-#include <stdio.h>
-#include <math.h>
-#include <stdlib.h>
-#include <limits.h>
-
-#ifndef min
-#define min(x,y) ((x<y)?x:y)
-#define max(x,y) ((x>y)?x:y)
-#endif
-
-#ifndef M_PI
-#define M_PI 3.14159265358979323846264338328
-#endif
-
-/*
- * log-gamma function to support some of these distributions. The
- * algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their
- * book "Computation of Special Functions", 1996, John Wiley & Sons, Inc.
- */
-static double loggam(double x)
-{
- double x0, x2, xp, gl, gl0;
- long k, n;
-
- static double a[10] = {8.333333333333333e-02,-2.777777777777778e-03,
- 7.936507936507937e-04,-5.952380952380952e-04,
- 8.417508417508418e-04,-1.917526917526918e-03,
- 6.410256410256410e-03,-2.955065359477124e-02,
- 1.796443723688307e-01,-1.39243221690590e+00};
- x0 = x;
- n = 0;
- if ((x == 1.0) || (x == 2.0))
- {
- return 0.0;
- }
- else if (x <= 7.0)
- {
- n = (long)(7 - x);
- x0 = x + n;
- }
- x2 = 1.0/(x0*x0);
- xp = 2*M_PI;
- gl0 = a[9];
- for (k=8; k>=0; k--)
- {
- gl0 *= x2;
- gl0 += a[k];
- }
- gl = gl0/x0 + 0.5*log(xp) + (x0-0.5)*log(x0) - x0;
- if (x <= 7.0)
- {
- for (k=1; k<=n; k++)
- {
- gl -= log(x0-1.0);
- x0 -= 1.0;
- }
- }
- return gl;
-}
-
-double rk_normal(rk_state *state, double loc, double scale)
-{
- return loc + scale*rk_gauss(state);
-}
-
-double rk_standard_exponential(rk_state *state)
-{
- /* We use -log(1-U) since U is [0, 1) */
- return -log(1.0 - rk_double(state));
-}
-
-double rk_exponential(rk_state *state, double scale)
-{
- return scale * rk_standard_exponential(state);
-}
-
-double rk_uniform(rk_state *state, double loc, double scale)
-{
- return loc + scale*rk_double(state);
-}
-
-double rk_standard_gamma(rk_state *state, double shape)
-{
- double b, c;
- double U, V, X, Y;
-
- if (shape == 1.0)
- {
- return rk_standard_exponential(state);
- }
- else if (shape < 1.0)
- {
- for (;;)
- {
- U = rk_double(state);
- V = rk_standard_exponential(state);
- if (U <= 1.0 - shape)
- {
- X = pow(U, 1./shape);
- if (X <= V)
- {
- return X;
- }
- }
- else
- {
- Y = -log((1-U)/shape);
- X = pow(1.0 - shape + shape*Y, 1./shape);
- if (X <= (V + Y))
- {
- return X;
- }
- }
- }
- }
- else
- {
- b = shape - 1./3.;
- c = 1./sqrt(9*b);
- for (;;)
- {
- do
- {
- X = rk_gauss(state);
- V = 1.0 + c*X;
- } while (V <= 0.0);
-
- V = V*V*V;
- U = rk_double(state);
- if (U < 1.0 - 0.0331*(X*X)*(X*X)) return (b*V);
- if (log(U) < 0.5*X*X + b*(1. - V + log(V))) return (b*V);
- }
- }
-}
-
-double rk_gamma(rk_state *state, double shape, double scale)
-{
- return scale * rk_standard_gamma(state, shape);
-}
-
-double rk_beta(rk_state *state, double a, double b)
-{
- double Ga, Gb;
-
- if ((a <= 1.0) && (b <= 1.0))
- {
- double U, V, X, Y;
- /* Use Johnk's algorithm */
-
- while (1)
- {
- U = rk_double(state);
- V = rk_double(state);
- X = pow(U, 1.0/a);
- Y = pow(V, 1.0/b);
-
- if ((X + Y) <= 1.0)
- {
- if (X +Y > 0)
- {
- return X / (X + Y);
- }
- else
- {
- double logX = log(U) / a;
- double logY = log(V) / b;
- double logM = logX > logY ? logX : logY;
- logX -= logM;
- logY -= logM;
-
- return exp(logX - log(exp(logX) + exp(logY)));
- }
- }
- }
- }
- else
- {
- Ga = rk_standard_gamma(state, a);
- Gb = rk_standard_gamma(state, b);
- return Ga/(Ga + Gb);
- }
-}
-
-double rk_chisquare(rk_state *state, double df)
-{
- return 2.0*rk_standard_gamma(state, df/2.0);
-}
-
-double rk_noncentral_chisquare(rk_state *state, double df, double nonc)
-{
- if (nonc == 0){
- return rk_chisquare(state, df);
- }
- if(1 < df)
- {
- const double Chi2 = rk_chisquare(state, df - 1);
- const double N = rk_gauss(state) + sqrt(nonc);
- return Chi2 + N*N;
- }
- else
- {
- const long i = rk_poisson(state, nonc / 2.0);
- return rk_chisquare(state, df + 2 * i);
- }
-}
-
-double rk_f(rk_state *state, double dfnum, double dfden)
-{
- return ((rk_chisquare(state, dfnum) * dfden) /
- (rk_chisquare(state, dfden) * dfnum));
-}
-
-double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc)
-{
- double t = rk_noncentral_chisquare(state, dfnum, nonc) * dfden;
- return t / (rk_chisquare(state, dfden) * dfnum);
-}
-
-long rk_binomial_btpe(rk_state *state, long n, double p)
-{
- double r,q,fm,p1,xm,xl,xr,c,laml,lamr,p2,p3,p4;
- double a,u,v,s,F,rho,t,A,nrq,x1,x2,f1,f2,z,z2,w,w2,x;
- long m,y,k,i;
-
- if (!(state->has_binomial) ||
- (state->nsave != n) ||
- (state->psave != p))
- {
- /* initialize */
- state->nsave = n;
- state->psave = p;
- state->has_binomial = 1;
- state->r = r = min(p, 1.0-p);
- state->q = q = 1.0 - r;
- state->fm = fm = n*r+r;
- state->m = m = (long)floor(state->fm);
- state->p1 = p1 = floor(2.195*sqrt(n*r*q)-4.6*q) + 0.5;
- state->xm = xm = m + 0.5;
- state->xl = xl = xm - p1;
- state->xr = xr = xm + p1;
- state->c = c = 0.134 + 20.5/(15.3 + m);
- a = (fm - xl)/(fm-xl*r);
- state->laml = laml = a*(1.0 + a/2.0);
- a = (xr - fm)/(xr*q);
- state->lamr = lamr = a*(1.0 + a/2.0);
- state->p2 = p2 = p1*(1.0 + 2.0*c);
- state->p3 = p3 = p2 + c/laml;
- state->p4 = p4 = p3 + c/lamr;
- }
- else
- {
- r = state->r;
- q = state->q;
- fm = state->fm;
- m = state->m;
- p1 = state->p1;
- xm = state->xm;
- xl = state->xl;
- xr = state->xr;
- c = state->c;
- laml = state->laml;
- lamr = state->lamr;
- p2 = state->p2;
- p3 = state->p3;
- p4 = state->p4;
- }
-
- /* sigh ... */
- Step10:
- nrq = n*r*q;
- u = rk_double(state)*p4;
- v = rk_double(state);
- if (u > p1) goto Step20;
- y = (long)floor(xm - p1*v + u);
- goto Step60;
-
- Step20:
- if (u > p2) goto Step30;
- x = xl + (u - p1)/c;
- v = v*c + 1.0 - fabs(m - x + 0.5)/p1;
- if (v > 1.0) goto Step10;
- y = (long)floor(x);
- goto Step50;
-
- Step30:
- if (u > p3) goto Step40;
- y = (long)floor(xl + log(v)/laml);
- if (y < 0) goto Step10;
- v = v*(u-p2)*laml;
- goto Step50;
-
- Step40:
- y = (long)floor(xr - log(v)/lamr);
- if (y > n) goto Step10;
- v = v*(u-p3)*lamr;
-
- Step50:
- k = labs(y - m);
- if ((k > 20) && (k < ((nrq)/2.0 - 1))) goto Step52;
-
- s = r/q;
- a = s*(n+1);
- F = 1.0;
- if (m < y)
- {
- for (i=m+1; i<=y; i++)
- {
- F *= (a/i - s);
- }
- }
- else if (m > y)
- {
- for (i=y+1; i<=m; i++)
- {
- F /= (a/i - s);
- }
- }
- if (v > F) goto Step10;
- goto Step60;
-
- Step52:
- rho = (k/(nrq))*((k*(k/3.0 + 0.625) + 0.16666666666666666)/nrq + 0.5);
- t = -k*k/(2*nrq);
- A = log(v);
- if (A < (t - rho)) goto Step60;
- if (A > (t + rho)) goto Step10;
-
- x1 = y+1;
- f1 = m+1;
- z = n+1-m;
- w = n-y+1;
- x2 = x1*x1;
- f2 = f1*f1;
- z2 = z*z;
- w2 = w*w;
- if (A > (xm*log(f1/x1)
- + (n-m+0.5)*log(z/w)
- + (y-m)*log(w*r/(x1*q))
- + (13680.-(462.-(132.-(99.-140./f2)/f2)/f2)/f2)/f1/166320.
- + (13680.-(462.-(132.-(99.-140./z2)/z2)/z2)/z2)/z/166320.
- + (13680.-(462.-(132.-(99.-140./x2)/x2)/x2)/x2)/x1/166320.
- + (13680.-(462.-(132.-(99.-140./w2)/w2)/w2)/w2)/w/166320.))
- {
- goto Step10;
- }
-
- Step60:
- if (p > 0.5)
- {
- y = n - y;
- }
-
- return y;
-}
-
-long rk_binomial_inversion(rk_state *state, long n, double p)
-{
- double q, qn, np, px, U;
- long X, bound;
-
- if (!(state->has_binomial) ||
- (state->nsave != n) ||
- (state->psave != p))
- {
- state->nsave = n;
- state->psave = p;
- state->has_binomial = 1;
- state->q = q = 1.0 - p;
- state->r = qn = exp(n * log(q));
- state->c = np = n*p;
- state->m = bound = min(n, np + 10.0*sqrt(np*q + 1));
- } else
- {
- q = state->q;
- qn = state->r;
- np = state->c;
- bound = state->m;
- }
- X = 0;
- px = qn;
- U = rk_double(state);
- while (U > px)
- {
- X++;
- if (X > bound)
- {
- X = 0;
- px = qn;
- U = rk_double(state);
- } else
- {
- U -= px;
- px = ((n-X+1) * p * px)/(X*q);
- }
- }
- return X;
-}
-
-long rk_binomial(rk_state *state, long n, double p)
-{
- double q;
-
- if (p <= 0.5)
- {
- if (p*n <= 30.0)
- {
- return rk_binomial_inversion(state, n, p);
- }
- else
- {
- return rk_binomial_btpe(state, n, p);
- }
- }
- else
- {
- q = 1.0-p;
- if (q*n <= 30.0)
- {
- return n - rk_binomial_inversion(state, n, q);
- }
- else
- {
- return n - rk_binomial_btpe(state, n, q);
- }
- }
-
-}
-
-long rk_negative_binomial(rk_state *state, double n, double p)
-{
- double Y;
-
- Y = rk_gamma(state, n, (1-p)/p);
- return rk_poisson(state, Y);
-}
-
-long rk_poisson_mult(rk_state *state, double lam)
-{
- long X;
- double prod, U, enlam;
-
- enlam = exp(-lam);
- X = 0;
- prod = 1.0;
- while (1)
- {
- U = rk_double(state);
- prod *= U;
- if (prod > enlam)
- {
- X += 1;
- }
- else
- {
- return X;
- }
- }
-}
-
-/*
- * The transformed rejection method for generating Poisson random variables
- * W. Hoermann
- * Insurance: Mathematics and Economics 12, 39-45 (1993)
- */
-#define LS2PI 0.91893853320467267
-#define TWELFTH 0.083333333333333333333333
-long rk_poisson_ptrs(rk_state *state, double lam)
-{
- long k;
- double U, V, slam, loglam, a, b, invalpha, vr, us;
-
- slam = sqrt(lam);
- loglam = log(lam);
- b = 0.931 + 2.53*slam;
- a = -0.059 + 0.02483*b;
- invalpha = 1.1239 + 1.1328/(b-3.4);
- vr = 0.9277 - 3.6224/(b-2);
-
- while (1)
- {
- U = rk_double(state) - 0.5;
- V = rk_double(state);
- us = 0.5 - fabs(U);
- k = (long)floor((2*a/us + b)*U + lam + 0.43);
- if ((us >= 0.07) && (V <= vr))
- {
- return k;
- }
- if ((k < 0) ||
- ((us < 0.013) && (V > us)))
- {
- continue;
- }
- if ((log(V) + log(invalpha) - log(a/(us*us)+b)) <=
- (-lam + k*loglam - loggam(k+1)))
- {
- return k;
- }
-
-
- }
-
-}
-
-long rk_poisson(rk_state *state, double lam)
-{
- if (lam >= 10)
- {
- return rk_poisson_ptrs(state, lam);
- }
- else if (lam == 0)
- {
- return 0;
- }
- else
- {
- return rk_poisson_mult(state, lam);
- }
-}
-
-double rk_standard_cauchy(rk_state *state)
-{
- return rk_gauss(state) / rk_gauss(state);
-}
-
-double rk_standard_t(rk_state *state, double df)
-{
- double N, G, X;
-
- N = rk_gauss(state);
- G = rk_standard_gamma(state, df/2);
- X = sqrt(df/2)*N/sqrt(G);
- return X;
-}
-
-/* Uses the rejection algorithm compared against the wrapped Cauchy
- distribution suggested by Best and Fisher and documented in
- Chapter 9 of Luc's Non-Uniform Random Variate Generation.
- http://cg.scs.carleton.ca/~luc/rnbookindex.html
- (but corrected to match the algorithm in R and Python)
-*/
-double rk_vonmises(rk_state *state, double mu, double kappa)
-{
- double s;
- double U, V, W, Y, Z;
- double result, mod;
- int neg;
-
- if (kappa < 1e-8)
- {
- return M_PI * (2*rk_double(state)-1);
- }
- else
- {
- /* with double precision rho is zero until 1.4e-8 */
- if (kappa < 1e-5) {
- /*
- * second order taylor expansion around kappa = 0
- * precise until relatively large kappas as second order is 0
- */
- s = (1./kappa + kappa);
- }
- else {
- double r = 1 + sqrt(1 + 4*kappa*kappa);
- double rho = (r - sqrt(2*r)) / (2*kappa);
- s = (1 + rho*rho)/(2*rho);
- }
-
- while (1)
- {
- U = rk_double(state);
- Z = cos(M_PI*U);
- W = (1 + s*Z)/(s + Z);
- Y = kappa * (s - W);
- V = rk_double(state);
- if ((Y*(2-Y) - V >= 0) || (log(Y/V)+1 - Y >= 0))
- {
- break;
- }
- }
-
- U = rk_double(state);
-
- result = acos(W);
- if (U < 0.5)
- {
- result = -result;
- }
- result += mu;
- neg = (result < 0);
- mod = fabs(result);
- mod = (fmod(mod+M_PI, 2*M_PI)-M_PI);
- if (neg)
- {
- mod *= -1;
- }
-
- return mod;
- }
-}
-
-double rk_pareto(rk_state *state, double a)
-{
- return exp(rk_standard_exponential(state)/a) - 1;
-}
-
-double rk_weibull(rk_state *state, double a)
-{
- return pow(rk_standard_exponential(state), 1./a);
-}
-
-double rk_power(rk_state *state, double a)
-{
- return pow(1 - exp(-rk_standard_exponential(state)), 1./a);
-}
-
-double rk_laplace(rk_state *state, double loc, double scale)
-{
- double U;
-
- U = rk_double(state);
- if (U < 0.5)
- {
- U = loc + scale * log(U + U);
- } else
- {
- U = loc - scale * log(2.0 - U - U);
- }
- return U;
-}
-
-double rk_gumbel(rk_state *state, double loc, double scale)
-{
- double U;
-
- U = 1.0 - rk_double(state);
- return loc - scale * log(-log(U));
-}
-
-double rk_logistic(rk_state *state, double loc, double scale)
-{
- double U;
-
- U = rk_double(state);
- return loc + scale * log(U/(1.0 - U));
-}
-
-double rk_lognormal(rk_state *state, double mean, double sigma)
-{
- return exp(rk_normal(state, mean, sigma));
-}
-
-double rk_rayleigh(rk_state *state, double mode)
-{
- return mode*sqrt(-2.0 * log(1.0 - rk_double(state)));
-}
-
-double rk_wald(rk_state *state, double mean, double scale)
-{
- double U, X, Y;
- double mu_2l;
-
- mu_2l = mean / (2*scale);
- Y = rk_gauss(state);
- Y = mean*Y*Y;
- X = mean + mu_2l*(Y - sqrt(4*scale*Y + Y*Y));
- U = rk_double(state);
- if (U <= mean/(mean+X))
- {
- return X;
- } else
- {
- return mean*mean/X;
- }
-}
-
-long rk_zipf(rk_state *state, double a)
-{
- double am1, b;
-
- am1 = a - 1.0;
- b = pow(2.0, am1);
- while (1) {
- double T, U, V, X;
-
- U = 1.0 - rk_double(state);
- V = rk_double(state);
- X = floor(pow(U, -1.0/am1));
- /*
- * The real result may be above what can be represented in a signed
- * long. Since this is a straightforward rejection algorithm, we can
- * just reject this value. This function then models a Zipf
- * distribution truncated to sys.maxint.
- */
- if (X > LONG_MAX || X < 1.0) {
- continue;
- }
-
- T = pow(1.0 + 1.0/X, am1);
- if (V*X*(T - 1.0)/(b - 1.0) <= T/b) {
- return (long)X;
- }
- }
-}
-
-long rk_geometric_search(rk_state *state, double p)
-{
- double U;
- long X;
- double sum, prod, q;
-
- X = 1;
- sum = prod = p;
- q = 1.0 - p;
- U = rk_double(state);
- while (U > sum)
- {
- prod *= q;
- sum += prod;
- X++;
- }
- return X;
-}
-
-long rk_geometric_inversion(rk_state *state, double p)
-{
- return (long)ceil(log(1.0-rk_double(state))/log(1.0-p));
-}
-
-long rk_geometric(rk_state *state, double p)
-{
- if (p >= 0.333333333333333333333333)
- {
- return rk_geometric_search(state, p);
- } else
- {
- return rk_geometric_inversion(state, p);
- }
-}
-
-long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample)
-{
- long d1, K, Z;
- double d2, U, Y;
-
- d1 = bad + good - sample;
- d2 = (double)min(bad, good);
-
- Y = d2;
- K = sample;
- while (Y > 0.0)
- {
- U = rk_double(state);
- Y -= (long)floor(U + Y/(d1 + K));
- K--;
- if (K == 0) break;
- }
- Z = (long)(d2 - Y);
- if (good > bad) Z = sample - Z;
- return Z;
-}
-
-/* D1 = 2*sqrt(2/e) */
-/* D2 = 3 - 2*sqrt(3/e) */
-#define D1 1.7155277699214135
-#define D2 0.8989161620588988
-long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample)
-{
- long mingoodbad, maxgoodbad, popsize, m, d9;
- double d4, d5, d6, d7, d8, d10, d11;
- long Z;
- double T, W, X, Y;
-
- mingoodbad = min(good, bad);
- popsize = good + bad;
- maxgoodbad = max(good, bad);
- m = min(sample, popsize - sample);
- d4 = ((double)mingoodbad) / popsize;
- d5 = 1.0 - d4;
- d6 = m*d4 + 0.5;
- d7 = sqrt((double)(popsize - m) * sample * d4 * d5 / (popsize - 1) + 0.5);
- d8 = D1*d7 + D2;
- d9 = (long)floor((double)(m + 1) * (mingoodbad + 1) / (popsize + 2));
- d10 = (loggam(d9+1) + loggam(mingoodbad-d9+1) + loggam(m-d9+1) +
- loggam(maxgoodbad-m+d9+1));
- d11 = min(min(m, mingoodbad)+1.0, floor(d6+16*d7));
- /* 16 for 16-decimal-digit precision in D1 and D2 */
-
- while (1)
- {
- X = rk_double(state);
- Y = rk_double(state);
- W = d6 + d8*(Y- 0.5)/X;
-
- /* fast rejection: */
- if ((W < 0.0) || (W >= d11)) continue;
-
- Z = (long)floor(W);
- T = d10 - (loggam(Z+1) + loggam(mingoodbad-Z+1) + loggam(m-Z+1) +
- loggam(maxgoodbad-m+Z+1));
-
- /* fast acceptance: */
- if ((X*(4.0-X)-3.0) <= T) break;
-
- /* fast rejection: */
- if (X*(X-T) >= 1) continue;
-
- if (2.0*log(X) <= T) break; /* acceptance */
- }
-
- /* this is a correction to HRUA* by Ivan Frohne in rv.py */
- if (good > bad) Z = m - Z;
-
- /* another fix from rv.py to allow sample to exceed popsize/2 */
- if (m < sample) Z = good - Z;
-
- return Z;
-}
-#undef D1
-#undef D2
-
-long rk_hypergeometric(rk_state *state, long good, long bad, long sample)
-{
- if (sample > 10)
- {
- return rk_hypergeometric_hrua(state, good, bad, sample);
- } else
- {
- return rk_hypergeometric_hyp(state, good, bad, sample);
- }
-}
-
-double rk_triangular(rk_state *state, double left, double mode, double right)
-{
- double base, leftbase, ratio, leftprod, rightprod;
- double U;
-
- base = right - left;
- leftbase = mode - left;
- ratio = leftbase / base;
- leftprod = leftbase*base;
- rightprod = (right - mode)*base;
-
- U = rk_double(state);
- if (U <= ratio)
- {
- return left + sqrt(U*leftprod);
- } else
- {
- return right - sqrt((1.0 - U) * rightprod);
- }
-}
-
-long rk_logseries(rk_state *state, double p)
-{
- double q, r, U, V;
- long result;
-
- r = log(1.0 - p);
-
- while (1) {
- V = rk_double(state);
- if (V >= p) {
- return 1;
- }
- U = rk_double(state);
- q = 1.0 - exp(r*U);
- if (V <= q*q) {
- result = (long)floor(1 + log(V)/log(q));
- if (result < 1) {
- continue;
- }
- else {
- return result;
- }
- }
- if (V >= q) {
- return 1;
- }
- return 2;
- }
-}
diff --git a/numpy/random/mtrand/distributions.h b/numpy/random/mtrand/distributions.h
deleted file mode 100644
index 0b42bc794..000000000
--- a/numpy/random/mtrand/distributions.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* Copyright 2005 Robert Kern (robert.kern@gmail.com)
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _RK_DISTR_
-#define _RK_DISTR_
-
-#include "randomkit.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-/* References:
- *
- * Devroye, Luc. _Non-Uniform Random Variate Generation_.
- * Springer-Verlag, New York, 1986.
- * http://cgm.cs.mcgill.ca/~luc/rnbookindex.html
- *
- * Kachitvichyanukul, V. and Schmeiser, B. W. Binomial Random Variate
- * Generation. Communications of the ACM, 31, 2 (February, 1988) 216.
- *
- * Hoermann, W. The Transformed Rejection Method for Generating Poisson Random
- * Variables. Insurance: Mathematics and Economics, (to appear)
- * http://citeseer.csail.mit.edu/151115.html
- *
- * Marsaglia, G. and Tsang, W. W. A Simple Method for Generating Gamma
- * Variables. ACM Transactions on Mathematical Software, Vol. 26, No. 3,
- * September 2000, Pages 363–372.
- */
-
-/* Normal distribution with mean=loc and standard deviation=scale. */
-extern double rk_normal(rk_state *state, double loc, double scale);
-
-/* Standard exponential distribution (mean=1) computed by inversion of the
- * CDF. */
-extern double rk_standard_exponential(rk_state *state);
-
-/* Exponential distribution with mean=scale. */
-extern double rk_exponential(rk_state *state, double scale);
-
-/* Uniform distribution on interval [loc, loc+scale). */
-extern double rk_uniform(rk_state *state, double loc, double scale);
-
-/* Standard gamma distribution with shape parameter.
- * When shape < 1, the algorithm given by (Devroye p. 304) is used.
- * When shape == 1, a Exponential variate is generated.
- * When shape > 1, the small and fast method of (Marsaglia and Tsang 2000)
- * is used.
- */
-extern double rk_standard_gamma(rk_state *state, double shape);
-
-/* Gamma distribution with shape and scale. */
-extern double rk_gamma(rk_state *state, double shape, double scale);
-
-/* Beta distribution computed by combining two gamma variates (Devroye p. 432).
- */
-extern double rk_beta(rk_state *state, double a, double b);
-
-/* Chi^2 distribution computed by transforming a gamma variate (it being a
- * special case Gamma(df/2, 2)). */
-extern double rk_chisquare(rk_state *state, double df);
-
-/* Noncentral Chi^2 distribution computed by modifying a Chi^2 variate. */
-extern double rk_noncentral_chisquare(rk_state *state, double df, double nonc);
-
-/* F distribution computed by taking the ratio of two Chi^2 variates. */
-extern double rk_f(rk_state *state, double dfnum, double dfden);
-
-/* Noncentral F distribution computed by taking the ratio of a noncentral Chi^2
- * and a Chi^2 variate. */
-extern double rk_noncentral_f(rk_state *state, double dfnum, double dfden, double nonc);
-
-/* Binomial distribution with n Bernoulli trials with success probability p.
- * When n*p <= 30, the "Second waiting time method" given by (Devroye p. 525) is
- * used. Otherwise, the BTPE algorithm of (Kachitvichyanukul and Schmeiser 1988)
- * is used. */
-extern long rk_binomial(rk_state *state, long n, double p);
-
-/* Binomial distribution using BTPE. */
-extern long rk_binomial_btpe(rk_state *state, long n, double p);
-
-/* Binomial distribution using inversion and chop-down */
-extern long rk_binomial_inversion(rk_state *state, long n, double p);
-
-/* Negative binomial distribution computed by generating a Gamma(n, (1-p)/p)
- * variate Y and returning a Poisson(Y) variate (Devroye p. 543). */
-extern long rk_negative_binomial(rk_state *state, double n, double p);
-
-/* Poisson distribution with mean=lam.
- * When lam < 10, a basic algorithm using repeated multiplications of uniform
- * variates is used (Devroye p. 504).
- * When lam >= 10, algorithm PTRS from (Hoermann 1992) is used.
- */
-extern long rk_poisson(rk_state *state, double lam);
-
-/* Poisson distribution computed by repeated multiplication of uniform variates.
- */
-extern long rk_poisson_mult(rk_state *state, double lam);
-
-/* Poisson distribution computer by the PTRS algorithm. */
-extern long rk_poisson_ptrs(rk_state *state, double lam);
-
-/* Standard Cauchy distribution computed by dividing standard gaussians
- * (Devroye p. 451). */
-extern double rk_standard_cauchy(rk_state *state);
-
-/* Standard t-distribution with df degrees of freedom (Devroye p. 445 as
- * corrected in the Errata). */
-extern double rk_standard_t(rk_state *state, double df);
-
-/* von Mises circular distribution with center mu and shape kappa on [-pi,pi]
- * (Devroye p. 476 as corrected in the Errata). */
-extern double rk_vonmises(rk_state *state, double mu, double kappa);
-
-/* Pareto distribution via inversion (Devroye p. 262) */
-extern double rk_pareto(rk_state *state, double a);
-
-/* Weibull distribution via inversion (Devroye p. 262) */
-extern double rk_weibull(rk_state *state, double a);
-
-/* Power distribution via inversion (Devroye p. 262) */
-extern double rk_power(rk_state *state, double a);
-
-/* Laplace distribution */
-extern double rk_laplace(rk_state *state, double loc, double scale);
-
-/* Gumbel distribution */
-extern double rk_gumbel(rk_state *state, double loc, double scale);
-
-/* Logistic distribution */
-extern double rk_logistic(rk_state *state, double loc, double scale);
-
-/* Log-normal distribution */
-extern double rk_lognormal(rk_state *state, double mean, double sigma);
-
-/* Rayleigh distribution */
-extern double rk_rayleigh(rk_state *state, double mode);
-
-/* Wald distribution */
-extern double rk_wald(rk_state *state, double mean, double scale);
-
-/* Zipf distribution */
-extern long rk_zipf(rk_state *state, double a);
-
-/* Geometric distribution */
-extern long rk_geometric(rk_state *state, double p);
-extern long rk_geometric_search(rk_state *state, double p);
-extern long rk_geometric_inversion(rk_state *state, double p);
-
-/* Hypergeometric distribution */
-extern long rk_hypergeometric(rk_state *state, long good, long bad, long sample);
-extern long rk_hypergeometric_hyp(rk_state *state, long good, long bad, long sample);
-extern long rk_hypergeometric_hrua(rk_state *state, long good, long bad, long sample);
-
-/* Triangular distribution */
-extern double rk_triangular(rk_state *state, double left, double mode, double right);
-
-/* Logarithmic series distribution */
-extern long rk_logseries(rk_state *state, double p);
-
-#ifdef __cplusplus
-}
-#endif
-
-
-#endif /* _RK_DISTR_ */
diff --git a/numpy/random/mtrand/generate_mtrand_c.py b/numpy/random/mtrand/generate_mtrand_c.py
deleted file mode 100644
index ec935e6dd..000000000
--- a/numpy/random/mtrand/generate_mtrand_c.py
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-import sys
-import re
-import os
-
-unused_internal_funcs = ['__Pyx_PrintItem',
- '__Pyx_PrintNewline',
- '__Pyx_ReRaise',
- #'__Pyx_GetExcValue',
- '__Pyx_ArgTypeTest',
- '__Pyx_SetVtable',
- '__Pyx_GetVtable',
- '__Pyx_CreateClass']
-
-if __name__ == '__main__':
- # Use cython here so that long docstrings are broken up.
- # This is needed for some VC++ compilers.
- os.system('cython mtrand.pyx')
- mtrand_c = open('mtrand.c', 'r')
- processed = open('mtrand_pp.c', 'w')
- unused_funcs_str = '(' + '|'.join(unused_internal_funcs) + ')'
- uifpat = re.compile(r'static \w+ \*?'+unused_funcs_str+r'.*/\*proto\*/')
- linepat = re.compile(r'/\* ".*/mtrand.pyx":')
- for linenum, line in enumerate(mtrand_c):
- m = re.match(r'^(\s+arrayObject\w*\s*=\s*[(])[(]PyObject\s*[*][)]',
- line)
- if m:
- line = '%s(PyArrayObject *)%s' % (m.group(1), line[m.end():])
- m = uifpat.match(line)
- if m:
- line = ''
- m = re.search(unused_funcs_str, line)
- if m:
- print("%s was declared unused, but is used at line %d" % (m.group(),
- linenum+1), file=sys.stderr)
- line = linepat.sub(r'/* "mtrand.pyx":', line)
- processed.write(line)
- mtrand_c.close()
- processed.close()
- os.rename('mtrand_pp.c', 'mtrand.c')
diff --git a/numpy/random/mtrand/initarray.c b/numpy/random/mtrand/initarray.c
deleted file mode 100644
index 21f1dc05a..000000000
--- a/numpy/random/mtrand/initarray.c
+++ /dev/null
@@ -1,152 +0,0 @@
-/*
- * These function have been adapted from Python 2.4.1's _randommodule.c
- *
- * The following changes have been made to it in 2005 by Robert Kern:
- *
- * * init_by_array has been declared extern, has a void return, and uses the
- * rk_state structure to hold its data.
- *
- * The original file has the following verbatim comments:
- *
- * ------------------------------------------------------------------
- * The code in this module was based on a download from:
- * http://www.math.keio.ac.jp/~matumoto/MT2002/emt19937ar.html
- *
- * It was modified in 2002 by Raymond Hettinger as follows:
- *
- * * the principal computational lines untouched except for tabbing.
- *
- * * renamed genrand_res53() to random_random() and wrapped
- * in python calling/return code.
- *
- * * genrand_int32() and the helper functions, init_genrand()
- * and init_by_array(), were declared static, wrapped in
- * Python calling/return code. also, their global data
- * references were replaced with structure references.
- *
- * * unused functions from the original were deleted.
- * new, original C python code was added to implement the
- * Random() interface.
- *
- * The following are the verbatim comments from the original code:
- *
- * A C-program for MT19937, with initialization improved 2002/1/26.
- * Coded by Takuji Nishimura and Makoto Matsumoto.
- *
- * Before using, initialize the state by using init_genrand(seed)
- * or init_by_array(init_key, key_length).
- *
- * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- *
- * Any feedback is very welcome.
- * http://www.math.keio.ac.jp/matumoto/emt.html
- * email: matumoto@math.keio.ac.jp
- */
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
-
-#include "initarray.h"
-
-static void
-init_genrand(rk_state *self, unsigned long s);
-
-/* initializes mt[RK_STATE_LEN] with a seed */
-static void
-init_genrand(rk_state *self, unsigned long s)
-{
- int mti;
- unsigned long *mt = self->key;
-
- mt[0] = s & 0xffffffffUL;
- for (mti = 1; mti < RK_STATE_LEN; mti++) {
- /*
- * See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier.
- * In the previous versions, MSBs of the seed affect
- * only MSBs of the array mt[].
- * 2002/01/09 modified by Makoto Matsumoto
- */
- mt[mti] = (1812433253UL * (mt[mti-1] ^ (mt[mti-1] >> 30)) + mti);
- /* for > 32 bit machines */
- mt[mti] &= 0xffffffffUL;
- }
- self->pos = mti;
- return;
-}
-
-
-/*
- * initialize by an array with array-length
- * init_key is the array for initializing keys
- * key_length is its length
- */
-extern void
-init_by_array(rk_state *self, unsigned long init_key[], npy_intp key_length)
-{
- /* was signed in the original code. RDH 12/16/2002 */
- npy_intp i = 1;
- npy_intp j = 0;
- unsigned long *mt = self->key;
- npy_intp k;
-
- init_genrand(self, 19650218UL);
- k = (RK_STATE_LEN > key_length ? RK_STATE_LEN : key_length);
- for (; k; k--) {
- /* non linear */
- mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL))
- + init_key[j] + j;
- /* for > 32 bit machines */
- mt[i] &= 0xffffffffUL;
- i++;
- j++;
- if (i >= RK_STATE_LEN) {
- mt[0] = mt[RK_STATE_LEN - 1];
- i = 1;
- }
- if (j >= key_length) {
- j = 0;
- }
- }
- for (k = RK_STATE_LEN - 1; k; k--) {
- mt[i] = (mt[i] ^ ((mt[i-1] ^ (mt[i-1] >> 30)) * 1566083941UL))
- - i; /* non linear */
- mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
- i++;
- if (i >= RK_STATE_LEN) {
- mt[0] = mt[RK_STATE_LEN - 1];
- i = 1;
- }
- }
-
- mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
- self->gauss = 0;
- self->has_gauss = 0;
- self->has_binomial = 0;
-}
diff --git a/numpy/random/mtrand/initarray.h b/numpy/random/mtrand/initarray.h
deleted file mode 100644
index f5e5e5332..000000000
--- a/numpy/random/mtrand/initarray.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#include "Python.h"
-#define NO_IMPORT_ARRAY
-#include "numpy/arrayobject.h"
-#include "randomkit.h"
-
-extern void
-init_by_array(rk_state *self, unsigned long init_key[],
- npy_intp key_length);
diff --git a/numpy/random/mtrand/mtrand_py_helper.h b/numpy/random/mtrand/mtrand_py_helper.h
deleted file mode 100644
index 266847cbe..000000000
--- a/numpy/random/mtrand/mtrand_py_helper.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#ifndef _MTRAND_PY_HELPER_H_
-#define _MTRAND_PY_HELPER_H_
-
-#include <Python.h>
-
-static PyObject *empty_py_bytes(npy_intp length, void **bytes)
-{
- PyObject *b;
-#if PY_MAJOR_VERSION >= 3
- b = PyBytes_FromStringAndSize(NULL, length);
- if (b) {
- *bytes = PyBytes_AS_STRING(b);
- }
-#else
- b = PyString_FromStringAndSize(NULL, length);
- if (b) {
- *bytes = PyString_AS_STRING(b);
- }
-#endif
- return b;
-}
-
-#endif /* _MTRAND_PY_HELPER_H_ */
diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd
deleted file mode 100644
index 9092fa113..000000000
--- a/numpy/random/mtrand/numpy.pxd
+++ /dev/null
@@ -1,163 +0,0 @@
-# :Author: Travis Oliphant
-from cpython.exc cimport PyErr_Print
-
-cdef extern from "numpy/npy_no_deprecated_api.h": pass
-
-cdef extern from "numpy/arrayobject.h":
-
- cdef enum NPY_TYPES:
- NPY_BOOL
- NPY_BYTE
- NPY_UBYTE
- NPY_SHORT
- NPY_USHORT
- NPY_INT
- NPY_UINT
- NPY_LONG
- NPY_ULONG
- NPY_LONGLONG
- NPY_ULONGLONG
- NPY_FLOAT
- NPY_DOUBLE
- NPY_LONGDOUBLE
- NPY_CFLOAT
- NPY_CDOUBLE
- NPY_CLONGDOUBLE
- NPY_OBJECT
- NPY_STRING
- NPY_UNICODE
- NPY_VOID
- NPY_NTYPES
- NPY_NOTYPE
-
- cdef enum requirements:
- NPY_ARRAY_C_CONTIGUOUS
- NPY_ARRAY_F_CONTIGUOUS
- NPY_ARRAY_OWNDATA
- NPY_ARRAY_FORCECAST
- NPY_ARRAY_ENSURECOPY
- NPY_ARRAY_ENSUREARRAY
- NPY_ARRAY_ELEMENTSTRIDES
- NPY_ARRAY_ALIGNED
- NPY_ARRAY_NOTSWAPPED
- NPY_ARRAY_WRITEABLE
- NPY_ARRAY_WRITEBACKIFCOPY
- NPY_ARR_HAS_DESCR
-
- NPY_ARRAY_BEHAVED
- NPY_ARRAY_BEHAVED_NS
- NPY_ARRAY_CARRAY
- NPY_ARRAY_CARRAY_RO
- NPY_ARRAY_FARRAY
- NPY_ARRAY_FARRAY_RO
- NPY_ARRAY_DEFAULT
-
- NPY_ARRAY_IN_ARRAY
- NPY_ARRAY_OUT_ARRAY
- NPY_ARRAY_INOUT_ARRAY
- NPY_ARRAY_IN_FARRAY
- NPY_ARRAY_OUT_FARRAY
- NPY_ARRAY_INOUT_FARRAY
-
- NPY_ARRAY_UPDATE_ALL
-
- cdef enum defines:
- NPY_MAXDIMS
-
- ctypedef struct npy_cdouble:
- double real
- double imag
-
- ctypedef struct npy_cfloat:
- double real
- double imag
-
- ctypedef int npy_int
- ctypedef int npy_intp
- ctypedef int npy_int64
- ctypedef int npy_uint64
- ctypedef int npy_int32
- ctypedef int npy_uint32
- ctypedef int npy_int16
- ctypedef int npy_uint16
- ctypedef int npy_int8
- ctypedef int npy_uint8
- ctypedef int npy_bool
-
- ctypedef extern class numpy.dtype [object PyArray_Descr]: pass
-
- ctypedef extern class numpy.ndarray [object PyArrayObject]: pass
-
- ctypedef extern class numpy.flatiter [object PyArrayIterObject]:
- cdef int nd_m1
- cdef npy_intp index, size
- cdef ndarray ao
- cdef char *dataptr
-
- ctypedef extern class numpy.broadcast [object PyArrayMultiIterObject]:
- cdef int numiter
- cdef npy_intp size, index
- cdef int nd
- cdef npy_intp *dimensions
- cdef void **iters
-
- object PyArray_ZEROS(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran)
- object PyArray_EMPTY(int ndims, npy_intp* dims, NPY_TYPES type_num, int fortran)
- dtype PyArray_DescrFromTypeNum(NPY_TYPES type_num)
- object PyArray_SimpleNew(int ndims, npy_intp* dims, NPY_TYPES type_num)
- int PyArray_Check(object obj)
- object PyArray_ContiguousFromAny(object obj, NPY_TYPES type,
- int mindim, int maxdim)
- object PyArray_ContiguousFromObject(object obj, NPY_TYPES type,
- int mindim, int maxdim)
- npy_intp PyArray_SIZE(ndarray arr)
- npy_intp PyArray_NBYTES(ndarray arr)
- object PyArray_FromAny(object obj, dtype newtype, int mindim, int maxdim,
- int requirements, object context)
- object PyArray_FROMANY(object obj, NPY_TYPES type_num, int min,
- int max, int requirements)
- object PyArray_NewFromDescr(object subtype, dtype newtype, int nd,
- npy_intp* dims, npy_intp* strides, void* data,
- int flags, object parent)
-
- object PyArray_FROM_OTF(object obj, NPY_TYPES type, int flags)
- object PyArray_EnsureArray(object)
-
- object PyArray_MultiIterNew(int n, ...)
-
- char *PyArray_MultiIter_DATA(broadcast multi, int i) nogil
- void PyArray_MultiIter_NEXTi(broadcast multi, int i) nogil
- void PyArray_MultiIter_NEXT(broadcast multi) nogil
-
- object PyArray_IterNew(object arr)
- void PyArray_ITER_NEXT(flatiter it) nogil
- void* PyArray_ITER_DATA(flatiter it) nogil
-
- dtype PyArray_DescrFromType(int)
-
- int _import_array() except -1
-
-# include functions that were once macros in the new api
-
- int PyArray_NDIM(ndarray arr)
- char * PyArray_DATA(ndarray arr)
- npy_intp * PyArray_DIMS(ndarray arr)
- npy_intp * PyArray_STRIDES(ndarray arr)
- npy_intp PyArray_DIM(ndarray arr, int idim)
- npy_intp PyArray_STRIDE(ndarray arr, int istride)
- object PyArray_BASE(ndarray arr)
- dtype PyArray_DESCR(ndarray arr)
- int PyArray_FLAGS(ndarray arr)
- npy_intp PyArray_ITEMSIZE(ndarray arr)
- int PyArray_TYPE(ndarray arr)
- int PyArray_CHKFLAGS(ndarray arr, int flags)
- object PyArray_GETITEM(ndarray arr, char *itemptr)
-
-
-# copied from cython version with addition of PyErr_Print.
-cdef inline int import_array() except -1:
- try:
- _import_array()
- except Exception:
- PyErr_Print()
- raise ImportError("numpy.core.multiarray failed to import")
diff --git a/numpy/random/mtrand/randint_helpers.pxi.in b/numpy/random/mtrand/randint_helpers.pxi.in
deleted file mode 100644
index 894a25167..000000000
--- a/numpy/random/mtrand/randint_helpers.pxi.in
+++ /dev/null
@@ -1,77 +0,0 @@
-"""
-Template for each `dtype` helper function in `np.random.randint`.
-"""
-
-{{py:
-
-dtypes = (
- ('bool', 'bool', 'bool_'),
- ('int8', 'uint8', 'int8'),
- ('int16', 'uint16', 'int16'),
- ('int32', 'uint32', 'int32'),
- ('int64', 'uint64', 'int64'),
- ('uint8', 'uint8', 'uint8'),
- ('uint16', 'uint16', 'uint16'),
- ('uint32', 'uint32', 'uint32'),
- ('uint64', 'uint64', 'uint64'),
-)
-
-def get_dispatch(dtypes):
- for npy_dt, npy_udt, np_dt in dtypes:
- yield npy_dt, npy_udt, np_dt
-}}
-
-{{for npy_dt, npy_udt, np_dt in get_dispatch(dtypes)}}
-
-def _rand_{{npy_dt}}(npy_{{npy_dt}} low, npy_{{npy_dt}} high, size, rngstate):
- """
- _rand_{{npy_dt}}(low, high, size, rngstate)
-
- Return random np.{{np_dt}} integers between ``low`` and ``high``, inclusive.
-
- Return random integers from the "discrete uniform" distribution in the
- closed interval [``low``, ``high``). On entry the arguments are presumed
- to have been validated for size and order for the np.{{np_dt}} type.
-
- Parameters
- ----------
- low : int
- Lowest (signed) integer to be drawn from the distribution.
- high : int
- Highest (signed) integer to be drawn from the distribution.
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- rngstate : encapsulated pointer to rk_state
- The specific type depends on the python version. In Python 2 it is
- a PyCObject, in Python 3 a PyCapsule object.
-
- Returns
- -------
- out : python integer or ndarray of np.{{np_dt}}
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- """
- cdef npy_{{npy_udt}} off, rng, buf
- cdef npy_{{npy_udt}} *out
- cdef ndarray array "arrayObject"
- cdef npy_intp cnt
- cdef rk_state *state = <rk_state *>PyCapsule_GetPointer(rngstate, NULL)
-
- off = <npy_{{npy_udt}}>(low)
- rng = <npy_{{npy_udt}}>(high) - <npy_{{npy_udt}}>(low)
-
- if size is None:
- rk_random_{{npy_udt}}(off, rng, 1, &buf, state)
- return np.{{np_dt}}(<npy_{{npy_dt}}>buf)
- else:
- array = <ndarray>np.empty(size, np.{{np_dt}})
- cnt = PyArray_SIZE(array)
- array_data = <npy_{{npy_udt}} *>PyArray_DATA(array)
- with nogil:
- rk_random_{{npy_udt}}(off, rng, cnt, array_data, state)
- return array
-
-{{endfor}}
diff --git a/numpy/random/mtrand/randomkit.c b/numpy/random/mtrand/randomkit.c
deleted file mode 100644
index 6371ebe33..000000000
--- a/numpy/random/mtrand/randomkit.c
+++ /dev/null
@@ -1,626 +0,0 @@
-/* Random kit 1.3 */
-
-/*
- * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org)
- *
- * The rk_random and rk_seed functions algorithms and the original design of
- * the Mersenne Twister RNG:
- *
- * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- *
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * 3. The names of its contributors may not be used to endorse or promote
- * products derived from this software without specific prior written
- * permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * Original algorithm for the implementation of rk_interval function from
- * Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by
- * Magnus Jonsson.
- *
- * Constants used in the rk_double implementation by Isaku Wada.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sublicense, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included
- * in all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
- * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
- * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
- * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
- * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
- * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/* static char const rcsid[] =
- "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */
-
-#ifdef _WIN32
-/*
- * Windows
- * XXX: we have to use this ugly defined(__GNUC__) because it is not easy to
- * detect the compiler used in distutils itself
- */
-#if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND))
-
-/*
- * FIXME: ideally, we should set this to the real version of MSVCRT. We need
- * something higher than 0x601 to enable _ftime64 and co
- */
-#define __MSVCRT_VERSION__ 0x0700
-#include <time.h>
-#include <sys/timeb.h>
-
-/*
- * mingw msvcr lib import wrongly export _ftime, which does not exist in the
- * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which
- * is available in those versions of the runtime
- */
-#define _FTIME(x) _ftime64((x))
-#else
-#include <time.h>
-#include <sys/timeb.h>
-#define _FTIME(x) _ftime((x))
-#endif
-
-#ifndef RK_NO_WINCRYPT
-/* Windows crypto */
-#ifndef _WIN32_WINNT
-#define _WIN32_WINNT 0x0400
-#endif
-#include <windows.h>
-#include <wincrypt.h>
-#endif
-
-/*
- * Do not move this include. randomkit.h must be included
- * after windows timeb.h is included.
- */
-#include "randomkit.h"
-
-#else
-/* Unix */
-#include "randomkit.h"
-#include <time.h>
-#include <sys/time.h>
-#include <unistd.h>
-#endif
-
-#include <stddef.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <errno.h>
-#include <limits.h>
-#include <math.h>
-#include <assert.h>
-
-#ifndef RK_DEV_URANDOM
-#define RK_DEV_URANDOM "/dev/urandom"
-#endif
-
-#ifndef RK_DEV_RANDOM
-#define RK_DEV_RANDOM "/dev/random"
-#endif
-
-char *rk_strerror[RK_ERR_MAX] =
-{
- "no error",
- "random device unvavailable"
-};
-
-/* static functions */
-static unsigned long rk_hash(unsigned long key);
-
-void
-rk_seed(unsigned long seed, rk_state *state)
-{
- int pos;
- seed &= 0xffffffffUL;
-
- /* Knuth's PRNG as used in the Mersenne Twister reference implementation */
- for (pos = 0; pos < RK_STATE_LEN; pos++) {
- state->key[pos] = seed;
- seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL;
- }
- state->pos = RK_STATE_LEN;
- state->gauss = 0;
- state->has_gauss = 0;
- state->has_binomial = 0;
-}
-
-/* Thomas Wang 32 bits integer hash function */
-unsigned long
-rk_hash(unsigned long key)
-{
- key += ~(key << 15);
- key ^= (key >> 10);
- key += (key << 3);
- key ^= (key >> 6);
- key += ~(key << 11);
- key ^= (key >> 16);
- return key;
-}
-
-rk_error
-rk_randomseed(rk_state *state)
-{
-#ifndef _WIN32
- struct timeval tv;
-#else
- struct _timeb tv;
-#endif
- int i;
-
- if (rk_devfill(state->key, sizeof(state->key), 0) == RK_NOERR) {
- /* ensures non-zero key */
- state->key[0] |= 0x80000000UL;
- state->pos = RK_STATE_LEN;
- state->gauss = 0;
- state->has_gauss = 0;
- state->has_binomial = 0;
-
- for (i = 0; i < 624; i++) {
- state->key[i] &= 0xffffffffUL;
- }
- return RK_NOERR;
- }
-
-#ifndef _WIN32
- gettimeofday(&tv, NULL);
- rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec)
- ^ rk_hash(clock()), state);
-#else
- _FTIME(&tv);
- rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state);
-#endif
-
- return RK_ENODEV;
-}
-
-/* Magic Mersenne Twister constants */
-#define N 624
-#define M 397
-#define MATRIX_A 0x9908b0dfUL
-#define UPPER_MASK 0x80000000UL
-#define LOWER_MASK 0x7fffffffUL
-
-/*
- * Slightly optimised reference implementation of the Mersenne Twister
- * Note that regardless of the precision of long, only 32 bit random
- * integers are produced
- */
-unsigned long
-rk_random(rk_state *state)
-{
- unsigned long y;
-
- if (state->pos == RK_STATE_LEN) {
- int i;
-
- for (i = 0; i < N - M; i++) {
- y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK);
- state->key[i] = state->key[i+M] ^ (y>>1) ^ (-(y & 1) & MATRIX_A);
- }
- for (; i < N - 1; i++) {
- y = (state->key[i] & UPPER_MASK) | (state->key[i+1] & LOWER_MASK);
- state->key[i] = state->key[i+(M-N)] ^ (y>>1) ^ (-(y & 1) & MATRIX_A);
- }
- y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK);
- state->key[N - 1] = state->key[M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
-
- state->pos = 0;
- }
- y = state->key[state->pos++];
-
- /* Tempering */
- y ^= (y >> 11);
- y ^= (y << 7) & 0x9d2c5680UL;
- y ^= (y << 15) & 0xefc60000UL;
- y ^= (y >> 18);
-
- return y;
-}
-
-
-/*
- * Returns an unsigned 64 bit random integer.
- */
-NPY_INLINE static npy_uint64
-rk_uint64(rk_state *state)
-{
- npy_uint64 upper = (npy_uint64)rk_random(state) << 32;
- npy_uint64 lower = (npy_uint64)rk_random(state);
- return upper | lower;
-}
-
-
-/*
- * Returns an unsigned 32 bit random integer.
- */
-NPY_INLINE static npy_uint32
-rk_uint32(rk_state *state)
-{
- return (npy_uint32)rk_random(state);
-}
-
-
-/*
- * Fills an array with cnt random npy_uint64 between off and off + rng
- * inclusive. The numbers wrap if rng is sufficiently large.
- */
-void
-rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt,
- npy_uint64 *out, rk_state *state)
-{
- npy_uint64 val, mask = rng;
- npy_intp i;
-
- if (rng == 0) {
- for (i = 0; i < cnt; i++) {
- out[i] = off;
- }
- return;
- }
-
- /* Smallest bit mask >= max */
- mask |= mask >> 1;
- mask |= mask >> 2;
- mask |= mask >> 4;
- mask |= mask >> 8;
- mask |= mask >> 16;
- mask |= mask >> 32;
-
- for (i = 0; i < cnt; i++) {
- if (rng <= 0xffffffffUL) {
- while ((val = (rk_uint32(state) & mask)) > rng);
- }
- else {
- while ((val = (rk_uint64(state) & mask)) > rng);
- }
- out[i] = off + val;
- }
-}
-
-
-/*
- * Fills an array with cnt random npy_uint32 between off and off + rng
- * inclusive. The numbers wrap if rng is sufficiently large.
- */
-void
-rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt,
- npy_uint32 *out, rk_state *state)
-{
- npy_uint32 val, mask = rng;
- npy_intp i;
-
- if (rng == 0) {
- for (i = 0; i < cnt; i++) {
- out[i] = off;
- }
- return;
- }
-
- /* Smallest bit mask >= max */
- mask |= mask >> 1;
- mask |= mask >> 2;
- mask |= mask >> 4;
- mask |= mask >> 8;
- mask |= mask >> 16;
-
- for (i = 0; i < cnt; i++) {
- while ((val = (rk_uint32(state) & mask)) > rng);
- out[i] = off + val;
- }
-}
-
-
-/*
- * Fills an array with cnt random npy_uint16 between off and off + rng
- * inclusive. The numbers wrap if rng is sufficiently large.
- */
-void
-rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt,
- npy_uint16 *out, rk_state *state)
-{
- npy_uint16 val, mask = rng;
- npy_intp i;
- npy_uint32 buf;
- int bcnt = 0;
-
- if (rng == 0) {
- for (i = 0; i < cnt; i++) {
- out[i] = off;
- }
- return;
- }
-
- /* Smallest bit mask >= max */
- mask |= mask >> 1;
- mask |= mask >> 2;
- mask |= mask >> 4;
- mask |= mask >> 8;
-
- for (i = 0; i < cnt; i++) {
- do {
- if (!bcnt) {
- buf = rk_uint32(state);
- bcnt = 1;
- }
- else {
- buf >>= 16;
- bcnt--;
- }
- val = (npy_uint16)buf & mask;
- } while (val > rng);
- out[i] = off + val;
- }
-}
-
-
-/*
- * Fills an array with cnt random npy_uint8 between off and off + rng
- * inclusive. The numbers wrap if rng is sufficiently large.
- */
-void
-rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt,
- npy_uint8 *out, rk_state *state)
-{
- npy_uint8 val, mask = rng;
- npy_intp i;
- npy_uint32 buf;
- int bcnt = 0;
-
- if (rng == 0) {
- for (i = 0; i < cnt; i++) {
- out[i] = off;
- }
- return;
- }
-
- /* Smallest bit mask >= max */
- mask |= mask >> 1;
- mask |= mask >> 2;
- mask |= mask >> 4;
-
- for (i = 0; i < cnt; i++) {
- do {
- if (!bcnt) {
- buf = rk_uint32(state);
- bcnt = 3;
- }
- else {
- buf >>= 8;
- bcnt--;
- }
- val = (npy_uint8)buf & mask;
- } while (val > rng);
- out[i] = off + val;
- }
-}
-
-
-/*
- * Fills an array with cnt random npy_bool between off and off + rng
- * inclusive.
- */
-void
-rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt,
- npy_bool *out, rk_state *state)
-{
- npy_intp i;
- npy_uint32 buf;
- int bcnt = 0;
-
- if (rng == 0) {
- for (i = 0; i < cnt; i++) {
- out[i] = off;
- }
- return;
- }
-
- /* If we reach here rng and mask are one and off is zero */
- assert(rng == 1 && off == 0);
- for (i = 0; i < cnt; i++) {
- if (!bcnt) {
- buf = rk_uint32(state);
- bcnt = 31;
- }
- else {
- buf >>= 1;
- bcnt--;
- }
- out[i] = (buf & 0x00000001) != 0;
- }
-}
-
-
-long
-rk_long(rk_state *state)
-{
- return rk_ulong(state) >> 1;
-}
-
-unsigned long
-rk_ulong(rk_state *state)
-{
-#if ULONG_MAX <= 0xffffffffUL
- return rk_random(state);
-#else
- return (rk_random(state) << 32) | (rk_random(state));
-#endif
-}
-
-unsigned long
-rk_interval(unsigned long max, rk_state *state)
-{
- unsigned long mask = max, value;
-
- if (max == 0) {
- return 0;
- }
- /* Smallest bit mask >= max */
- mask |= mask >> 1;
- mask |= mask >> 2;
- mask |= mask >> 4;
- mask |= mask >> 8;
- mask |= mask >> 16;
-#if ULONG_MAX > 0xffffffffUL
- mask |= mask >> 32;
-#endif
-
- /* Search a random value in [0..mask] <= max */
-#if ULONG_MAX > 0xffffffffUL
- if (max <= 0xffffffffUL) {
- while ((value = (rk_random(state) & mask)) > max);
- }
- else {
- while ((value = (rk_ulong(state) & mask)) > max);
- }
-#else
- while ((value = (rk_ulong(state) & mask)) > max);
-#endif
- return value;
-}
-
-double
-rk_double(rk_state *state)
-{
- /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */
- long a = rk_random(state) >> 5, b = rk_random(state) >> 6;
- return (a * 67108864.0 + b) / 9007199254740992.0;
-}
-
-void
-rk_fill(void *buffer, size_t size, rk_state *state)
-{
- unsigned long r;
- unsigned char *buf = buffer;
-
- for (; size >= 4; size -= 4) {
- r = rk_random(state);
- *(buf++) = r & 0xFF;
- *(buf++) = (r >> 8) & 0xFF;
- *(buf++) = (r >> 16) & 0xFF;
- *(buf++) = (r >> 24) & 0xFF;
- }
-
- if (!size) {
- return;
- }
- r = rk_random(state);
- for (; size; r >>= 8, size --) {
- *(buf++) = (unsigned char)(r & 0xFF);
- }
-}
-
-rk_error
-rk_devfill(void *buffer, size_t size, int strong)
-{
-#ifndef _WIN32
- FILE *rfile;
- int done;
-
- if (strong) {
- rfile = fopen(RK_DEV_RANDOM, "rb");
- }
- else {
- rfile = fopen(RK_DEV_URANDOM, "rb");
- }
- if (rfile == NULL) {
- return RK_ENODEV;
- }
- done = fread(buffer, size, 1, rfile);
- fclose(rfile);
- if (done) {
- return RK_NOERR;
- }
-#else
-
-#ifndef RK_NO_WINCRYPT
- HCRYPTPROV hCryptProv;
- BOOL done;
-
- if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL,
- CRYPT_VERIFYCONTEXT) || !hCryptProv) {
- return RK_ENODEV;
- }
- done = CryptGenRandom(hCryptProv, size, (unsigned char *)buffer);
- CryptReleaseContext(hCryptProv, 0);
- if (done) {
- return RK_NOERR;
- }
-#endif
-
-#endif
- return RK_ENODEV;
-}
-
-rk_error
-rk_altfill(void *buffer, size_t size, int strong, rk_state *state)
-{
- rk_error err;
-
- err = rk_devfill(buffer, size, strong);
- if (err) {
- rk_fill(buffer, size, state);
- }
- return err;
-}
-
-double
-rk_gauss(rk_state *state)
-{
- if (state->has_gauss) {
- const double tmp = state->gauss;
- state->gauss = 0;
- state->has_gauss = 0;
- return tmp;
- }
- else {
- double f, x1, x2, r2;
-
- do {
- x1 = 2.0*rk_double(state) - 1.0;
- x2 = 2.0*rk_double(state) - 1.0;
- r2 = x1*x1 + x2*x2;
- }
- while (r2 >= 1.0 || r2 == 0.0);
-
- /* Polar method, a more efficient version of the Box-Muller approach. */
- f = sqrt(-2.0*log(r2)/r2);
- /* Keep for next call */
- state->gauss = f*x1;
- state->has_gauss = 1;
- return f*x2;
- }
-}
diff --git a/numpy/random/pcg64.pyx b/numpy/random/pcg64.pyx
new file mode 100644
index 000000000..585520139
--- /dev/null
+++ b/numpy/random/pcg64.pyx
@@ -0,0 +1,270 @@
+import numpy as np
+cimport numpy as np
+
+from .common cimport *
+from .bit_generator cimport BitGenerator
+
+__all__ = ['PCG64']
+
+cdef extern from "src/pcg64/pcg64.h":
+ # Use int as generic type, actual type read from pcg64.h and is platform dependent
+ ctypedef int pcg64_random_t
+
+ struct s_pcg64_state:
+ pcg64_random_t *pcg_state
+ int has_uint32
+ uint32_t uinteger
+
+ ctypedef s_pcg64_state pcg64_state
+
+ uint64_t pcg64_next64(pcg64_state *state) nogil
+ uint32_t pcg64_next32(pcg64_state *state) nogil
+ void pcg64_jump(pcg64_state *state)
+ void pcg64_advance(pcg64_state *state, uint64_t *step)
+ void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc)
+ void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
+ void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
+
+cdef uint64_t pcg64_uint64(void* st) nogil:
+ return pcg64_next64(<pcg64_state *>st)
+
+cdef uint32_t pcg64_uint32(void *st) nogil:
+ return pcg64_next32(<pcg64_state *> st)
+
+cdef double pcg64_double(void* st) nogil:
+ return uint64_to_double(pcg64_next64(<pcg64_state *>st))
+
+
+cdef class PCG64(BitGenerator):
+ """
+    PCG64(seed=None)
+
+ BitGenerator for the PCG-64 pseudo-random number generator.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], ISeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in an implementor of the `ISeedSequence` interface like
+ `SeedSequence`.
+
+ Notes
+ -----
+ PCG-64 is a 128-bit implementation of O'Neill's permutation congruential
+ generator ([1]_, [2]_). PCG-64 has a period of :math:`2^{128}` and supports
+ advancing an arbitrary number of steps as well as :math:`2^{127}` streams.
+ The specific member of the PCG family that we use is PCG XSL RR 128/64
+ as described in the paper ([2]_).
+
+ ``PCG64`` provides a capsule containing function pointers that produce
+    doubles, and unsigned 32- and 64-bit integers. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ Supports the method :meth:`advance` to advance the RNG an arbitrary number of
+ steps. The state of the PCG-64 RNG is represented by 2 128-bit unsigned
+ integers.
+
+ **State and Seeding**
+
+ The ``PCG64`` state vector consists of 2 unsigned 128-bit values,
+ which are represented externally as Python ints. One is the state of the
+ PRNG, which is advanced by a linear congruential generator (LCG). The
+ second is a fixed odd increment used in the LCG.
+
+ The input seed is processed by `SeedSequence` to generate both values. The
+ increment is not independently settable.
+
+ **Parallel Features**
+
+ The preferred way to use a BitGenerator in parallel applications is to use
+ the `SeedSequence.spawn` method to obtain entropy values, and to use these
+ to generate new BitGenerators:
+
+ >>> from numpy.random import Generator, PCG64, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> rg = [Generator(PCG64(s)) for s in sg.spawn(10)]
+
+ **Compatibility Guarantee**
+
+    ``PCG64`` makes a guarantee that a fixed seed will always produce
+ the same random integer stream.
+
+ References
+ ----------
+ .. [1] `"PCG, A Family of Better Random Number Generators"
+ <http://www.pcg-random.org/>`_
+ .. [2] O'Neill, Melissa E. `"PCG: A Family of Simple Fast Space-Efficient
+ Statistically Good Algorithms for Random Number Generation"
+ <https://www.cs.hmc.edu/tr/hmc-cs-2014-0905.pdf>`_
+ """
+
+ cdef pcg64_state rng_state
+ cdef pcg64_random_t pcg64_random_state
+
+ def __init__(self, seed=None):
+ BitGenerator.__init__(self, seed)
+ self.rng_state.pcg_state = &self.pcg64_random_state
+
+ self._bitgen.state = <void *>&self.rng_state
+ self._bitgen.next_uint64 = &pcg64_uint64
+ self._bitgen.next_uint32 = &pcg64_uint32
+ self._bitgen.next_double = &pcg64_double
+ self._bitgen.next_raw = &pcg64_uint64
+ # Seed the _bitgen
+ val = self._seed_seq.generate_state(4, np.uint64)
+ pcg64_set_seed(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(val),
+ (<uint64_t *>np.PyArray_DATA(val) + 2))
+ self._reset_state_variables()
+
+ cdef _reset_state_variables(self):
+ self.rng_state.has_uint32 = 0
+ self.rng_state.uinteger = 0
+
+ cdef jump_inplace(self, jumps):
+ """
+ Jump state in-place
+ Not part of public API
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the rng.
+
+ Notes
+ -----
+ The step size is phi-1 when multiplied by 2**128 where phi is the
+ golden ratio.
+ """
+ step = 0x9e3779b97f4a7c15f39cc0605cedc835
+ self.advance(step * int(jumps))
+
+ def jumped(self, jumps=1):
+ """
+ jumped(jumps=1)
+
+ Returns a new bit generator with the state jumped.
+
+ Jumps the state as-if jumps * 210306068529402873165736369884012333109
+ random numbers have been generated.
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the bit generator returned
+
+ Returns
+ -------
+ bit_generator : PCG64
+ New instance of generator jumped iter times
+
+ Notes
+ -----
+ The step size is phi-1 when multiplied by 2**128 where phi is the
+ golden ratio.
+ """
+ cdef PCG64 bit_generator
+
+ bit_generator = self.__class__()
+ bit_generator.state = self.state
+ bit_generator.jump_inplace(jumps)
+
+ return bit_generator
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+
+ # state_vec is state.high, state.low, inc.high, inc.low
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ pcg64_get_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ &has_uint32, &uinteger)
+ state = int(state_vec[0]) * 2**64 + int(state_vec[1])
+ inc = int(state_vec[2]) * 2**64 + int(state_vec[3])
+ return {'bit_generator': self.__class__.__name__,
+ 'state': {'state': state, 'inc': inc},
+ 'has_uint32': has_uint32,
+ 'uinteger': uinteger}
+
+ @state.setter
+ def state(self, value):
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'RNG'.format(self.__class__.__name__))
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ state_vec[0] = value['state']['state'] // 2 ** 64
+ state_vec[1] = value['state']['state'] % 2 ** 64
+ state_vec[2] = value['state']['inc'] // 2 ** 64
+ state_vec[3] = value['state']['inc'] % 2 ** 64
+ has_uint32 = value['has_uint32']
+ uinteger = value['uinteger']
+ pcg64_set_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ has_uint32, uinteger)
+
+ def advance(self, delta):
+ """
+ advance(delta)
+
+ Advance the underlying RNG as-if delta draws have occurred.
+
+ Parameters
+ ----------
+ delta : integer, positive
+ Number of draws to advance the RNG. Must be less than the
+ size state variable in the underlying RNG.
+
+ Returns
+ -------
+ self : PCG64
+ RNG advanced delta steps
+
+ Notes
+ -----
+ Advancing a RNG updates the underlying RNG state as-if a given
+ number of calls to the underlying RNG have been made. In general
+    there is not a one-to-one relationship between the number of output
+ random values from a particular distribution and the number of
+ draws from the core RNG. This occurs for two reasons:
+
+ * The random values are simulated using a rejection-based method
+ and so, on average, more than one value from the underlying
+      RNG is required to generate a single draw.
+ * The number of bits required to generate a simulated value
+ differs from the number of bits generated by the underlying
+ RNG. For example, two 16-bit integer values can be simulated
+ from a single draw of a 32-bit RNG.
+
+ Advancing the RNG state resets any pre-computed random numbers.
+ This is required to ensure exact reproducibility.
+ """
+ delta = wrap_int(delta, 128)
+
+ cdef np.ndarray d = np.empty(2, dtype=np.uint64)
+ d[0] = delta // 2**64
+ d[1] = delta % 2**64
+ pcg64_advance(&self.rng_state, <uint64_t *>np.PyArray_DATA(d))
+ self._reset_state_variables()
+ return self
diff --git a/numpy/random/philox.pyx b/numpy/random/philox.pyx
new file mode 100644
index 000000000..8b7683017
--- /dev/null
+++ b/numpy/random/philox.pyx
@@ -0,0 +1,336 @@
+from cpython.pycapsule cimport PyCapsule_New
+
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
+import numpy as np
+
+from .common cimport *
+from .bit_generator cimport BitGenerator
+
+__all__ = ['Philox']
+
+np.import_array()
+
+DEF PHILOX_BUFFER_SIZE=4
+
+cdef extern from 'src/philox/philox.h':
+ struct s_r123array2x64:
+ uint64_t v[2]
+
+ struct s_r123array4x64:
+ uint64_t v[4]
+
+ ctypedef s_r123array4x64 r123array4x64
+ ctypedef s_r123array2x64 r123array2x64
+
+ ctypedef r123array4x64 philox4x64_ctr_t
+ ctypedef r123array2x64 philox4x64_key_t
+
+ struct s_philox_state:
+ philox4x64_ctr_t *ctr
+ philox4x64_key_t *key
+ int buffer_pos
+ uint64_t buffer[PHILOX_BUFFER_SIZE]
+ int has_uint32
+ uint32_t uinteger
+
+ ctypedef s_philox_state philox_state
+
+ uint64_t philox_next64(philox_state *state) nogil
+ uint32_t philox_next32(philox_state *state) nogil
+ void philox_jump(philox_state *state)
+ void philox_advance(uint64_t *step, philox_state *state)
+
+
+cdef uint64_t philox_uint64(void*st) nogil:
+ return philox_next64(<philox_state *> st)
+
+cdef uint32_t philox_uint32(void *st) nogil:
+ return philox_next32(<philox_state *> st)
+
+cdef double philox_double(void*st) nogil:
+ return uint64_to_double(philox_next64(<philox_state *> st))
+
+cdef class Philox(BitGenerator):
+ """
+ Philox(seed=None, counter=None, key=None)
+
+ Container for the Philox (4x64) pseudo-random number generator.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], ISeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in an implementor of the `ISeedSequence` interface like
+ `SeedSequence`.
+ counter : {None, int, array_like}, optional
+ Counter to use in the Philox state. Can be either
+ a Python int (long in 2.x) in [0, 2**256) or a 4-element uint64 array.
+ If not provided, the RNG is initialized at 0.
+ key : {None, int, array_like}, optional
+ Key to use in the Philox state. Unlike seed, the value in key is
+ directly set. Can be either a Python int in [0, 2**128) or a 2-element
+ uint64 array. `key` and `seed` cannot both be used.
+
+ Attributes
+ ----------
+ lock: threading.Lock
+        Lock instance that is shared so that the same bit generator can
+ be used in multiple Generators without corrupting the state. Code that
+ generates values from a bit generator should hold the bit generator's
+ lock.
+
+ Notes
+ -----
+ Philox is a 64-bit PRNG that uses a counter-based design based on weaker
+ (and faster) versions of cryptographic functions [1]_. Instances using
+ different values of the key produce independent sequences. Philox has a
+ period of :math:`2^{256} - 1` and supports arbitrary advancing and jumping
+ the sequence in increments of :math:`2^{128}`. These features allow
+ multiple non-overlapping sequences to be generated.
+
+ ``Philox`` provides a capsule containing function pointers that produce
+    doubles, and unsigned 32- and 64-bit integers. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ **State and Seeding**
+
+ The ``Philox`` state vector consists of a 256-bit value encoded as
+ a 4-element uint64 array and a 128-bit value encoded as a 2-element uint64
+ array. The former is a counter which is incremented by 1 for every 4 64-bit
+    randoms produced. The second is a key which determines the sequence
+ produced. Using different keys produces independent sequences.
+
+ The input seed is processed by `SeedSequence` to generate the key. The
+ counter is set to 0.
+
+ Alternately, one can omit the seed parameter and set the ``key`` and
+ ``counter`` directly.
+
+ **Parallel Features**
+
+ The preferred way to use a BitGenerator in parallel applications is to use
+ the `SeedSequence.spawn` method to obtain entropy values, and to use these
+ to generate new BitGenerators:
+
+ >>> from numpy.random import Generator, Philox, SeedSequence
+ >>> sg = SeedSequence(1234)
+ >>> rg = [Generator(Philox(s)) for s in sg.spawn(10)]
+
+ ``Philox`` can be used in parallel applications by calling the ``jumped``
+    method to advance the state as-if :math:`2^{128}` random numbers have
+ been generated. Alternatively, ``advance`` can be used to advance the
+ counter for any positive step in [0, 2**256). When using ``jumped``, all
+ generators should be chained to ensure that the segments come from the same
+ sequence.
+
+ >>> from numpy.random import Generator, Philox
+ >>> bit_generator = Philox(1234)
+ >>> rg = []
+ >>> for _ in range(10):
+ ... rg.append(Generator(bit_generator))
+ ... bit_generator = bit_generator.jumped()
+
+ Alternatively, ``Philox`` can be used in parallel applications by using
+    a sequence of distinct keys where each instance uses a different key.
+
+ >>> key = 2**96 + 2**33 + 2**17 + 2**9
+ >>> rg = [Generator(Philox(key=key+i)) for i in range(10)]
+
+ **Compatibility Guarantee**
+
+ ``Philox`` makes a guarantee that a fixed seed will always produce
+ the same random integer stream.
+
+ Examples
+ --------
+ >>> from numpy.random import Generator, Philox
+ >>> rg = Generator(Philox(1234))
+ >>> rg.standard_normal()
+ 0.123 # random
+
+ References
+ ----------
+ .. [1] John K. Salmon, Mark A. Moraes, Ron O. Dror, and David E. Shaw,
+ "Parallel Random Numbers: As Easy as 1, 2, 3," Proceedings of
+ the International Conference for High Performance Computing,
+ Networking, Storage and Analysis (SC11), New York, NY: ACM, 2011.
+ """
+ cdef philox_state rng_state
+ cdef philox4x64_key_t philox_key
+ cdef philox4x64_ctr_t philox_ctr
+
+ def __init__(self, seed=None, counter=None, key=None):
+ if seed is not None and key is not None:
+ raise ValueError('seed and key cannot be both used')
+ BitGenerator.__init__(self, seed)
+ self.rng_state.ctr = &self.philox_ctr
+ self.rng_state.key = &self.philox_key
+ if key is not None:
+ key = int_to_array(key, 'key', 128, 64)
+ for i in range(2):
+ self.rng_state.key.v[i] = key[i]
+ # The seed sequence is invalid.
+ self._seed_seq = None
+ else:
+ key = self._seed_seq.generate_state(2, np.uint64)
+ for i in range(2):
+ self.rng_state.key.v[i] = key[i]
+ counter = 0 if counter is None else counter
+ counter = int_to_array(counter, 'counter', 256, 64)
+ for i in range(4):
+ self.rng_state.ctr.v[i] = counter[i]
+
+ self._reset_state_variables()
+
+ self._bitgen.state = <void *>&self.rng_state
+ self._bitgen.next_uint64 = &philox_uint64
+ self._bitgen.next_uint32 = &philox_uint32
+ self._bitgen.next_double = &philox_double
+ self._bitgen.next_raw = &philox_uint64
+
+ cdef _reset_state_variables(self):
+ self.rng_state.has_uint32 = 0
+ self.rng_state.uinteger = 0
+ self.rng_state.buffer_pos = PHILOX_BUFFER_SIZE
+ for i in range(PHILOX_BUFFER_SIZE):
+ self.rng_state.buffer[i] = 0
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ ctr = np.empty(4, dtype=np.uint64)
+ key = np.empty(2, dtype=np.uint64)
+ buffer = np.empty(PHILOX_BUFFER_SIZE, dtype=np.uint64)
+ for i in range(4):
+ ctr[i] = self.rng_state.ctr.v[i]
+ if i < 2:
+ key[i] = self.rng_state.key.v[i]
+ for i in range(PHILOX_BUFFER_SIZE):
+ buffer[i] = self.rng_state.buffer[i]
+
+ state = {'counter': ctr, 'key': key}
+ return {'bit_generator': self.__class__.__name__,
+ 'state': state,
+ 'buffer': buffer,
+ 'buffer_pos': self.rng_state.buffer_pos,
+ 'has_uint32': self.rng_state.has_uint32,
+ 'uinteger': self.rng_state.uinteger}
+
+ @state.setter
+ def state(self, value):
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'PRNG'.format(self.__class__.__name__))
+ for i in range(4):
+ self.rng_state.ctr.v[i] = <uint64_t> value['state']['counter'][i]
+ if i < 2:
+ self.rng_state.key.v[i] = <uint64_t> value['state']['key'][i]
+ for i in range(PHILOX_BUFFER_SIZE):
+ self.rng_state.buffer[i] = <uint64_t> value['buffer'][i]
+
+ self.rng_state.has_uint32 = value['has_uint32']
+ self.rng_state.uinteger = value['uinteger']
+ self.rng_state.buffer_pos = value['buffer_pos']
+
+ cdef jump_inplace(self, iter):
+ """
+ Jump state in-place
+
+ Not part of public API
+
+ Parameters
+ ----------
+ iter : integer, positive
+ Number of times to jump the state of the rng.
+ """
+ self.advance(iter * int(2 ** 128))
+
+ def jumped(self, jumps=1):
+ """
+ jumped(jumps=1)
+
+ Returns a new bit generator with the state jumped
+
+        The state of the returned bit generator is jumped as-if
+ 2**(128 * jumps) random numbers have been generated.
+
+ Parameters
+ ----------
+ jumps : integer, positive
+ Number of times to jump the state of the bit generator returned
+
+ Returns
+ -------
+ bit_generator : Philox
+ New instance of generator jumped iter times
+ """
+ cdef Philox bit_generator
+
+ bit_generator = self.__class__()
+ bit_generator.state = self.state
+ bit_generator.jump_inplace(jumps)
+
+ return bit_generator
+
+ def advance(self, delta):
+ """
+ advance(delta)
+
+ Advance the underlying RNG as-if delta draws have occurred.
+
+ Parameters
+ ----------
+ delta : integer, positive
+ Number of draws to advance the RNG. Must be less than the
+ size state variable in the underlying RNG.
+
+ Returns
+ -------
+ self : Philox
+ RNG advanced delta steps
+
+ Notes
+ -----
+ Advancing a RNG updates the underlying RNG state as-if a given
+ number of calls to the underlying RNG have been made. In general
+        there is not a one-to-one relationship between the number of output
+ random values from a particular distribution and the number of
+ draws from the core RNG. This occurs for two reasons:
+
+ * The random values are simulated using a rejection-based method
+ and so, on average, more than one value from the underlying
+          RNG is required to generate a single draw.
+ * The number of bits required to generate a simulated value
+ differs from the number of bits generated by the underlying
+ RNG. For example, two 16-bit integer values can be simulated
+ from a single draw of a 32-bit RNG.
+
+ Advancing the RNG state resets any pre-computed random numbers.
+ This is required to ensure exact reproducibility.
+ """
+ delta = wrap_int(delta, 256)
+
+ cdef np.ndarray delta_a
+ delta_a = int_to_array(delta, 'step', 256, 64)
+ philox_advance(<uint64_t *> delta_a.data, &self.rng_state)
+ self._reset_state_variables()
+ return self
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index 394a70ead..ce7f0565f 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -1,22 +1,17 @@
from __future__ import division, print_function
-from os.path import join
+import os
+import platform
import sys
-from distutils.dep_util import newer
-from distutils.msvccompiler import get_build_version as get_msvc_build_version
+from os.path import join
+
+from numpy.distutils.system_info import platform_bits
-def needs_mingw_ftime_workaround():
- # We need the mingw workaround for _ftime if the msvc runtime version is
- # 7.1 or above and we build with mingw ...
- # ... but we can't easily detect compiler version outside distutils command
- # context, so we will need to detect in randomkit whether we build with gcc
- msver = get_msvc_build_version()
- if msver and msver >= 8:
- return True
+is_msvc = (platform.platform().startswith('Windows') and
+ platform.python_compiler().startswith('MS'))
- return False
-def configuration(parent_package='',top_path=None):
+def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
@@ -24,7 +19,7 @@ def configuration(parent_package='',top_path=None):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
if sys.platform == 'win32':
- libs.append('Advapi32')
+ libs.extend(['Advapi32', 'Kernel32'])
ext.libraries.extend(libs)
return None
@@ -36,28 +31,105 @@ def configuration(parent_package='',top_path=None):
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1')]
- if needs_mingw_ftime_workaround():
- defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
-
- libs = []
- # Configure mtrand
- config.add_extension('mtrand',
- sources=[join('mtrand', x) for x in
- ['mtrand.c', 'randomkit.c', 'initarray.c',
- 'distributions.c']]+[generate_libraries],
- libraries=libs,
- depends=[join('mtrand', '*.h'),
- join('mtrand', '*.pyx'),
- join('mtrand', '*.pxi'),],
- define_macros=defs,
- )
- config.add_data_files(('.', join('mtrand', 'randomkit.h')))
+ defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_data_dir('tests')
+ EXTRA_LINK_ARGS = []
+ # Math lib
+ EXTRA_LIBRARIES = ['m'] if os.name != 'nt' else []
+ # Some bit generators exclude GCC inlining
+ EXTRA_COMPILE_ARGS = ['-U__GNUC_GNU_INLINE__']
+
+ if is_msvc and platform_bits == 32:
+ # 32-bit windows requires explicit sse2 option
+ EXTRA_COMPILE_ARGS += ['/arch:SSE2']
+ elif not is_msvc:
+ # Some bit generators require c99
+ EXTRA_COMPILE_ARGS += ['-std=c99']
+ INTEL_LIKE = any(arch in platform.machine()
+ for arch in ('x86', 'i686', 'i386', 'amd64'))
+ if INTEL_LIKE:
+ # Assumes GCC or GCC-like compiler
+ EXTRA_COMPILE_ARGS += ['-msse2']
+
+ # Use legacy integer variable sizes
+ LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
+ PCG64_DEFS = []
+ # One can force emulated 128-bit arithmetic if one wants.
+ #PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
+
+ for gen in ['mt19937']:
+ # gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen),
+ 'src/{0}/{0}.c'.format(gen),
+ 'src/{0}/{0}-jump.c'.format(gen)],
+ include_dirs=['.', 'src', join('src', gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['%s.pyx' % gen],
+ define_macros=defs,
+ )
+ for gen in ['philox', 'pcg64', 'sfc64']:
+ # gen.pyx, src/gen/gen.c
+ _defs = defs + PCG64_DEFS if gen == 'pcg64' else defs
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen),
+ 'src/{0}/{0}.c'.format(gen)],
+ include_dirs=['.', 'src', join('src', gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['%s.pyx' % gen, 'bit_generator.pyx',
+ 'bit_generator.pxd'],
+ define_macros=_defs,
+ )
+ for gen in ['common', 'bit_generator']:
+ # gen.pyx
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ include_dirs=['.', 'src'],
+ depends=['%s.pyx' % gen, '%s.pxd' % gen,],
+ define_macros=defs,
+ )
+ other_srcs = [
+ 'src/distributions/logfactorial.c',
+ 'src/distributions/distributions.c',
+ 'src/distributions/random_hypergeometric.c',
+ ]
+ for gen in ['generator', 'bounded_integers']:
+ # gen.pyx, src/distributions/distributions.c
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen)] + other_srcs,
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ include_dirs=['.', 'src'],
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['%s.pyx' % gen],
+ define_macros=defs,
+ )
+ config.add_extension('mtrand',
+ # mtrand does not depend on random_hypergeometric.c.
+ sources=['mtrand.c',
+ 'src/legacy/legacy-distributions.c',
+ 'src/distributions/logfactorial.c',
+ 'src/distributions/distributions.c'],
+ include_dirs=['.', 'src', 'src/legacy'],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['mtrand.pyx'],
+ define_macros=defs + LEGACY_DEFS,
+ )
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
+
setup(configuration=configuration)
diff --git a/numpy/random/sfc64.pyx b/numpy/random/sfc64.pyx
new file mode 100644
index 000000000..a881096e9
--- /dev/null
+++ b/numpy/random/sfc64.pyx
@@ -0,0 +1,144 @@
+import numpy as np
+cimport numpy as np
+
+from .common cimport *
+from .bit_generator cimport BitGenerator
+
+__all__ = ['SFC64']
+
+cdef extern from "src/sfc64/sfc64.h":
+ struct s_sfc64_state:
+ uint64_t s[4]
+ int has_uint32
+ uint32_t uinteger
+
+ ctypedef s_sfc64_state sfc64_state
+ uint64_t sfc64_next64(sfc64_state *state) nogil
+ uint32_t sfc64_next32(sfc64_state *state) nogil
+ void sfc64_set_seed(sfc64_state *state, uint64_t *seed)
+ void sfc64_get_state(sfc64_state *state, uint64_t *state_arr, int *has_uint32, uint32_t *uinteger)
+ void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32, uint32_t uinteger)
+
+
+cdef uint64_t sfc64_uint64(void* st) nogil:
+ return sfc64_next64(<sfc64_state *>st)
+
+cdef uint32_t sfc64_uint32(void *st) nogil:
+ return sfc64_next32(<sfc64_state *> st)
+
+cdef double sfc64_double(void* st) nogil:
+ return uint64_to_double(sfc64_next64(<sfc64_state *>st))
+
+
+cdef class SFC64(BitGenerator):
+ """
+ SFC64(seed=None)
+
+ BitGenerator for Chris Doty-Humphrey's Small Fast Chaotic PRNG.
+
+ Parameters
+ ----------
+ seed : {None, int, array_like[ints], ISeedSequence}, optional
+ A seed to initialize the `BitGenerator`. If None, then fresh,
+ unpredictable entropy will be pulled from the OS. If an ``int`` or
+ ``array_like[ints]`` is passed, then it will be passed to
+ `SeedSequence` to derive the initial `BitGenerator` state. One may also
+ pass in an implementor of the `ISeedSequence` interface like
+ `SeedSequence`.
+
+ Notes
+ -----
+ ``SFC64`` is a 256-bit implementation of Chris Doty-Humphrey's Small Fast
+ Chaotic PRNG ([1]_). ``SFC64`` has a few different cycles that one might be
+ on, depending on the seed; the expected period will be about
+ :math:`2^{255}` ([2]_). ``SFC64`` incorporates a 64-bit counter which means
+ that the absolute minimum cycle length is :math:`2^{64}` and that distinct
+ seeds will not run into each other for at least :math:`2^{64}` iterations.
+
+ ``SFC64`` provides a capsule containing function pointers that produce
+ doubles, and unsigned 32- and 64-bit integers. These are not
+ directly consumable in Python and must be consumed by a ``Generator``
+ or similar object that supports low-level access.
+
+ **State and Seeding**
+
+ The ``SFC64`` state vector consists of 4 unsigned 64-bit values. The last
+ is a 64-bit counter that increments by 1 each iteration.
+
+ The input seed is processed by `SeedSequence` to generate the first
+ 3 values, then the ``SFC64`` algorithm is iterated a small number of times
+ to mix.
+
+ **Compatibility Guarantee**
+
+ ``SFC64`` makes a guarantee that a fixed seed will always produce the same
+ random integer stream.
+
+ References
+ ----------
+ .. [1] `"PractRand"
+ <http://pracrand.sourceforge.net/RNG_engines.txt>`_
+ .. [2] `"Random Invertible Mapping Statistics"
+ <http://www.pcg-random.org/posts/random-invertible-mapping-statistics.html>`_
+ """
+
+ cdef sfc64_state rng_state
+
+ def __init__(self, seed=None):
+ BitGenerator.__init__(self, seed)
+ self._bitgen.state = <void *>&self.rng_state
+ self._bitgen.next_uint64 = &sfc64_uint64
+ self._bitgen.next_uint32 = &sfc64_uint32
+ self._bitgen.next_double = &sfc64_double
+ self._bitgen.next_raw = &sfc64_uint64
+ # Seed the _bitgen
+ val = self._seed_seq.generate_state(3, np.uint64)
+ sfc64_set_seed(&self.rng_state, <uint64_t*>np.PyArray_DATA(val))
+ self._reset_state_variables()
+
+ cdef _reset_state_variables(self):
+ self.rng_state.has_uint32 = 0
+ self.rng_state.uinteger = 0
+
+ @property
+ def state(self):
+ """
+ Get or set the PRNG state
+
+ Returns
+ -------
+ state : dict
+ Dictionary containing the information required to describe the
+ state of the PRNG
+ """
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ sfc64_get_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ &has_uint32, &uinteger)
+ return {'bit_generator': self.__class__.__name__,
+ 'state': {'state': state_vec},
+ 'has_uint32': has_uint32,
+ 'uinteger': uinteger}
+
+ @state.setter
+ def state(self, value):
+ cdef np.ndarray state_vec
+ cdef int has_uint32
+ cdef uint32_t uinteger
+ if not isinstance(value, dict):
+ raise TypeError('state must be a dict')
+ bitgen = value.get('bit_generator', '')
+ if bitgen != self.__class__.__name__:
+ raise ValueError('state must be for a {0} '
+ 'RNG'.format(self.__class__.__name__))
+ state_vec = <np.ndarray>np.empty(4, dtype=np.uint64)
+ state_vec[:] = value['state']['state']
+ has_uint32 = value['has_uint32']
+ uinteger = value['uinteger']
+ sfc64_set_state(&self.rng_state,
+ <uint64_t *>np.PyArray_DATA(state_vec),
+ has_uint32, uinteger)
diff --git a/numpy/random/src/aligned_malloc/aligned_malloc.c b/numpy/random/src/aligned_malloc/aligned_malloc.c
new file mode 100644
index 000000000..6e8192cfb
--- /dev/null
+++ b/numpy/random/src/aligned_malloc/aligned_malloc.c
@@ -0,0 +1,9 @@
+#include "aligned_malloc.h"
+
+static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n);
+
+static NPY_INLINE void *PyArray_malloc_aligned(size_t n);
+
+static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s);
+
+static NPY_INLINE void PyArray_free_aligned(void *p); \ No newline at end of file
diff --git a/numpy/random/src/aligned_malloc/aligned_malloc.h b/numpy/random/src/aligned_malloc/aligned_malloc.h
new file mode 100644
index 000000000..ea24f6d23
--- /dev/null
+++ b/numpy/random/src/aligned_malloc/aligned_malloc.h
@@ -0,0 +1,54 @@
+#ifndef _RANDOMDGEN__ALIGNED_MALLOC_H_
+#define _RANDOMDGEN__ALIGNED_MALLOC_H_
+
+#include "Python.h"
+#include "numpy/npy_common.h"
+
+#define NPY_MEMALIGN 16 /* 16 for SSE2, 32 for AVX, 64 for Xeon Phi */
+
+static NPY_INLINE void *PyArray_realloc_aligned(void *p, size_t n)
+{
+ void *p1, **p2, *base;
+ size_t old_offs, offs = NPY_MEMALIGN - 1 + sizeof(void *);
+ if (NPY_UNLIKELY(p != NULL))
+ {
+ base = *(((void **)p) - 1);
+ if (NPY_UNLIKELY((p1 = PyMem_Realloc(base, n + offs)) == NULL))
+ return NULL;
+ if (NPY_LIKELY(p1 == base))
+ return p;
+ p2 = (void **)(((Py_uintptr_t)(p1) + offs) & ~(NPY_MEMALIGN - 1));
+ old_offs = (size_t)((Py_uintptr_t)p - (Py_uintptr_t)base);
+ memmove((void *)p2, ((char *)p1) + old_offs, n);
+ }
+ else
+ {
+ if (NPY_UNLIKELY((p1 = PyMem_Malloc(n + offs)) == NULL))
+ return NULL;
+ p2 = (void **)(((Py_uintptr_t)(p1) + offs) & ~(NPY_MEMALIGN - 1));
+ }
+ *(p2 - 1) = p1;
+ return (void *)p2;
+}
+
+static NPY_INLINE void *PyArray_malloc_aligned(size_t n)
+{
+ return PyArray_realloc_aligned(NULL, n);
+}
+
+static NPY_INLINE void *PyArray_calloc_aligned(size_t n, size_t s)
+{
+ void *p;
+ if (NPY_UNLIKELY((p = PyArray_realloc_aligned(NULL, n * s)) == NULL))
+ return NULL;
+ memset(p, 0, n * s);
+ return p;
+}
+
+static NPY_INLINE void PyArray_free_aligned(void *p)
+{
+ void *base = *(((void **)p) - 1);
+ PyMem_Free(base);
+}
+
+#endif
diff --git a/numpy/random/src/bitgen.h b/numpy/random/src/bitgen.h
new file mode 100644
index 000000000..0adaaf2ee
--- /dev/null
+++ b/numpy/random/src/bitgen.h
@@ -0,0 +1,20 @@
+#ifndef _RANDOM_BITGEN_H
+#define _RANDOM_BITGEN_H
+
+#pragma once
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+/* Must match the declaration in numpy/random/common.pxd */
+
+typedef struct bitgen {
+ void *state;
+ uint64_t (*next_uint64)(void *st);
+ uint32_t (*next_uint32)(void *st);
+ double (*next_double)(void *st);
+ uint64_t (*next_raw)(void *st);
+} bitgen_t;
+
+
+#endif
diff --git a/numpy/random/src/distributions/LICENSE.md b/numpy/random/src/distributions/LICENSE.md
new file mode 100644
index 000000000..31576ba4b
--- /dev/null
+++ b/numpy/random/src/distributions/LICENSE.md
@@ -0,0 +1,61 @@
+## NumPy
+
+Copyright (c) 2005-2017, NumPy Developers.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+* Neither the name of the NumPy Developers nor the names of any
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+## Julia
+
+The ziggurat methods were derived from Julia.
+
+Copyright (c) 2009-2019: Jeff Bezanson, Stefan Karpinski, Viral B. Shah,
+and other contributors:
+
+https://github.com/JuliaLang/julia/contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c
new file mode 100644
index 000000000..1244ffe65
--- /dev/null
+++ b/numpy/random/src/distributions/distributions.c
@@ -0,0 +1,1782 @@
+#include "distributions.h"
+#include "ziggurat_constants.h"
+#include "logfactorial.h"
+
+#if defined(_MSC_VER) && defined(_WIN64)
+#include <intrin.h>
+#endif
+
+/* Random generators for external use */
+float random_float(bitgen_t *bitgen_state) { return next_float(bitgen_state); }
+
+double random_double(bitgen_t *bitgen_state) {
+ return next_double(bitgen_state);
+}
+
+static NPY_INLINE double next_standard_exponential(bitgen_t *bitgen_state) {
+ return -log(1.0 - next_double(bitgen_state));
+}
+
+double random_standard_exponential(bitgen_t *bitgen_state) {
+ return next_standard_exponential(bitgen_state);
+}
+
+void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt,
+ double *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = next_standard_exponential(bitgen_state);
+ }
+}
+
+float random_standard_exponential_f(bitgen_t *bitgen_state) {
+ return -logf(1.0f - next_float(bitgen_state));
+}
+
+void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = next_double(bitgen_state);
+ }
+}
+#if 0
+double random_gauss(bitgen_t *bitgen_state) {
+ if (bitgen_state->has_gauss) {
+ const double temp = bitgen_state->gauss;
+ bitgen_state->has_gauss = false;
+ bitgen_state->gauss = 0.0;
+ return temp;
+ } else {
+ double f, x1, x2, r2;
+
+ do {
+ x1 = 2.0 * next_double(bitgen_state) - 1.0;
+ x2 = 2.0 * next_double(bitgen_state) - 1.0;
+ r2 = x1 * x1 + x2 * x2;
+ } while (r2 >= 1.0 || r2 == 0.0);
+
+ /* Polar method, a more efficient version of the Box-Muller approach. */
+ f = sqrt(-2.0 * log(r2) / r2);
+ /* Keep for next call */
+ bitgen_state->gauss = f * x1;
+ bitgen_state->has_gauss = true;
+ return f * x2;
+ }
+}
+
+float random_gauss_f(bitgen_t *bitgen_state) {
+ if (bitgen_state->has_gauss_f) {
+ const float temp = bitgen_state->gauss_f;
+ bitgen_state->has_gauss_f = false;
+ bitgen_state->gauss_f = 0.0f;
+ return temp;
+ } else {
+ float f, x1, x2, r2;
+
+ do {
+ x1 = 2.0f * next_float(bitgen_state) - 1.0f;
+ x2 = 2.0f * next_float(bitgen_state) - 1.0f;
+ r2 = x1 * x1 + x2 * x2;
+ } while (r2 >= 1.0 || r2 == 0.0);
+
+ /* Polar method, a more efficient version of the Box-Muller approach. */
+ f = sqrtf(-2.0f * logf(r2) / r2);
+ /* Keep for next call */
+ bitgen_state->gauss_f = f * x1;
+ bitgen_state->has_gauss_f = true;
+ return f * x2;
+ }
+}
+#endif
+
+static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state);
+
+static double standard_exponential_zig_unlikely(bitgen_t *bitgen_state,
+ uint8_t idx, double x) {
+ if (idx == 0) {
+ /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
+ return ziggurat_exp_r - log(1.0 - next_double(bitgen_state));
+ } else if ((fe_double[idx - 1] - fe_double[idx]) * next_double(bitgen_state) +
+ fe_double[idx] <
+ exp(-x)) {
+ return x;
+ } else {
+ return standard_exponential_zig(bitgen_state);
+ }
+}
+
+static NPY_INLINE double standard_exponential_zig(bitgen_t *bitgen_state) {
+ uint64_t ri;
+ uint8_t idx;
+ double x;
+ ri = next_uint64(bitgen_state);
+ ri >>= 3;
+ idx = ri & 0xFF;
+ ri >>= 8;
+ x = ri * we_double[idx];
+ if (ri < ke_double[idx]) {
+ return x; /* 98.9% of the time we return here 1st try */
+ }
+ return standard_exponential_zig_unlikely(bitgen_state, idx, x);
+}
+
+double random_standard_exponential_zig(bitgen_t *bitgen_state) {
+ return standard_exponential_zig(bitgen_state);
+}
+
+void random_standard_exponential_zig_fill(bitgen_t *bitgen_state, npy_intp cnt,
+ double *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = standard_exponential_zig(bitgen_state);
+ }
+}
+
+static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state);
+
+static float standard_exponential_zig_unlikely_f(bitgen_t *bitgen_state,
+ uint8_t idx, float x) {
+ if (idx == 0) {
+ /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
+ return ziggurat_exp_r_f - logf(1.0f - next_float(bitgen_state));
+ } else if ((fe_float[idx - 1] - fe_float[idx]) * next_float(bitgen_state) +
+ fe_float[idx] <
+ expf(-x)) {
+ return x;
+ } else {
+ return standard_exponential_zig_f(bitgen_state);
+ }
+}
+
+static NPY_INLINE float standard_exponential_zig_f(bitgen_t *bitgen_state) {
+ uint32_t ri;
+ uint8_t idx;
+ float x;
+ ri = next_uint32(bitgen_state);
+ ri >>= 1;
+ idx = ri & 0xFF;
+ ri >>= 8;
+ x = ri * we_float[idx];
+ if (ri < ke_float[idx]) {
+ return x; /* 98.9% of the time we return here 1st try */
+ }
+ return standard_exponential_zig_unlikely_f(bitgen_state, idx, x);
+}
+
+float random_standard_exponential_zig_f(bitgen_t *bitgen_state) {
+ return standard_exponential_zig_f(bitgen_state);
+}
+
+static NPY_INLINE double next_gauss_zig(bitgen_t *bitgen_state) {
+ uint64_t r;
+ int sign;
+ uint64_t rabs;
+ int idx;
+ double x, xx, yy;
+ for (;;) {
+ /* r = e3n52sb8 */
+ r = next_uint64(bitgen_state);
+ idx = r & 0xff;
+ r >>= 8;
+ sign = r & 0x1;
+ rabs = (r >> 1) & 0x000fffffffffffff;
+ x = rabs * wi_double[idx];
+ if (sign & 0x1)
+ x = -x;
+ if (rabs < ki_double[idx])
+ return x; /* 99.3% of the time return here */
+ if (idx == 0) {
+ for (;;) {
+ /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
+ xx = -ziggurat_nor_inv_r * log(1.0 - next_double(bitgen_state));
+ yy = -log(1.0 - next_double(bitgen_state));
+ if (yy + yy > xx * xx)
+ return ((rabs >> 8) & 0x1) ? -(ziggurat_nor_r + xx)
+ : ziggurat_nor_r + xx;
+ }
+ } else {
+ if (((fi_double[idx - 1] - fi_double[idx]) * next_double(bitgen_state) +
+ fi_double[idx]) < exp(-0.5 * x * x))
+ return x;
+ }
+ }
+}
+
+double random_gauss_zig(bitgen_t *bitgen_state) {
+ return next_gauss_zig(bitgen_state);
+}
+
+void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out) {
+ npy_intp i;
+ for (i = 0; i < cnt; i++) {
+ out[i] = next_gauss_zig(bitgen_state);
+ }
+}
+
+float random_gauss_zig_f(bitgen_t *bitgen_state) {
+ uint32_t r;
+ int sign;
+ uint32_t rabs;
+ int idx;
+ float x, xx, yy;
+ for (;;) {
+ /* r = n23sb8 */
+ r = next_uint32(bitgen_state);
+ idx = r & 0xff;
+ sign = (r >> 8) & 0x1;
+ rabs = (r >> 9) & 0x0007fffff;
+ x = rabs * wi_float[idx];
+ if (sign & 0x1)
+ x = -x;
+ if (rabs < ki_float[idx])
+ return x; /* 99.3% of the time return here */
+ if (idx == 0) {
+ for (;;) {
+ /* Switch to 1.0 - U to avoid log(0.0), see GH 13361 */
+ xx = -ziggurat_nor_inv_r_f * logf(1.0f - next_float(bitgen_state));
+ yy = -logf(1.0f - next_float(bitgen_state));
+ if (yy + yy > xx * xx)
+ return ((rabs >> 8) & 0x1) ? -(ziggurat_nor_r_f + xx)
+ : ziggurat_nor_r_f + xx;
+ }
+ } else {
+ if (((fi_float[idx - 1] - fi_float[idx]) * next_float(bitgen_state) +
+ fi_float[idx]) < exp(-0.5 * x * x))
+ return x;
+ }
+ }
+}
+
+/*
+static NPY_INLINE double standard_gamma(bitgen_t *bitgen_state, double shape) {
+ double b, c;
+ double U, V, X, Y;
+
+ if (shape == 1.0) {
+ return random_standard_exponential(bitgen_state);
+ } else if (shape < 1.0) {
+ for (;;) {
+ U = next_double(bitgen_state);
+ V = random_standard_exponential(bitgen_state);
+ if (U <= 1.0 - shape) {
+ X = pow(U, 1. / shape);
+ if (X <= V) {
+ return X;
+ }
+ } else {
+ Y = -log((1 - U) / shape);
+ X = pow(1.0 - shape + shape * Y, 1. / shape);
+ if (X <= (V + Y)) {
+ return X;
+ }
+ }
+ }
+ } else {
+ b = shape - 1. / 3.;
+ c = 1. / sqrt(9 * b);
+ for (;;) {
+ do {
+ X = random_gauss(bitgen_state);
+ V = 1.0 + c * X;
+ } while (V <= 0.0);
+
+ V = V * V * V;
+ U = next_double(bitgen_state);
+ if (U < 1.0 - 0.0331 * (X * X) * (X * X))
+ return (b * V);
+ if (log(U) < 0.5 * X * X + b * (1. - V + log(V)))
+ return (b * V);
+ }
+ }
+}
+
+static NPY_INLINE float standard_gamma_float(bitgen_t *bitgen_state, float
+shape) { float b, c; float U, V, X, Y;
+
+ if (shape == 1.0f) {
+ return random_standard_exponential_f(bitgen_state);
+ } else if (shape < 1.0f) {
+ for (;;) {
+ U = next_float(bitgen_state);
+ V = random_standard_exponential_f(bitgen_state);
+ if (U <= 1.0f - shape) {
+ X = powf(U, 1.0f / shape);
+ if (X <= V) {
+ return X;
+ }
+ } else {
+ Y = -logf((1.0f - U) / shape);
+ X = powf(1.0f - shape + shape * Y, 1.0f / shape);
+ if (X <= (V + Y)) {
+ return X;
+ }
+ }
+ }
+ } else {
+ b = shape - 1.0f / 3.0f;
+ c = 1.0f / sqrtf(9.0f * b);
+ for (;;) {
+ do {
+ X = random_gauss_f(bitgen_state);
+ V = 1.0f + c * X;
+ } while (V <= 0.0f);
+
+ V = V * V * V;
+ U = next_float(bitgen_state);
+ if (U < 1.0f - 0.0331f * (X * X) * (X * X))
+ return (b * V);
+ if (logf(U) < 0.5f * X * X + b * (1.0f - V + logf(V)))
+ return (b * V);
+ }
+ }
+}
+
+
+double random_standard_gamma(bitgen_t *bitgen_state, double shape) {
+ return standard_gamma(bitgen_state, shape);
+}
+
+float random_standard_gamma_f(bitgen_t *bitgen_state, float shape) {
+ return standard_gamma_float(bitgen_state, shape);
+}
+*/
+
+static NPY_INLINE double standard_gamma_zig(bitgen_t *bitgen_state,
+ double shape) {
+ double b, c;
+ double U, V, X, Y;
+
+ if (shape == 1.0) {
+ return random_standard_exponential_zig(bitgen_state);
+ } else if (shape == 0.0) {
+ return 0.0;
+ } else if (shape < 1.0) {
+ for (;;) {
+ U = next_double(bitgen_state);
+ V = random_standard_exponential_zig(bitgen_state);
+ if (U <= 1.0 - shape) {
+ X = pow(U, 1. / shape);
+ if (X <= V) {
+ return X;
+ }
+ } else {
+ Y = -log((1 - U) / shape);
+ X = pow(1.0 - shape + shape * Y, 1. / shape);
+ if (X <= (V + Y)) {
+ return X;
+ }
+ }
+ }
+ } else {
+ b = shape - 1. / 3.;
+ c = 1. / sqrt(9 * b);
+ for (;;) {
+ do {
+ X = random_gauss_zig(bitgen_state);
+ V = 1.0 + c * X;
+ } while (V <= 0.0);
+
+ V = V * V * V;
+ U = next_double(bitgen_state);
+ if (U < 1.0 - 0.0331 * (X * X) * (X * X))
+ return (b * V);
+ /* log(0.0) ok here */
+ if (log(U) < 0.5 * X * X + b * (1. - V + log(V)))
+ return (b * V);
+ }
+ }
+}
+
+static NPY_INLINE float standard_gamma_zig_f(bitgen_t *bitgen_state,
+ float shape) {
+ float b, c;
+ float U, V, X, Y;
+
+ if (shape == 1.0f) {
+ return random_standard_exponential_zig_f(bitgen_state);
+ } else if (shape == 0.0) {
+ return 0.0;
+ } else if (shape < 1.0f) {
+ for (;;) {
+ U = next_float(bitgen_state);
+ V = random_standard_exponential_zig_f(bitgen_state);
+ if (U <= 1.0f - shape) {
+ X = powf(U, 1.0f / shape);
+ if (X <= V) {
+ return X;
+ }
+ } else {
+ Y = -logf((1.0f - U) / shape);
+ X = powf(1.0f - shape + shape * Y, 1.0f / shape);
+ if (X <= (V + Y)) {
+ return X;
+ }
+ }
+ }
+ } else {
+ b = shape - 1.0f / 3.0f;
+ c = 1.0f / sqrtf(9.0f * b);
+ for (;;) {
+ do {
+ X = random_gauss_zig_f(bitgen_state);
+ V = 1.0f + c * X;
+ } while (V <= 0.0f);
+
+ V = V * V * V;
+ U = next_float(bitgen_state);
+ if (U < 1.0f - 0.0331f * (X * X) * (X * X))
+ return (b * V);
+ /* logf(0.0) ok here */
+ if (logf(U) < 0.5f * X * X + b * (1.0f - V + logf(V)))
+ return (b * V);
+ }
+ }
+}
+
+double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape) {
+ return standard_gamma_zig(bitgen_state, shape);
+}
+
+float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape) {
+ return standard_gamma_zig_f(bitgen_state, shape);
+}
+
+int64_t random_positive_int64(bitgen_t *bitgen_state) {
+ return next_uint64(bitgen_state) >> 1;
+}
+
+int32_t random_positive_int32(bitgen_t *bitgen_state) {
+ return next_uint32(bitgen_state) >> 1;
+}
+
+int64_t random_positive_int(bitgen_t *bitgen_state) {
+#if ULONG_MAX <= 0xffffffffUL
+ return (int64_t)(next_uint32(bitgen_state) >> 1);
+#else
+ return (int64_t)(next_uint64(bitgen_state) >> 1);
+#endif
+}
+
+uint64_t random_uint(bitgen_t *bitgen_state) {
+#if ULONG_MAX <= 0xffffffffUL
+ return next_uint32(bitgen_state);
+#else
+ return next_uint64(bitgen_state);
+#endif
+}
+
+/*
+ * log-gamma function to support some of these distributions. The
+ * algorithm comes from SPECFUN by Shanjie Zhang and Jianming Jin and their
+ * book "Computation of Special Functions", 1996, John Wiley & Sons, Inc.
+ *
+ * If loggam(k+1) is being used to compute log(k!) for an integer k, consider
+ * using logfactorial(k) instead.
+ */
+double loggam(double x) {
+ double x0, x2, xp, gl, gl0;
+ RAND_INT_TYPE k, n;
+
+ static double a[10] = {8.333333333333333e-02, -2.777777777777778e-03,
+ 7.936507936507937e-04, -5.952380952380952e-04,
+ 8.417508417508418e-04, -1.917526917526918e-03,
+ 6.410256410256410e-03, -2.955065359477124e-02,
+ 1.796443723688307e-01, -1.39243221690590e+00};
+ x0 = x;
+ n = 0;
+ if ((x == 1.0) || (x == 2.0)) {
+ return 0.0;
+ } else if (x <= 7.0) {
+ n = (RAND_INT_TYPE)(7 - x);
+ x0 = x + n;
+ }
+ x2 = 1.0 / (x0 * x0);
+ xp = 2 * M_PI;
+ gl0 = a[9];
+ for (k = 8; k >= 0; k--) {
+ gl0 *= x2;
+ gl0 += a[k];
+ }
+ gl = gl0 / x0 + 0.5 * log(xp) + (x0 - 0.5) * log(x0) - x0;
+ if (x <= 7.0) {
+ for (k = 1; k <= n; k++) {
+ gl -= log(x0 - 1.0);
+ x0 -= 1.0;
+ }
+ }
+ return gl;
+}
+
+/*
+double random_normal(bitgen_t *bitgen_state, double loc, double scale) {
+ return loc + scale * random_gauss(bitgen_state);
+}
+*/
+
+double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale) {
+ return loc + scale * random_gauss_zig(bitgen_state);
+}
+
+double random_exponential(bitgen_t *bitgen_state, double scale) {
+ return scale * standard_exponential_zig(bitgen_state);
+}
+
+double random_uniform(bitgen_t *bitgen_state, double lower, double range) {
+ return lower + range * next_double(bitgen_state);
+}
+
+double random_gamma(bitgen_t *bitgen_state, double shape, double scale) {
+ return scale * random_standard_gamma_zig(bitgen_state, shape);
+}
+
+float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale) {
+ return scale * random_standard_gamma_zig_f(bitgen_state, shape);
+}
+
+double random_beta(bitgen_t *bitgen_state, double a, double b) {
+ double Ga, Gb;
+
+ if ((a <= 1.0) && (b <= 1.0)) {
+ double U, V, X, Y, XpY;
+ /* Use Johnk's algorithm */
+
+ while (1) {
+ U = next_double(bitgen_state);
+ V = next_double(bitgen_state);
+ X = pow(U, 1.0 / a);
+ Y = pow(V, 1.0 / b);
+ XpY = X + Y;
+ /* Reject if both U and V are 0.0, which is approx 1 in 10^106 */
+ if ((XpY <= 1.0) && (XpY > 0.0)) {
+ if (X + Y > 0) {
+ return X / XpY;
+ } else {
+ double logX = log(U) / a;
+ double logY = log(V) / b;
+ double logM = logX > logY ? logX : logY;
+ logX -= logM;
+ logY -= logM;
+
+ return exp(logX - log(exp(logX) + exp(logY)));
+ }
+ }
+ }
+ } else {
+ Ga = random_standard_gamma_zig(bitgen_state, a);
+ Gb = random_standard_gamma_zig(bitgen_state, b);
+ return Ga / (Ga + Gb);
+ }
+}
+
+double random_chisquare(bitgen_t *bitgen_state, double df) {
+ return 2.0 * random_standard_gamma_zig(bitgen_state, df / 2.0);
+}
+
+double random_f(bitgen_t *bitgen_state, double dfnum, double dfden) {
+ return ((random_chisquare(bitgen_state, dfnum) * dfden) /
+ (random_chisquare(bitgen_state, dfden) * dfnum));
+}
+
+double random_standard_cauchy(bitgen_t *bitgen_state) {
+ return random_gauss_zig(bitgen_state) / random_gauss_zig(bitgen_state);
+}
+
+double random_pareto(bitgen_t *bitgen_state, double a) {
+ return exp(standard_exponential_zig(bitgen_state) / a) - 1;
+}
+
+double random_weibull(bitgen_t *bitgen_state, double a) {
+ if (a == 0.0) {
+ return 0.0;
+ }
+ return pow(standard_exponential_zig(bitgen_state), 1. / a);
+}
+
+double random_power(bitgen_t *bitgen_state, double a) {
+ return pow(1 - exp(-standard_exponential_zig(bitgen_state)), 1. / a);
+}
+
+double random_laplace(bitgen_t *bitgen_state, double loc, double scale) {
+ double U;
+
+ U = next_double(bitgen_state);
+ if (U >= 0.5) {
+ U = loc - scale * log(2.0 - U - U);
+ } else if (U > 0.0) {
+ U = loc + scale * log(U + U);
+ } else {
+ /* Reject U == 0.0 and call again to get next value */
+ U = random_laplace(bitgen_state, loc, scale);
+ }
+ return U;
+}
+
+double random_gumbel(bitgen_t *bitgen_state, double loc, double scale) {
+ double U;
+
+ U = 1.0 - next_double(bitgen_state);
+ if (U < 1.0) {
+ return loc - scale * log(-log(U));
+ }
+ /* Reject U == 1.0 and call again to get next value */
+ return random_gumbel(bitgen_state, loc, scale);
+}
+
+double random_logistic(bitgen_t *bitgen_state, double loc, double scale) {
+ double U;
+
+ U = next_double(bitgen_state);
+ if (U > 0.0) {
+ return loc + scale * log(U / (1.0 - U));
+ }
+ /* Reject U == 0.0 and call again to get next value */
+ return random_logistic(bitgen_state, loc, scale);
+}
+
+double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma) {
+ return exp(random_normal_zig(bitgen_state, mean, sigma));
+}
+
+double random_rayleigh(bitgen_t *bitgen_state, double mode) {
+ return mode * sqrt(-2.0 * log(1.0 - next_double(bitgen_state)));
+}
+
+double random_standard_t(bitgen_t *bitgen_state, double df) {
+ double num, denom;
+
+ num = random_gauss_zig(bitgen_state);
+ denom = random_standard_gamma_zig(bitgen_state, df / 2);
+ return sqrt(df / 2) * num / sqrt(denom);
+}
+
+static RAND_INT_TYPE random_poisson_mult(bitgen_t *bitgen_state, double lam) {
+ RAND_INT_TYPE X;
+ double prod, U, enlam;
+
+ enlam = exp(-lam);
+ X = 0;
+ prod = 1.0;
+ while (1) {
+ U = next_double(bitgen_state);
+ prod *= U;
+ if (prod > enlam) {
+ X += 1;
+ } else {
+ return X;
+ }
+ }
+}
+
+/*
+ * The transformed rejection method for generating Poisson random variables
+ * W. Hoermann
+ * Insurance: Mathematics and Economics 12, 39-45 (1993)
+ */
+#define LS2PI 0.91893853320467267
+#define TWELFTH 0.083333333333333333333333
+static RAND_INT_TYPE random_poisson_ptrs(bitgen_t *bitgen_state, double lam) {
+ RAND_INT_TYPE k;
+ double U, V, slam, loglam, a, b, invalpha, vr, us;
+
+ slam = sqrt(lam);
+ loglam = log(lam);
+ b = 0.931 + 2.53 * slam;
+ a = -0.059 + 0.02483 * b;
+ invalpha = 1.1239 + 1.1328 / (b - 3.4);
+ vr = 0.9277 - 3.6224 / (b - 2);
+
+ while (1) {
+ U = next_double(bitgen_state) - 0.5;
+ V = next_double(bitgen_state);
+ us = 0.5 - fabs(U);
+ k = (RAND_INT_TYPE)floor((2 * a / us + b) * U + lam + 0.43);
+ if ((us >= 0.07) && (V <= vr)) {
+ return k;
+ }
+ if ((k < 0) || ((us < 0.013) && (V > us))) {
+ continue;
+ }
+ /* log(V) == log(0.0) ok here */
+ /* if U==0.0 so that us==0.0, log is ok since always returns */
+ if ((log(V) + log(invalpha) - log(a / (us * us) + b)) <=
+ (-lam + k * loglam - loggam(k + 1))) {
+ return k;
+ }
+ }
+}
+
+RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam) {
+ if (lam >= 10) {
+ return random_poisson_ptrs(bitgen_state, lam);
+ } else if (lam == 0) {
+ return 0;
+ } else {
+ return random_poisson_mult(bitgen_state, lam);
+ }
+}
+
+RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
+ double p) {
+ double Y = random_gamma(bitgen_state, n, (1 - p) / p);
+ return random_poisson(bitgen_state, Y);
+}
+
+RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state, RAND_INT_TYPE n,
+ double p, binomial_t *binomial) {
+ double r, q, fm, p1, xm, xl, xr, c, laml, lamr, p2, p3, p4;
+ double a, u, v, s, F, rho, t, A, nrq, x1, x2, f1, f2, z, z2, w, w2, x;
+ RAND_INT_TYPE m, y, k, i;
+
+ if (!(binomial->has_binomial) || (binomial->nsave != n) ||
+ (binomial->psave != p)) {
+ /* initialize */
+ binomial->nsave = n;
+ binomial->psave = p;
+ binomial->has_binomial = 1;
+ binomial->r = r = MIN(p, 1.0 - p);
+ binomial->q = q = 1.0 - r;
+ binomial->fm = fm = n * r + r;
+ binomial->m = m = (RAND_INT_TYPE)floor(binomial->fm);
+ binomial->p1 = p1 = floor(2.195 * sqrt(n * r * q) - 4.6 * q) + 0.5;
+ binomial->xm = xm = m + 0.5;
+ binomial->xl = xl = xm - p1;
+ binomial->xr = xr = xm + p1;
+ binomial->c = c = 0.134 + 20.5 / (15.3 + m);
+ a = (fm - xl) / (fm - xl * r);
+ binomial->laml = laml = a * (1.0 + a / 2.0);
+ a = (xr - fm) / (xr * q);
+ binomial->lamr = lamr = a * (1.0 + a / 2.0);
+ binomial->p2 = p2 = p1 * (1.0 + 2.0 * c);
+ binomial->p3 = p3 = p2 + c / laml;
+ binomial->p4 = p4 = p3 + c / lamr;
+ } else {
+ r = binomial->r;
+ q = binomial->q;
+ fm = binomial->fm;
+ m = binomial->m;
+ p1 = binomial->p1;
+ xm = binomial->xm;
+ xl = binomial->xl;
+ xr = binomial->xr;
+ c = binomial->c;
+ laml = binomial->laml;
+ lamr = binomial->lamr;
+ p2 = binomial->p2;
+ p3 = binomial->p3;
+ p4 = binomial->p4;
+ }
+
+/* sigh ... */
+Step10:
+ nrq = n * r * q;
+ u = next_double(bitgen_state) * p4;
+ v = next_double(bitgen_state);
+ if (u > p1)
+ goto Step20;
+ y = (RAND_INT_TYPE)floor(xm - p1 * v + u);
+ goto Step60;
+
+Step20:
+ if (u > p2)
+ goto Step30;
+ x = xl + (u - p1) / c;
+ v = v * c + 1.0 - fabs(m - x + 0.5) / p1;
+ if (v > 1.0)
+ goto Step10;
+ y = (RAND_INT_TYPE)floor(x);
+ goto Step50;
+
+Step30:
+ if (u > p3)
+ goto Step40;
+ y = (RAND_INT_TYPE)floor(xl + log(v) / laml);
+ /* Reject if v==0.0 since previous cast is undefined */
+ if ((y < 0) || (v == 0.0))
+ goto Step10;
+ v = v * (u - p2) * laml;
+ goto Step50;
+
+Step40:
+ y = (RAND_INT_TYPE)floor(xr - log(v) / lamr);
+ /* Reject if v==0.0 since previous cast is undefined */
+ if ((y > n) || (v == 0.0))
+ goto Step10;
+ v = v * (u - p3) * lamr;
+
+Step50:
+ k = llabs(y - m);
+ if ((k > 20) && (k < ((nrq) / 2.0 - 1)))
+ goto Step52;
+
+ s = r / q;
+ a = s * (n + 1);
+ F = 1.0;
+ if (m < y) {
+ for (i = m + 1; i <= y; i++) {
+ F *= (a / i - s);
+ }
+ } else if (m > y) {
+ for (i = y + 1; i <= m; i++) {
+ F /= (a / i - s);
+ }
+ }
+ if (v > F)
+ goto Step10;
+ goto Step60;
+
+Step52:
+ rho =
+ (k / (nrq)) * ((k * (k / 3.0 + 0.625) + 0.16666666666666666) / nrq + 0.5);
+ t = -k * k / (2 * nrq);
+ /* log(0.0) ok here */
+ A = log(v);
+ if (A < (t - rho))
+ goto Step60;
+ if (A > (t + rho))
+ goto Step10;
+
+ x1 = y + 1;
+ f1 = m + 1;
+ z = n + 1 - m;
+ w = n - y + 1;
+ x2 = x1 * x1;
+ f2 = f1 * f1;
+ z2 = z * z;
+ w2 = w * w;
+ if (A > (xm * log(f1 / x1) + (n - m + 0.5) * log(z / w) +
+ (y - m) * log(w * r / (x1 * q)) +
+ (13680. - (462. - (132. - (99. - 140. / f2) / f2) / f2) / f2) / f1 /
+ 166320. +
+ (13680. - (462. - (132. - (99. - 140. / z2) / z2) / z2) / z2) / z /
+ 166320. +
+ (13680. - (462. - (132. - (99. - 140. / x2) / x2) / x2) / x2) / x1 /
+ 166320. +
+ (13680. - (462. - (132. - (99. - 140. / w2) / w2) / w2) / w2) / w /
+ 166320.)) {
+ goto Step10;
+ }
+
+Step60:
+ if (p > 0.5) {
+ y = n - y;
+ }
+
+ return y;
+}
+
+RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state, RAND_INT_TYPE n,
+ double p, binomial_t *binomial) {
+ double q, qn, np, px, U;
+ RAND_INT_TYPE X, bound;
+
+ if (!(binomial->has_binomial) || (binomial->nsave != n) ||
+ (binomial->psave != p)) {
+ binomial->nsave = n;
+ binomial->psave = p;
+ binomial->has_binomial = 1;
+ binomial->q = q = 1.0 - p;
+ binomial->r = qn = exp(n * log(q));
+ binomial->c = np = n * p;
+ binomial->m = bound = (RAND_INT_TYPE)MIN(n, np + 10.0 * sqrt(np * q + 1));
+ } else {
+ q = binomial->q;
+ qn = binomial->r;
+ np = binomial->c;
+ bound = binomial->m;
+ }
+ X = 0;
+ px = qn;
+ U = next_double(bitgen_state);
+ while (U > px) {
+ X++;
+ if (X > bound) {
+ X = 0;
+ px = qn;
+ U = next_double(bitgen_state);
+ } else {
+ U -= px;
+ px = ((n - X + 1) * p * px) / (X * q);
+ }
+ }
+ return X;
+}
+
+int64_t random_binomial(bitgen_t *bitgen_state, double p, int64_t n,
+ binomial_t *binomial) {
+ double q;
+
+ if ((n == 0LL) || (p == 0.0f))
+ return 0;
+
+ if (p <= 0.5) {
+ if (p * n <= 30.0) {
+ return random_binomial_inversion(bitgen_state, n, p, binomial);
+ } else {
+ return random_binomial_btpe(bitgen_state, n, p, binomial);
+ }
+ } else {
+ q = 1.0 - p;
+ if (q * n <= 30.0) {
+ return n - random_binomial_inversion(bitgen_state, n, q, binomial);
+ } else {
+ return n - random_binomial_btpe(bitgen_state, n, q, binomial);
+ }
+ }
+}
+
+double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc) {
+ if (npy_isnan(nonc)) {
+ return NPY_NAN;
+ }
+ if (nonc == 0) {
+ return random_chisquare(bitgen_state, df);
+ }
+ if (1 < df) {
+ const double Chi2 = random_chisquare(bitgen_state, df - 1);
+ const double n = random_gauss_zig(bitgen_state) + sqrt(nonc);
+ return Chi2 + n * n;
+ } else {
+ const RAND_INT_TYPE i = random_poisson(bitgen_state, nonc / 2.0);
+ return random_chisquare(bitgen_state, df + 2 * i);
+ }
+}
+
+double random_noncentral_f(bitgen_t *bitgen_state, double dfnum, double dfden,
+ double nonc) {
+ double t = random_noncentral_chisquare(bitgen_state, dfnum, nonc) * dfden;
+ return t / (random_chisquare(bitgen_state, dfden) * dfnum);
+}
+
+double random_wald(bitgen_t *bitgen_state, double mean, double scale) {
+ double U, X, Y;
+ double mu_2l;
+
+ mu_2l = mean / (2 * scale);
+ Y = random_gauss_zig(bitgen_state);
+ Y = mean * Y * Y;
+ X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y));
+ U = next_double(bitgen_state);
+ if (U <= mean / (mean + X)) {
+ return X;
+ } else {
+ return mean * mean / X;
+ }
+}
+
+double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa) {
+ double s;
+ double U, V, W, Y, Z;
+ double result, mod;
+ int neg;
+ if (npy_isnan(kappa)) {
+ return NPY_NAN;
+ }
+ if (kappa < 1e-8) {
+ return M_PI * (2 * next_double(bitgen_state) - 1);
+ } else {
+ /* with double precision rho is zero until 1.4e-8 */
+ if (kappa < 1e-5) {
+ /*
+ * second order taylor expansion around kappa = 0
+ * precise until relatively large kappas as second order is 0
+ */
+ s = (1. / kappa + kappa);
+ } else {
+ double r = 1 + sqrt(1 + 4 * kappa * kappa);
+ double rho = (r - sqrt(2 * r)) / (2 * kappa);
+ s = (1 + rho * rho) / (2 * rho);
+ }
+
+ while (1) {
+ U = next_double(bitgen_state);
+ Z = cos(M_PI * U);
+ W = (1 + s * Z) / (s + Z);
+ Y = kappa * (s - W);
+ V = next_double(bitgen_state);
+ /*
+ * V==0.0 is ok here since Y >= 0 always leads
+ * to accept, while Y < 0 always rejects
+ */
+ if ((Y * (2 - Y) - V >= 0) || (log(Y / V) + 1 - Y >= 0)) {
+ break;
+ }
+ }
+
+ U = next_double(bitgen_state);
+
+ result = acos(W);
+ if (U < 0.5) {
+ result = -result;
+ }
+ result += mu;
+ neg = (result < 0);
+ mod = fabs(result);
+ mod = (fmod(mod + M_PI, 2 * M_PI) - M_PI);
+ if (neg) {
+ mod *= -1;
+ }
+
+ return mod;
+ }
+}
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState which
+ * used long in place of int64_t. If changing a distribution that uses
+ * RAND_INT_TYPE, then the original unmodified copy must be retained for
+ * use in RandomState by copying to the legacy distributions source file.
+ */
+RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p) {
+ double q, r, U, V;
+ RAND_INT_TYPE result;
+
+ r = log(1.0 - p);
+
+ while (1) {
+ V = next_double(bitgen_state);
+ if (V >= p) {
+ return 1;
+ }
+ U = next_double(bitgen_state);
+ q = 1.0 - exp(r * U);
+ if (V <= q * q) {
+ result = (RAND_INT_TYPE)floor(1 + log(V) / log(q));
+ if ((result < 1) || (V == 0.0)) {
+ continue;
+ } else {
+ return result;
+ }
+ }
+ if (V >= q) {
+ return 1;
+ }
+ return 2;
+ }
+}
+
+RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p) {
+ double U;
+ RAND_INT_TYPE X;
+ double sum, prod, q;
+
+ X = 1;
+ sum = prod = p;
+ q = 1.0 - p;
+ U = next_double(bitgen_state);
+ while (U > sum) {
+ prod *= q;
+ sum += prod;
+ X++;
+ }
+ return X;
+}
+
+RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p) {
+ return (RAND_INT_TYPE)ceil(log(1.0 - next_double(bitgen_state)) / log(1.0 - p));
+}
+
+RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p) {
+ if (p >= 0.333333333333333333333333) {
+ return random_geometric_search(bitgen_state, p);
+ } else {
+ return random_geometric_inversion(bitgen_state, p);
+ }
+}
+
+RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a) {
+ double am1, b;
+
+ am1 = a - 1.0;
+ b = pow(2.0, am1);
+ while (1) {
+ double T, U, V, X;
+
+ U = 1.0 - random_double(bitgen_state);
+ V = random_double(bitgen_state);
+ X = floor(pow(U, -1.0 / am1));
+ /*
+ * The real result may be above what can be represented in a signed
+ * long. Since this is a straightforward rejection algorithm, we can
+ * just reject this value. This function then models a Zipf
+ * distribution truncated to sys.maxint.
+ */
+ if (X > RAND_INT_MAX || X < 1.0) {
+ continue;
+ }
+
+ T = pow(1.0 + 1.0 / X, am1);
+ if (V * X * (T - 1.0) / (b - 1.0) <= T / b) {
+ return (RAND_INT_TYPE)X;
+ }
+ }
+}
+
+double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right) {
+ double base, leftbase, ratio, leftprod, rightprod;
+ double U;
+
+ base = right - left;
+ leftbase = mode - left;
+ ratio = leftbase / base;
+ leftprod = leftbase * base;
+ rightprod = (right - mode) * base;
+
+ U = next_double(bitgen_state);
+ if (U <= ratio) {
+ return left + sqrt(U * leftprod);
+ } else {
+ return right - sqrt((1.0 - U) * rightprod);
+ }
+}
+
+
+uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max) {
+ uint64_t mask, value;
+ if (max == 0) {
+ return 0;
+ }
+
+ mask = max;
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+ mask |= mask >> 32;
+
+ /* Search a random value in [0..mask] <= max */
+ if (max <= 0xffffffffUL) {
+ while ((value = (next_uint32(bitgen_state) & mask)) > max)
+ ;
+ } else {
+ while ((value = (next_uint64(bitgen_state) & mask)) > max)
+ ;
+ }
+ return value;
+}
+
+/* Bounded generators */
+static NPY_INLINE uint64_t gen_mask(uint64_t max) {
+ uint64_t mask = max;
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+ mask |= mask >> 32;
+ return mask;
+}
+
+/* Generate 16 bit random numbers using a 32 bit buffer. */
+static NPY_INLINE uint16_t buffered_uint16(bitgen_t *bitgen_state, int *bcnt,
+ uint32_t *buf) {
+ if (!(bcnt[0])) {
+ buf[0] = next_uint32(bitgen_state);
+ bcnt[0] = 1;
+ } else {
+ buf[0] >>= 16;
+ bcnt[0] -= 1;
+ }
+
+ return (uint16_t)buf[0];
+}
+
+/* Generate 8 bit random numbers using a 32 bit buffer. */
+static NPY_INLINE uint8_t buffered_uint8(bitgen_t *bitgen_state, int *bcnt,
+ uint32_t *buf) {
+ if (!(bcnt[0])) {
+ buf[0] = next_uint32(bitgen_state);
+ bcnt[0] = 3;
+ } else {
+ buf[0] >>= 8;
+ bcnt[0] -= 1;
+ }
+
+ return (uint8_t)buf[0];
+}
+
+/* Static `masked rejection` function called by random_bounded_uint64(...) */
+static NPY_INLINE uint64_t bounded_masked_uint64(bitgen_t *bitgen_state,
+ uint64_t rng, uint64_t mask) {
+ uint64_t val;
+
+ while ((val = (next_uint64(bitgen_state) & mask)) > rng)
+ ;
+
+ return val;
+}
+
+/* Static `masked rejection` function called by
+ * random_buffered_bounded_uint32(...) */
+static NPY_INLINE uint32_t
+buffered_bounded_masked_uint32(bitgen_t *bitgen_state, uint32_t rng,
+ uint32_t mask, int *bcnt, uint32_t *buf) {
+ /*
+ * The buffer and buffer count are not used here but are included to allow
+ * this function to be templated with the similar uint8 and uint16
+ * functions
+ */
+
+ uint32_t val;
+
+ while ((val = (next_uint32(bitgen_state) & mask)) > rng)
+ ;
+
+ return val;
+}
+
+/* Static `masked rejection` function called by
+ * random_buffered_bounded_uint16(...) */
+static NPY_INLINE uint16_t
+buffered_bounded_masked_uint16(bitgen_t *bitgen_state, uint16_t rng,
+ uint16_t mask, int *bcnt, uint32_t *buf) {
+ uint16_t val;
+
+ while ((val = (buffered_uint16(bitgen_state, bcnt, buf) & mask)) > rng)
+ ;
+
+ return val;
+}
+
+/* Static `masked rejection` function called by
+ * random_buffered_bounded_uint8(...) */
+static NPY_INLINE uint8_t buffered_bounded_masked_uint8(bitgen_t *bitgen_state,
+ uint8_t rng,
+ uint8_t mask, int *bcnt,
+ uint32_t *buf) {
+ uint8_t val;
+
+ while ((val = (buffered_uint8(bitgen_state, bcnt, buf) & mask)) > rng)
+ ;
+
+ return val;
+}
+
+static NPY_INLINE npy_bool buffered_bounded_bool(bitgen_t *bitgen_state,
+ npy_bool off, npy_bool rng,
+ npy_bool mask, int *bcnt,
+ uint32_t *buf) {
+ if (rng == 0)
+ return off;
+ if (!(bcnt[0])) {
+ buf[0] = next_uint32(bitgen_state);
+ bcnt[0] = 31;
+ } else {
+ buf[0] >>= 1;
+ bcnt[0] -= 1;
+ }
+ return (buf[0] & 0x00000001UL) != 0;
+}
+
+/* Static `Lemire rejection` function called by random_bounded_uint64(...) */
+static NPY_INLINE uint64_t bounded_lemire_uint64(bitgen_t *bitgen_state,
+ uint64_t rng) {
+ /*
+ * Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
+ *
+ * Note: `rng` should not be 0xFFFFFFFFFFFFFFFF. When this happens `rng_excl`
+ * becomes zero.
+ */
+ const uint64_t rng_excl = rng + 1;
+
+#if __SIZEOF_INT128__
+ /* 128-bit uint available (e.g. GCC/clang). `m` is the __uint128_t scaled
+ * integer. */
+ __uint128_t m;
+ uint64_t leftover;
+
+ /* Generate a scaled random number. */
+ m = ((__uint128_t)next_uint64(bitgen_state)) * rng_excl;
+
+ /* Rejection sampling to remove any bias. */
+ leftover = m & 0xFFFFFFFFFFFFFFFFULL;
+
+ if (leftover < rng_excl) {
+ /* `rng_excl` is a simple upper bound for `threshold`. */
+
+ const uint64_t threshold = -rng_excl % rng_excl;
+ /* Same as: threshold=((uint64_t)(0x10000000000000000ULL - rng_excl)) %
+ * rng_excl; */
+
+ while (leftover < threshold) {
+ m = ((__uint128_t)next_uint64(bitgen_state)) * rng_excl;
+ leftover = m & 0xFFFFFFFFFFFFFFFFULL;
+ }
+ }
+
+ return (m >> 64);
+#else
+ /* 128-bit uint NOT available (e.g. MSVS). `m1` is the upper 64-bits of the
+ * scaled integer. */
+ uint64_t m1;
+ uint64_t x;
+ uint64_t leftover;
+
+ x = next_uint64(bitgen_state);
+
+ /* Rejection sampling to remove any bias. */
+ leftover = x * rng_excl; /* The lower 64-bits of the mult. */
+
+ if (leftover < rng_excl) {
+ /* `rng_excl` is a simple upper bound for `threshold`. */
+
+ const uint64_t threshold = -rng_excl % rng_excl;
+ /* Same as: threshold=((uint64_t)(0x10000000000000000ULL - rng_excl)) %
+ * rng_excl; */
+
+ while (leftover < threshold) {
+ x = next_uint64(bitgen_state);
+ leftover = x * rng_excl;
+ }
+ }
+
+#if defined(_MSC_VER) && defined(_WIN64)
+ /* _WIN64 architecture. Use the __umulh intrinsic to calc `m1`. */
+ m1 = __umulh(x, rng_excl);
+#else
+ /* 32-bit architecture. Emulate __umulh to calc `m1`. */
+ {
+ uint64_t x0, x1, rng_excl0, rng_excl1;
+ uint64_t w0, w1, w2, t;
+
+ x0 = x & 0xFFFFFFFFULL;
+ x1 = x >> 32;
+ rng_excl0 = rng_excl & 0xFFFFFFFFULL;
+ rng_excl1 = rng_excl >> 32;
+ w0 = x0 * rng_excl0;
+ t = x1 * rng_excl0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFULL;
+ w2 = t >> 32;
+ w1 += x0 * rng_excl1;
+ m1 = x1 * rng_excl1 + w2 + (w1 >> 32);
+ }
+#endif
+
+ return m1;
+#endif
+}
+
+/* Static `Lemire rejection` function called by
+ * random_buffered_bounded_uint32(...) */
+static NPY_INLINE uint32_t buffered_bounded_lemire_uint32(
+ bitgen_t *bitgen_state, uint32_t rng, int *bcnt, uint32_t *buf) {
+ /*
+ * Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
+ *
+ * The buffer and buffer count are not used here but are included to allow
+ * this function to be templated with the similar uint8 and uint16
+ * functions
+ *
+ * Note: `rng` should not be 0xFFFFFFFF. When this happens `rng_excl` becomes
+ * zero.
+ */
+ const uint32_t rng_excl = rng + 1;
+
+ uint64_t m;
+ uint32_t leftover;
+
+ /* Generate a scaled random number. */
+ m = ((uint64_t)next_uint32(bitgen_state)) * rng_excl;
+
+ /* Rejection sampling to remove any bias */
+ leftover = m & 0xFFFFFFFFUL;
+
+ if (leftover < rng_excl) {
+ /* `rng_excl` is a simple upper bound for `threshold`. */
+ const uint32_t threshold = -rng_excl % rng_excl;
+ /* Same as: threshold=((uint64_t)(0x100000000ULL - rng_excl)) % rng_excl; */
+
+ while (leftover < threshold) {
+ m = ((uint64_t)next_uint32(bitgen_state)) * rng_excl;
+ leftover = m & 0xFFFFFFFFUL;
+ }
+ }
+
+ return (m >> 32);
+}
+
+/* Static `Lemire rejection` function called by
+ * random_buffered_bounded_uint16(...) */
+static NPY_INLINE uint16_t buffered_bounded_lemire_uint16(
+ bitgen_t *bitgen_state, uint16_t rng, int *bcnt, uint32_t *buf) {
+ /*
+ * Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
+ *
+ * Note: `rng` should not be 0xFFFF. When this happens `rng_excl` becomes
+ * zero.
+ */
+ const uint16_t rng_excl = rng + 1;
+
+ uint32_t m;
+ uint16_t leftover;
+
+ /* Generate a scaled random number. */
+ m = ((uint32_t)buffered_uint16(bitgen_state, bcnt, buf)) * rng_excl;
+
+ /* Rejection sampling to remove any bias */
+ leftover = m & 0xFFFFUL;
+
+ if (leftover < rng_excl) {
+ /* `rng_excl` is a simple upper bound for `threshold`. */
+ const uint16_t threshold = -rng_excl % rng_excl;
+ /* Same as: threshold=((uint32_t)(0x10000ULL - rng_excl)) % rng_excl; */
+
+ while (leftover < threshold) {
+ m = ((uint32_t)buffered_uint16(bitgen_state, bcnt, buf)) * rng_excl;
+ leftover = m & 0xFFFFUL;
+ }
+ }
+
+ return (m >> 16);
+}
+
+/* Static `Lemire rejection` function called by
+ * random_buffered_bounded_uint8(...) */
+static NPY_INLINE uint8_t buffered_bounded_lemire_uint8(bitgen_t *bitgen_state,
+ uint8_t rng, int *bcnt,
+ uint32_t *buf) {
+ /*
+ * Uses Lemire's algorithm - https://arxiv.org/abs/1805.10941
+ *
+ * Note: `rng` should not be 0xFF. When this happens `rng_excl` becomes
+ * zero.
+ */
+ const uint8_t rng_excl = rng + 1;
+
+ uint16_t m;
+ uint8_t leftover;
+
+ /* Generate a scaled random number. */
+ m = ((uint16_t)buffered_uint8(bitgen_state, bcnt, buf)) * rng_excl;
+
+ /* Rejection sampling to remove any bias */
+ leftover = m & 0xFFUL;
+
+ if (leftover < rng_excl) {
+ /* `rng_excl` is a simple upper bound for `threshold`. */
+ const uint8_t threshold = -rng_excl % rng_excl;
+ /* Same as: threshold=((uint16_t)(0x100ULL - rng_excl)) % rng_excl; */
+
+ while (leftover < threshold) {
+ m = ((uint16_t)buffered_uint8(bitgen_state, bcnt, buf)) * rng_excl;
+ leftover = m & 0xFFUL;
+ }
+ }
+
+ return (m >> 8);
+}
+
+/*
+ * Returns a single random npy_uint64 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, uint64_t mask, bool use_masked) {
+ if (rng == 0) {
+ return off;
+ } else if (rng <= 0xFFFFFFFFUL) {
+ /* Call 32-bit generator if range in 32-bit. */
+ if (use_masked) {
+ return off + buffered_bounded_masked_uint32(bitgen_state, rng, mask, NULL,
+ NULL);
+ } else {
+ return off +
+ buffered_bounded_lemire_uint32(bitgen_state, rng, NULL, NULL);
+ }
+ } else if (rng == 0xFFFFFFFFFFFFFFFFULL) {
+ /* Lemire64 doesn't support inclusive rng = 0xFFFFFFFFFFFFFFFF. */
+ return off + next_uint64(bitgen_state);
+ } else {
+ if (use_masked) {
+ return off + bounded_masked_uint64(bitgen_state, rng, mask);
+ } else {
+ return off + bounded_lemire_uint64(bitgen_state, rng);
+ }
+ }
+}
+
+/*
+ * Returns a single random npy_uint32 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state, uint32_t off,
+ uint32_t rng, uint32_t mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf) {
+ /*
+ * Unused bcnt and buf are here only to allow templating with other uint
+ * generators.
+ */
+ if (rng == 0) {
+ return off;
+ } else if (rng == 0xFFFFFFFFUL) {
+ /* Lemire32 doesn't support inclusive rng = 0xFFFFFFFF. */
+ return off + next_uint32(bitgen_state);
+ } else {
+ if (use_masked) {
+ return off +
+ buffered_bounded_masked_uint32(bitgen_state, rng, mask, bcnt, buf);
+ } else {
+ return off + buffered_bounded_lemire_uint32(bitgen_state, rng, bcnt, buf);
+ }
+ }
+}
+
+/*
+ * Returns a single random npy_uint16 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state, uint16_t off,
+ uint16_t rng, uint16_t mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf) {
+ if (rng == 0) {
+ return off;
+ } else if (rng == 0xFFFFUL) {
+ /* Lemire16 doesn't support inclusive rng = 0xFFFF. */
+ return off + buffered_uint16(bitgen_state, bcnt, buf);
+ } else {
+ if (use_masked) {
+ return off +
+ buffered_bounded_masked_uint16(bitgen_state, rng, mask, bcnt, buf);
+ } else {
+ return off + buffered_bounded_lemire_uint16(bitgen_state, rng, bcnt, buf);
+ }
+ }
+}
+
+/*
+ * Returns a single random npy_uint8 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, uint8_t mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf) {
+ if (rng == 0) {
+ return off;
+ } else if (rng == 0xFFUL) {
+ /* Lemire8 doesn't support inclusive rng = 0xFF. */
+ return off + buffered_uint8(bitgen_state, bcnt, buf);
+ } else {
+ if (use_masked) {
+ return off +
+ buffered_bounded_masked_uint8(bitgen_state, rng, mask, bcnt, buf);
+ } else {
+ return off + buffered_bounded_lemire_uint8(bitgen_state, rng, bcnt, buf);
+ }
+ }
+}
+
+npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_bool mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf) {
+ return buffered_bounded_bool(bitgen_state, off, rng, mask, bcnt, buf);
+}
+
+/*
+ * Fills an array with cnt random npy_uint64 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, npy_intp cnt, bool use_masked,
+ uint64_t *out) {
+ npy_intp i;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ } else if (rng <= 0xFFFFFFFFUL) {
+ uint32_t buf = 0;
+ int bcnt = 0;
+
+ /* Call 32-bit generator if range in 32-bit. */
+ if (use_masked) {
+ /* Smallest bit mask >= max */
+ uint64_t mask = gen_mask(rng);
+
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + buffered_bounded_masked_uint32(bitgen_state, rng, mask,
+ &bcnt, &buf);
+ }
+ } else {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off +
+ buffered_bounded_lemire_uint32(bitgen_state, rng, &bcnt, &buf);
+ }
+ }
+ } else if (rng == 0xFFFFFFFFFFFFFFFFULL) {
+ /* Lemire64 doesn't support rng = 0xFFFFFFFFFFFFFFFF. */
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + next_uint64(bitgen_state);
+ }
+ } else {
+ if (use_masked) {
+ /* Smallest bit mask >= max */
+ uint64_t mask = gen_mask(rng);
+
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + bounded_masked_uint64(bitgen_state, rng, mask);
+ }
+ } else {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + bounded_lemire_uint64(bitgen_state, rng);
+ }
+ }
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_uint32 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
+ uint32_t rng, npy_intp cnt, bool use_masked,
+ uint32_t *out) {
+ npy_intp i;
+ uint32_t buf = 0;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ } else if (rng == 0xFFFFFFFFUL) {
+ /* Lemire32 doesn't support rng = 0xFFFFFFFF. */
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + next_uint32(bitgen_state);
+ }
+ } else {
+ if (use_masked) {
+ /* Smallest bit mask >= max */
+ uint32_t mask = (uint32_t)gen_mask(rng);
+
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + buffered_bounded_masked_uint32(bitgen_state, rng, mask,
+ &bcnt, &buf);
+ }
+ } else {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off +
+ buffered_bounded_lemire_uint32(bitgen_state, rng, &bcnt, &buf);
+ }
+ }
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_uint16 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
+ uint16_t rng, npy_intp cnt, bool use_masked,
+ uint16_t *out) {
+ npy_intp i;
+ uint32_t buf = 0;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ } else if (rng == 0xFFFFUL) {
+ /* Lemire16 doesn't support rng = 0xFFFF. */
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + buffered_uint16(bitgen_state, &bcnt, &buf);
+ }
+ } else {
+ if (use_masked) {
+ /* Smallest bit mask >= max */
+ uint16_t mask = (uint16_t)gen_mask(rng);
+
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + buffered_bounded_masked_uint16(bitgen_state, rng, mask,
+ &bcnt, &buf);
+ }
+ } else {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off +
+ buffered_bounded_lemire_uint16(bitgen_state, rng, &bcnt, &buf);
+ }
+ }
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_uint8 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off, uint8_t rng,
+ npy_intp cnt, bool use_masked, uint8_t *out) {
+ npy_intp i;
+ uint32_t buf = 0;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ } else if (rng == 0xFFUL) {
+ /* Lemire8 doesn't support rng = 0xFF. */
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + buffered_uint8(bitgen_state, &bcnt, &buf);
+ }
+ } else {
+ if (use_masked) {
+ /* Smallest bit mask >= max */
+ uint8_t mask = (uint8_t)gen_mask(rng);
+
+ for (i = 0; i < cnt; i++) {
+ out[i] = off + buffered_bounded_masked_uint8(bitgen_state, rng, mask,
+ &bcnt, &buf);
+ }
+ } else {
+ for (i = 0; i < cnt; i++) {
+ out[i] =
+ off + buffered_bounded_lemire_uint8(bitgen_state, rng, &bcnt, &buf);
+ }
+ }
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_bool between off and off + rng
+ * inclusive.
+ */
+void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_intp cnt, bool use_masked,
+ npy_bool *out) {
+ npy_bool mask = 0;
+ npy_intp i;
+ uint32_t buf = 0;
+ int bcnt = 0;
+
+ for (i = 0; i < cnt; i++) {
+ out[i] = buffered_bounded_bool(bitgen_state, off, rng, mask, &bcnt, &buf);
+ }
+}
+
+void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n,
+ RAND_INT_TYPE *mnix, double *pix, npy_intp d,
+ binomial_t *binomial) {
+ double remaining_p = 1.0;
+ npy_intp j;
+ RAND_INT_TYPE dn = n;
+ for (j = 0; j < (d - 1); j++) {
+ mnix[j] = random_binomial(bitgen_state, pix[j] / remaining_p, dn, binomial);
+ dn = dn - mnix[j];
+ if (dn <= 0) {
+ break;
+ }
+ remaining_p -= pix[j];
+ }
+ if (dn > 0) {
+ mnix[d - 1] = dn;
+ }
+}
diff --git a/numpy/random/src/distributions/distributions.h b/numpy/random/src/distributions/distributions.h
new file mode 100644
index 000000000..2a6b2a045
--- /dev/null
+++ b/numpy/random/src/distributions/distributions.h
@@ -0,0 +1,214 @@
+#ifndef _RANDOMDGEN__DISTRIBUTIONS_H_
+#define _RANDOMDGEN__DISTRIBUTIONS_H_
+
+#include "Python.h"
+#include "numpy/npy_common.h"
+#include <stddef.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+#include "numpy/npy_math.h"
+#include "src/bitgen.h"
+
+/*
+ * RAND_INT_TYPE is used to share integer generators with RandomState which
+ * used long in place of int64_t. If changing a distribution that uses
+ * RAND_INT_TYPE, then the original unmodified copy must be retained for
+ * use in RandomState by copying to the legacy distributions source file.
+ */
+#ifdef NP_RANDOM_LEGACY
+#define RAND_INT_TYPE long
+#define RAND_INT_MAX LONG_MAX
+#else
+#define RAND_INT_TYPE int64_t
+#define RAND_INT_MAX INT64_MAX
+#endif
+
+#ifdef DLL_EXPORT
+#define DECLDIR __declspec(dllexport)
+#else
+#define DECLDIR extern
+#endif
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? x : y)
+#define MAX(x, y) (((x) > (y)) ? x : y)
+#endif
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846264338328
+#endif
+
+typedef struct s_binomial_t {
+ int has_binomial; /* !=0: following parameters initialized for binomial */
+ double psave;
+ RAND_INT_TYPE nsave;
+ double r;
+ double q;
+ double fm;
+ RAND_INT_TYPE m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+} binomial_t;
+
+/* Inline generators for internal use */
+static NPY_INLINE uint32_t next_uint32(bitgen_t *bitgen_state) {
+ return bitgen_state->next_uint32(bitgen_state->state);
+}
+
+static NPY_INLINE uint64_t next_uint64(bitgen_t *bitgen_state) {
+ return bitgen_state->next_uint64(bitgen_state->state);
+}
+
+static NPY_INLINE float next_float(bitgen_t *bitgen_state) {
+ return (next_uint32(bitgen_state) >> 9) * (1.0f / 8388608.0f);
+}
+
+static NPY_INLINE double next_double(bitgen_t *bitgen_state) {
+ return bitgen_state->next_double(bitgen_state->state);
+}
+
+DECLDIR double loggam(double x);
+
+DECLDIR float random_float(bitgen_t *bitgen_state);
+DECLDIR double random_double(bitgen_t *bitgen_state);
+DECLDIR void random_double_fill(bitgen_t *bitgen_state, npy_intp cnt, double *out);
+
+DECLDIR int64_t random_positive_int64(bitgen_t *bitgen_state);
+DECLDIR int32_t random_positive_int32(bitgen_t *bitgen_state);
+DECLDIR int64_t random_positive_int(bitgen_t *bitgen_state);
+DECLDIR uint64_t random_uint(bitgen_t *bitgen_state);
+
+DECLDIR double random_standard_exponential(bitgen_t *bitgen_state);
+DECLDIR void random_standard_exponential_fill(bitgen_t *bitgen_state, npy_intp cnt,
+ double *out);
+DECLDIR float random_standard_exponential_f(bitgen_t *bitgen_state);
+DECLDIR double random_standard_exponential_zig(bitgen_t *bitgen_state);
+DECLDIR void random_standard_exponential_zig_fill(bitgen_t *bitgen_state,
+ npy_intp cnt, double *out);
+DECLDIR float random_standard_exponential_zig_f(bitgen_t *bitgen_state);
+
+/*
+DECLDIR double random_gauss(bitgen_t *bitgen_state);
+DECLDIR float random_gauss_f(bitgen_t *bitgen_state);
+*/
+DECLDIR double random_gauss_zig(bitgen_t *bitgen_state);
+DECLDIR float random_gauss_zig_f(bitgen_t *bitgen_state);
+DECLDIR void random_gauss_zig_fill(bitgen_t *bitgen_state, npy_intp cnt,
+ double *out);
+
+/*
+DECLDIR double random_standard_gamma(bitgen_t *bitgen_state, double shape);
+DECLDIR float random_standard_gamma_f(bitgen_t *bitgen_state, float shape);
+*/
+DECLDIR double random_standard_gamma_zig(bitgen_t *bitgen_state, double shape);
+DECLDIR float random_standard_gamma_zig_f(bitgen_t *bitgen_state, float shape);
+
+/*
+DECLDIR double random_normal(bitgen_t *bitgen_state, double loc, double scale);
+*/
+DECLDIR double random_normal_zig(bitgen_t *bitgen_state, double loc, double scale);
+
+DECLDIR double random_gamma(bitgen_t *bitgen_state, double shape, double scale);
+DECLDIR float random_gamma_float(bitgen_t *bitgen_state, float shape, float scale);
+
+DECLDIR double random_exponential(bitgen_t *bitgen_state, double scale);
+DECLDIR double random_uniform(bitgen_t *bitgen_state, double lower, double range);
+DECLDIR double random_beta(bitgen_t *bitgen_state, double a, double b);
+DECLDIR double random_chisquare(bitgen_t *bitgen_state, double df);
+DECLDIR double random_f(bitgen_t *bitgen_state, double dfnum, double dfden);
+DECLDIR double random_standard_cauchy(bitgen_t *bitgen_state);
+DECLDIR double random_pareto(bitgen_t *bitgen_state, double a);
+DECLDIR double random_weibull(bitgen_t *bitgen_state, double a);
+DECLDIR double random_power(bitgen_t *bitgen_state, double a);
+DECLDIR double random_laplace(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_gumbel(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_logistic(bitgen_t *bitgen_state, double loc, double scale);
+DECLDIR double random_lognormal(bitgen_t *bitgen_state, double mean, double sigma);
+DECLDIR double random_rayleigh(bitgen_t *bitgen_state, double mode);
+DECLDIR double random_standard_t(bitgen_t *bitgen_state, double df);
+DECLDIR double random_noncentral_chisquare(bitgen_t *bitgen_state, double df,
+ double nonc);
+DECLDIR double random_noncentral_f(bitgen_t *bitgen_state, double dfnum,
+ double dfden, double nonc);
+DECLDIR double random_wald(bitgen_t *bitgen_state, double mean, double scale);
+DECLDIR double random_vonmises(bitgen_t *bitgen_state, double mu, double kappa);
+DECLDIR double random_triangular(bitgen_t *bitgen_state, double left, double mode,
+ double right);
+
+DECLDIR RAND_INT_TYPE random_poisson(bitgen_t *bitgen_state, double lam);
+DECLDIR RAND_INT_TYPE random_negative_binomial(bitgen_t *bitgen_state, double n,
+ double p);
+
+DECLDIR RAND_INT_TYPE random_binomial_btpe(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+DECLDIR RAND_INT_TYPE random_binomial_inversion(bitgen_t *bitgen_state,
+ RAND_INT_TYPE n,
+ double p,
+ binomial_t *binomial);
+DECLDIR int64_t random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
+
+DECLDIR RAND_INT_TYPE random_logseries(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_geometric_search(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_geometric_inversion(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_geometric(bitgen_t *bitgen_state, double p);
+DECLDIR RAND_INT_TYPE random_zipf(bitgen_t *bitgen_state, double a);
+DECLDIR int64_t random_hypergeometric(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad, int64_t sample);
+
+DECLDIR uint64_t random_interval(bitgen_t *bitgen_state, uint64_t max);
+
+/* Generate random uint64 numbers in closed interval [off, off + rng]. */
+DECLDIR uint64_t random_bounded_uint64(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, uint64_t mask,
+ bool use_masked);
+
+/* Generate random uint32 numbers in closed interval [off, off + rng]. */
+DECLDIR uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
+ uint32_t off, uint32_t rng,
+ uint32_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
+ uint16_t off, uint16_t rng,
+ uint16_t mask, bool use_masked,
+ int *bcnt, uint32_t *buf);
+DECLDIR uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, uint8_t mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+DECLDIR npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_bool mask,
+ bool use_masked, int *bcnt,
+ uint32_t *buf);
+
+DECLDIR void random_bounded_uint64_fill(bitgen_t *bitgen_state, uint64_t off,
+ uint64_t rng, npy_intp cnt,
+ bool use_masked, uint64_t *out);
+DECLDIR void random_bounded_uint32_fill(bitgen_t *bitgen_state, uint32_t off,
+ uint32_t rng, npy_intp cnt,
+ bool use_masked, uint32_t *out);
+DECLDIR void random_bounded_uint16_fill(bitgen_t *bitgen_state, uint16_t off,
+ uint16_t rng, npy_intp cnt,
+ bool use_masked, uint16_t *out);
+DECLDIR void random_bounded_uint8_fill(bitgen_t *bitgen_state, uint8_t off,
+ uint8_t rng, npy_intp cnt,
+ bool use_masked, uint8_t *out);
+DECLDIR void random_bounded_bool_fill(bitgen_t *bitgen_state, npy_bool off,
+ npy_bool rng, npy_intp cnt,
+ bool use_masked, npy_bool *out);
+
+DECLDIR void random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n, RAND_INT_TYPE *mnix,
+ double *pix, npy_intp d, binomial_t *binomial);
+
+#endif
diff --git a/numpy/random/src/distributions/logfactorial.c b/numpy/random/src/distributions/logfactorial.c
new file mode 100644
index 000000000..130516469
--- /dev/null
+++ b/numpy/random/src/distributions/logfactorial.c
@@ -0,0 +1,158 @@
+
+#include <math.h>
+#include <stdint.h>
+
+/*
+ * logfact[k] holds log(k!) for k = 0, 1, 2, ..., 125.
+ */
+
+static const double logfact[] = {
+ 0,
+ 0,
+ 0.69314718055994529,
+ 1.791759469228055,
+ 3.1780538303479458,
+ 4.7874917427820458,
+ 6.5792512120101012,
+ 8.5251613610654147,
+ 10.604602902745251,
+ 12.801827480081469,
+ 15.104412573075516,
+ 17.502307845873887,
+ 19.987214495661885,
+ 22.552163853123425,
+ 25.19122118273868,
+ 27.89927138384089,
+ 30.671860106080672,
+ 33.505073450136891,
+ 36.395445208033053,
+ 39.339884187199495,
+ 42.335616460753485,
+ 45.380138898476908,
+ 48.471181351835227,
+ 51.606675567764377,
+ 54.784729398112319,
+ 58.003605222980518,
+ 61.261701761002001,
+ 64.557538627006338,
+ 67.88974313718154,
+ 71.257038967168015,
+ 74.658236348830158,
+ 78.092223553315307,
+ 81.557959456115043,
+ 85.054467017581516,
+ 88.580827542197682,
+ 92.136175603687093,
+ 95.719694542143202,
+ 99.330612454787428,
+ 102.96819861451381,
+ 106.63176026064346,
+ 110.32063971475739,
+ 114.03421178146171,
+ 117.77188139974507,
+ 121.53308151543864,
+ 125.3172711493569,
+ 129.12393363912722,
+ 132.95257503561632,
+ 136.80272263732635,
+ 140.67392364823425,
+ 144.5657439463449,
+ 148.47776695177302,
+ 152.40959258449735,
+ 156.3608363030788,
+ 160.3311282166309,
+ 164.32011226319517,
+ 168.32744544842765,
+ 172.35279713916279,
+ 176.39584840699735,
+ 180.45629141754378,
+ 184.53382886144948,
+ 188.6281734236716,
+ 192.7390472878449,
+ 196.86618167289001,
+ 201.00931639928152,
+ 205.1681994826412,
+ 209.34258675253685,
+ 213.53224149456327,
+ 217.73693411395422,
+ 221.95644181913033,
+ 226.1905483237276,
+ 230.43904356577696,
+ 234.70172344281826,
+ 238.97838956183432,
+ 243.26884900298271,
+ 247.57291409618688,
+ 251.89040220972319,
+ 256.22113555000954,
+ 260.56494097186322,
+ 264.92164979855278,
+ 269.29109765101981,
+ 273.67312428569369,
+ 278.06757344036612,
+ 282.4742926876304,
+ 286.89313329542699,
+ 291.32395009427029,
+ 295.76660135076065,
+ 300.22094864701415,
+ 304.68685676566872,
+ 309.1641935801469,
+ 313.65282994987905,
+ 318.1526396202093,
+ 322.66349912672615,
+ 327.1852877037752,
+ 331.71788719692847,
+ 336.26118197919845,
+ 340.81505887079902,
+ 345.37940706226686,
+ 349.95411804077025,
+ 354.53908551944079,
+ 359.1342053695754,
+ 363.73937555556347,
+ 368.35449607240474,
+ 372.97946888568902,
+ 377.61419787391867,
+ 382.25858877306001,
+ 386.91254912321756,
+ 391.57598821732961,
+ 396.24881705179155,
+ 400.93094827891576,
+ 405.6222961611449,
+ 410.32277652693733,
+ 415.03230672824964,
+ 419.75080559954472,
+ 424.47819341825709,
+ 429.21439186665157,
+ 433.95932399501481,
+ 438.71291418612117,
+ 443.47508812091894,
+ 448.24577274538461,
+ 453.02489623849613,
+ 457.81238798127816,
+ 462.60817852687489,
+ 467.4121995716082,
+ 472.22438392698058,
+ 477.04466549258564,
+ 481.87297922988796
+};
+
+/*
+ * Compute log(k!)
+ */
+
+double logfactorial(int64_t k)
+{
+ const double halfln2pi = 0.9189385332046728;
+
+ if (k < (int64_t) (sizeof(logfact)/sizeof(logfact[0]))) {
+ /* Use the lookup table. */
+ return logfact[k];
+ }
+
+ /*
+ * Use the Stirling series, truncated at the 1/k**3 term.
+ * (In a Python implementation of this approximation, the result
+ * was within 2 ULP of the best 64 bit floating point value for
+ * k up to 10000000.)
+ */
+ return (k + 0.5)*log(k) - k + (halfln2pi + (1.0/k)*(1/12.0 - 1/(360.0*k*k)));
+}
diff --git a/numpy/random/src/distributions/logfactorial.h b/numpy/random/src/distributions/logfactorial.h
new file mode 100644
index 000000000..1fedef3f6
--- /dev/null
+++ b/numpy/random/src/distributions/logfactorial.h
@@ -0,0 +1,9 @@
+
+#ifndef LOGFACTORIAL_H
+#define LOGFACTORIAL_H
+
+#include <stdint.h>
+
+double logfactorial(int64_t k);
+
+#endif
diff --git a/numpy/random/src/distributions/random_hypergeometric.c b/numpy/random/src/distributions/random_hypergeometric.c
new file mode 100644
index 000000000..94dc6380f
--- /dev/null
+++ b/numpy/random/src/distributions/random_hypergeometric.c
@@ -0,0 +1,260 @@
+#include "distributions.h"
+#include "logfactorial.h"
+#include <stdint.h>
+
+/*
+ * Generate a sample from the hypergeometric distribution.
+ *
+ * Assume sample is not greater than half the total. See below
+ * for how the opposite case is handled.
+ *
+ * We initialize the following:
+ * computed_sample = sample
+ * remaining_good = good
+ * remaining_total = good + bad
+ *
+ * In the loop:
+ * * computed_sample counts down to 0;
+ * * remaining_good is the number of good choices not selected yet;
+ * * remaining_total is the total number of choices not selected yet.
+ *
+ * In the loop, we select items by choosing a random integer in
+ * the interval [0, remaining_total), and if the value is less
+ * than remaining_good, it means we have selected a good one,
+ * so remaining_good is decremented. Then, regardless of that
+ * result, computed_sample is decremented. The loop continues
+ * until either computed_sample is 0, remaining_good is 0, or
+ * remaining_total == remaining_good. In the latter case, it
+ * means there are only good choices left, so we can stop the
+ * loop early and select what is left of computed_sample from
+ * the good choices (i.e. decrease remaining_good by computed_sample).
+ *
+ * When the loop exits, the actual number of good choices is
+ * good - remaining_good.
+ *
+ * If sample is more than half the total, then initially we set
+ * computed_sample = total - sample
+ * and at the end we return remaining_good (i.e. the loop in effect
+ * selects the complement of the result).
+ *
+ * It is assumed that when this function is called:
+ * * good, bad and sample are nonnegative;
+ * * the sum good+bad will not result in overflow;
+ * * sample <= good+bad.
+ */
+
+static int64_t hypergeometric_sample(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad, int64_t sample)
+{
+ int64_t remaining_total, remaining_good, result, computed_sample;
+ int64_t total = good + bad;
+
+ if (sample > total/2) {
+ computed_sample = total - sample;
+ }
+ else {
+ computed_sample = sample;
+ }
+
+ remaining_total = total;
+ remaining_good = good;
+
+ while ((computed_sample > 0) && (remaining_good > 0) &&
+ (remaining_total > remaining_good)) {
+ // random_interval(bitgen_state, max) returns an integer in
+ // [0, max] *inclusive*, so we decrement remaining_total before
+ // passing it to random_interval().
+ --remaining_total;
+ if ((int64_t) random_interval(bitgen_state,
+ remaining_total) < remaining_good) {
+ // Selected a "good" one, so decrement remaining_good.
+ --remaining_good;
+ }
+ --computed_sample;
+ }
+
+ if (remaining_total == remaining_good) {
+ // Only "good" choices are left.
+ remaining_good -= computed_sample;
+ }
+
+ if (sample > total/2) {
+ result = remaining_good;
+ }
+ else {
+ result = good - remaining_good;
+ }
+
+ return result;
+}
+
+
+// D1 = 2*sqrt(2/e)
+// D2 = 3 - 2*sqrt(3/e)
+#define D1 1.7155277699214135
+#define D2 0.8989161620588988
+
+/*
+ * Generate variates from the hypergeometric distribution
+ * using the ratio-of-uniforms method.
+ *
+ * In the code, the variable names a, b, c, g, h, m, p, q, K, T,
+ * U and X match the names used in "Algorithm HRUA" beginning on
+ * page 82 of Stadlober's 1989 thesis.
+ *
+ * It is assumed that when this function is called:
+ * * good, bad and sample are nonnegative;
+ * * the sum good+bad will not result in overflow;
+ * * sample <= good+bad.
+ *
+ * References:
+ * - Ernst Stadlober's thesis "Sampling from Poisson, Binomial and
+ * Hypergeometric Distributions: Ratio of Uniforms as a Simple and
+ * Fast Alternative" (1989)
+ * - Ernst Stadlober, "The ratio of uniforms approach for generating
+ * discrete random variates", Journal of Computational and Applied
+ * Mathematics, 31, pp. 181-189 (1990).
+ */
+
+static int64_t hypergeometric_hrua(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad, int64_t sample)
+{
+ int64_t mingoodbad, maxgoodbad, popsize;
+ int64_t computed_sample;
+ double p, q;
+ double mu, var;
+ double a, c, b, h, g;
+ int64_t m, K;
+
+ popsize = good + bad;
+ computed_sample = MIN(sample, popsize - sample);
+ mingoodbad = MIN(good, bad);
+ maxgoodbad = MAX(good, bad);
+
+ /*
+ * Variables that do not match Stadlober (1989)
+ * Here Stadlober
+ * ---------------- ---------
+ * mingoodbad M
+ * popsize N
+ * computed_sample n
+ */
+
+ p = ((double) mingoodbad) / popsize;
+ q = ((double) maxgoodbad) / popsize;
+
+ // mu is the mean of the distribution.
+ mu = computed_sample * p;
+
+ a = mu + 0.5;
+
+ // var is the variance of the distribution.
+ var = ((double)(popsize - computed_sample) *
+ computed_sample * p * q / (popsize - 1));
+
+ c = sqrt(var + 0.5);
+
+ /*
+ * h is 2*s_hat (See Stadlober's thesis (1989), Eq. (5.17); or
+ * Stadlober (1990), Eq. 8). s_hat is the scale of the "table mountain"
+ * function that dominates the scaled hypergeometric PMF ("scaled" means
+ * normalized to have a maximum value of 1).
+ */
+ h = D1*c + D2;
+
+ m = (int64_t) floor((double)(computed_sample + 1) * (mingoodbad + 1) /
+ (popsize + 2));
+
+ g = (logfactorial(m) +
+ logfactorial(mingoodbad - m) +
+ logfactorial(computed_sample - m) +
+ logfactorial(maxgoodbad - computed_sample + m));
+
+ /*
+ * b is the upper bound for random samples:
+ * ... min(computed_sample, mingoodbad) + 1 is the length of the support.
+ * ... floor(a + 16*c) is 16 standard deviations beyond the mean.
+ *
+ * The idea behind the second upper bound is that values that far out in
+ * the tail have negligible probabilities.
+ *
+ * There is a comment in a previous version of this algorithm that says
+ * "16 for 16-decimal-digit precision in D1 and D2",
+ * but there is no documented justification for this value. A lower value
+ * might work just as well, but I've kept the value 16 here.
+ */
+ b = MIN(MIN(computed_sample, mingoodbad) + 1, floor(a + 16*c));
+
+ while (1) {
+ double U, V, X, T;
+ double gp;
+ U = random_double(bitgen_state);
+ V = random_double(bitgen_state); // "U star" in Stadlober (1989)
+ X = a + h*(V - 0.5) / U;
+
+ // fast rejection:
+ if ((X < 0.0) || (X >= b)) {
+ continue;
+ }
+
+ K = (int64_t) floor(X);
+
+ gp = (logfactorial(K) +
+ logfactorial(mingoodbad - K) +
+ logfactorial(computed_sample - K) +
+ logfactorial(maxgoodbad - computed_sample + K));
+
+ T = g - gp;
+
+ // fast acceptance:
+ if ((U*(4.0 - U) - 3.0) <= T) {
+ break;
+ }
+
+ // fast rejection:
+ if (U*(U - T) >= 1) {
+ continue;
+ }
+
+ if (2.0*log(U) <= T) {
+ // acceptance
+ break;
+ }
+ }
+
+ if (good > bad) {
+ K = computed_sample - K;
+ }
+
+ if (computed_sample < sample) {
+ K = good - K;
+ }
+
+ return K;
+}
+
+
+/*
+ * Draw a sample from the hypergeometric distribution.
+ *
+ * It is assumed that when this function is called:
+ * * good, bad and sample are nonnegative;
+ * * the sum good+bad will not result in overflow;
+ * * sample <= good+bad.
+ */
+
+int64_t random_hypergeometric(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad, int64_t sample)
+{
+ int64_t r;
+
+ if ((sample >= 10) && (sample <= good + bad - 10)) {
+ // This will use the ratio-of-uniforms method.
+ r = hypergeometric_hrua(bitgen_state, good, bad, sample);
+ }
+ else {
+ // The simpler implementation is faster for small samples.
+ r = hypergeometric_sample(bitgen_state, good, bad, sample);
+ }
+ return r;
+}
diff --git a/numpy/random/src/distributions/ziggurat_constants.h b/numpy/random/src/distributions/ziggurat_constants.h
new file mode 100644
index 000000000..c254466de
--- /dev/null
+++ b/numpy/random/src/distributions/ziggurat_constants.h
@@ -0,0 +1,1206 @@
+static const uint64_t ki_double[] = {
+ 0x000EF33D8025EF6AULL, 0x0000000000000000ULL, 0x000C08BE98FBC6A8ULL,
+ 0x000DA354FABD8142ULL, 0x000E51F67EC1EEEAULL, 0x000EB255E9D3F77EULL,
+ 0x000EEF4B817ECAB9ULL, 0x000F19470AFA44AAULL, 0x000F37ED61FFCB18ULL,
+ 0x000F4F469561255CULL, 0x000F61A5E41BA396ULL, 0x000F707A755396A4ULL,
+ 0x000F7CB2EC28449AULL, 0x000F86F10C6357D3ULL, 0x000F8FA6578325DEULL,
+ 0x000F9724C74DD0DAULL, 0x000F9DA907DBF509ULL, 0x000FA360F581FA74ULL,
+ 0x000FA86FDE5B4BF8ULL, 0x000FACF160D354DCULL, 0x000FB0FB6718B90FULL,
+ 0x000FB49F8D5374C6ULL, 0x000FB7EC2366FE77ULL, 0x000FBAECE9A1E50EULL,
+ 0x000FBDAB9D040BEDULL, 0x000FC03060FF6C57ULL, 0x000FC2821037A248ULL,
+ 0x000FC4A67AE25BD1ULL, 0x000FC6A2977AEE31ULL, 0x000FC87AA92896A4ULL,
+ 0x000FCA325E4BDE85ULL, 0x000FCBCCE902231AULL, 0x000FCD4D12F839C4ULL,
+ 0x000FCEB54D8FEC99ULL, 0x000FD007BF1DC930ULL, 0x000FD1464DD6C4E6ULL,
+ 0x000FD272A8E2F450ULL, 0x000FD38E4FF0C91EULL, 0x000FD49A9990B478ULL,
+ 0x000FD598B8920F53ULL, 0x000FD689C08E99ECULL, 0x000FD76EA9C8E832ULL,
+ 0x000FD848547B08E8ULL, 0x000FD9178BAD2C8CULL, 0x000FD9DD07A7ADD2ULL,
+ 0x000FDA9970105E8CULL, 0x000FDB4D5DC02E20ULL, 0x000FDBF95C5BFCD0ULL,
+ 0x000FDC9DEBB99A7DULL, 0x000FDD3B8118729DULL, 0x000FDDD288342F90ULL,
+ 0x000FDE6364369F64ULL, 0x000FDEEE708D514EULL, 0x000FDF7401A6B42EULL,
+ 0x000FDFF46599ED40ULL, 0x000FE06FE4BC24F2ULL, 0x000FE0E6C225A258ULL,
+ 0x000FE1593C28B84CULL, 0x000FE1C78CBC3F99ULL, 0x000FE231E9DB1CAAULL,
+ 0x000FE29885DA1B91ULL, 0x000FE2FB8FB54186ULL, 0x000FE35B33558D4AULL,
+ 0x000FE3B799D0002AULL, 0x000FE410E99EAD7FULL, 0x000FE46746D47734ULL,
+ 0x000FE4BAD34C095CULL, 0x000FE50BAED29524ULL, 0x000FE559F74EBC78ULL,
+ 0x000FE5A5C8E41212ULL, 0x000FE5EF3E138689ULL, 0x000FE6366FD91078ULL,
+ 0x000FE67B75C6D578ULL, 0x000FE6BE661E11AAULL, 0x000FE6FF55E5F4F2ULL,
+ 0x000FE73E5900A702ULL, 0x000FE77B823E9E39ULL, 0x000FE7B6E37070A2ULL,
+ 0x000FE7F08D774243ULL, 0x000FE8289053F08CULL, 0x000FE85EFB35173AULL,
+ 0x000FE893DC840864ULL, 0x000FE8C741F0CEBCULL, 0x000FE8F9387D4EF6ULL,
+ 0x000FE929CC879B1DULL, 0x000FE95909D388EAULL, 0x000FE986FB939AA2ULL,
+ 0x000FE9B3AC714866ULL, 0x000FE9DF2694B6D5ULL, 0x000FEA0973ABE67CULL,
+ 0x000FEA329CF166A4ULL, 0x000FEA5AAB32952CULL, 0x000FEA81A6D5741AULL,
+ 0x000FEAA797DE1CF0ULL, 0x000FEACC85F3D920ULL, 0x000FEAF07865E63CULL,
+ 0x000FEB13762FEC13ULL, 0x000FEB3585FE2A4AULL, 0x000FEB56AE3162B4ULL,
+ 0x000FEB76F4E284FAULL, 0x000FEB965FE62014ULL, 0x000FEBB4F4CF9D7CULL,
+ 0x000FEBD2B8F449D0ULL, 0x000FEBEFB16E2E3EULL, 0x000FEC0BE31EBDE8ULL,
+ 0x000FEC2752B15A15ULL, 0x000FEC42049DAFD3ULL, 0x000FEC5BFD29F196ULL,
+ 0x000FEC75406CEEF4ULL, 0x000FEC8DD2500CB4ULL, 0x000FECA5B6911F12ULL,
+ 0x000FECBCF0C427FEULL, 0x000FECD38454FB15ULL, 0x000FECE97488C8B3ULL,
+ 0x000FECFEC47F91B7ULL, 0x000FED1377358528ULL, 0x000FED278F844903ULL,
+ 0x000FED3B10242F4CULL, 0x000FED4DFBAD586EULL, 0x000FED605498C3DDULL,
+ 0x000FED721D414FE8ULL, 0x000FED8357E4A982ULL, 0x000FED9406A42CC8ULL,
+ 0x000FEDA42B85B704ULL, 0x000FEDB3C8746AB4ULL, 0x000FEDC2DF416652ULL,
+ 0x000FEDD171A46E52ULL, 0x000FEDDF813C8AD3ULL, 0x000FEDED0F909980ULL,
+ 0x000FEDFA1E0FD414ULL, 0x000FEE06AE124BC4ULL, 0x000FEE12C0D95A06ULL,
+ 0x000FEE1E579006E0ULL, 0x000FEE29734B6524ULL, 0x000FEE34150AE4BCULL,
+ 0x000FEE3E3DB89B3CULL, 0x000FEE47EE2982F4ULL, 0x000FEE51271DB086ULL,
+ 0x000FEE59E9407F41ULL, 0x000FEE623528B42EULL, 0x000FEE6A0B5897F1ULL,
+ 0x000FEE716C3E077AULL, 0x000FEE7858327B82ULL, 0x000FEE7ECF7B06BAULL,
+ 0x000FEE84D2484AB2ULL, 0x000FEE8A60B66343ULL, 0x000FEE8F7ACCC851ULL,
+ 0x000FEE94207E25DAULL, 0x000FEE9851A829EAULL, 0x000FEE9C0E13485CULL,
+ 0x000FEE9F557273F4ULL, 0x000FEEA22762CCAEULL, 0x000FEEA4836B42ACULL,
+ 0x000FEEA668FC2D71ULL, 0x000FEEA7D76ED6FAULL, 0x000FEEA8CE04FA0AULL,
+ 0x000FEEA94BE8333BULL, 0x000FEEA950296410ULL, 0x000FEEA8D9C0075EULL,
+ 0x000FEEA7E7897654ULL, 0x000FEEA678481D24ULL, 0x000FEEA48AA29E83ULL,
+ 0x000FEEA21D22E4DAULL, 0x000FEE9F2E352024ULL, 0x000FEE9BBC26AF2EULL,
+ 0x000FEE97C524F2E4ULL, 0x000FEE93473C0A3AULL, 0x000FEE8E40557516ULL,
+ 0x000FEE88AE369C7AULL, 0x000FEE828E7F3DFDULL, 0x000FEE7BDEA7B888ULL,
+ 0x000FEE749BFF37FFULL, 0x000FEE6CC3A9BD5EULL, 0x000FEE64529E007EULL,
+ 0x000FEE5B45A32888ULL, 0x000FEE51994E57B6ULL, 0x000FEE474A0006CFULL,
+ 0x000FEE3C53E12C50ULL, 0x000FEE30B2E02AD8ULL, 0x000FEE2462AD8205ULL,
+ 0x000FEE175EB83C5AULL, 0x000FEE09A22A1447ULL, 0x000FEDFB27E349CCULL,
+ 0x000FEDEBEA76216CULL, 0x000FEDDBE422047EULL, 0x000FEDCB0ECE39D3ULL,
+ 0x000FEDB964042CF4ULL, 0x000FEDA6DCE938C9ULL, 0x000FED937237E98DULL,
+ 0x000FED7F1C38A836ULL, 0x000FED69D2B9C02BULL, 0x000FED538D06AE00ULL,
+ 0x000FED3C41DEA422ULL, 0x000FED23E76A2FD8ULL, 0x000FED0A732FE644ULL,
+ 0x000FECEFDA07FE34ULL, 0x000FECD4100EB7B8ULL, 0x000FECB708956EB4ULL,
+ 0x000FEC98B61230C1ULL, 0x000FEC790A0DA978ULL, 0x000FEC57F50F31FEULL,
+ 0x000FEC356686C962ULL, 0x000FEC114CB4B335ULL, 0x000FEBEB948E6FD0ULL,
+ 0x000FEBC429A0B692ULL, 0x000FEB9AF5EE0CDCULL, 0x000FEB6FE1C98542ULL,
+ 0x000FEB42D3AD1F9EULL, 0x000FEB13B00B2D4BULL, 0x000FEAE2591A02E9ULL,
+ 0x000FEAAEAE992257ULL, 0x000FEA788D8EE326ULL, 0x000FEA3FCFFD73E5ULL,
+ 0x000FEA044C8DD9F6ULL, 0x000FE9C5D62F563BULL, 0x000FE9843BA947A4ULL,
+ 0x000FE93F471D4728ULL, 0x000FE8F6BD76C5D6ULL, 0x000FE8AA5DC4E8E6ULL,
+ 0x000FE859E07AB1EAULL, 0x000FE804F690A940ULL, 0x000FE7AB488233C0ULL,
+ 0x000FE74C751F6AA5ULL, 0x000FE6E8102AA202ULL, 0x000FE67DA0B6ABD8ULL,
+ 0x000FE60C9F38307EULL, 0x000FE5947338F742ULL, 0x000FE51470977280ULL,
+ 0x000FE48BD436F458ULL, 0x000FE3F9BFFD1E37ULL, 0x000FE35D35EEB19CULL,
+ 0x000FE2B5122FE4FEULL, 0x000FE20003995557ULL, 0x000FE13C82788314ULL,
+ 0x000FE068C4EE67B0ULL, 0x000FDF82B02B71AAULL, 0x000FDE87C57EFEAAULL,
+ 0x000FDD7509C63BFDULL, 0x000FDC46E529BF13ULL, 0x000FDAF8F82E0282ULL,
+ 0x000FD985E1B2BA75ULL, 0x000FD7E6EF48CF04ULL, 0x000FD613ADBD650BULL,
+ 0x000FD40149E2F012ULL, 0x000FD1A1A7B4C7ACULL, 0x000FCEE204761F9EULL,
+ 0x000FCBA8D85E11B2ULL, 0x000FC7D26ECD2D22ULL, 0x000FC32B2F1E22EDULL,
+ 0x000FBD6581C0B83AULL, 0x000FB606C4005434ULL, 0x000FAC40582A2874ULL,
+ 0x000F9E971E014598ULL, 0x000F89FA48A41DFCULL, 0x000F66C5F7F0302CULL,
+ 0x000F1A5A4B331C4AULL};
+
+static const double wi_double[] = {
+ 8.68362706080130616677e-16, 4.77933017572773682428e-17,
+ 6.35435241740526230246e-17, 7.45487048124769627714e-17,
+ 8.32936681579309972857e-17, 9.06806040505948228243e-17,
+ 9.71486007656776183958e-17, 1.02947503142410192108e-16,
+ 1.08234302884476839838e-16, 1.13114701961090307945e-16,
+ 1.17663594570229211411e-16, 1.21936172787143633280e-16,
+ 1.25974399146370927864e-16, 1.29810998862640315416e-16,
+ 1.33472037368241227547e-16, 1.36978648425712032797e-16,
+ 1.40348230012423820659e-16, 1.43595294520569430270e-16,
+ 1.46732087423644219083e-16, 1.49769046683910367425e-16,
+ 1.52715150035961979750e-16, 1.55578181694607639484e-16,
+ 1.58364940092908853989e-16, 1.61081401752749279325e-16,
+ 1.63732852039698532012e-16, 1.66323990584208352778e-16,
+ 1.68859017086765964015e-16, 1.71341701765596607184e-16,
+ 1.73775443658648593310e-16, 1.76163319230009959832e-16,
+ 1.78508123169767272927e-16, 1.80812402857991522674e-16,
+ 1.83078487648267501776e-16, 1.85308513886180189386e-16,
+ 1.87504446393738816849e-16, 1.89668097007747596212e-16,
+ 1.91801140648386198029e-16, 1.93905129306251037069e-16,
+ 1.95981504266288244037e-16, 1.98031606831281739736e-16,
+ 2.00056687762733300198e-16, 2.02057915620716538808e-16,
+ 2.04036384154802118313e-16, 2.05993118874037063144e-16,
+ 2.07929082904140197311e-16, 2.09845182223703516690e-16,
+ 2.11742270357603418769e-16, 2.13621152594498681022e-16,
+ 2.15482589785814580926e-16, 2.17327301775643674990e-16,
+ 2.19155970504272708519e-16, 2.20969242822353175995e-16,
+ 2.22767733047895534948e-16, 2.24552025294143552381e-16,
+ 2.26322675592856786566e-16, 2.28080213834501706782e-16,
+ 2.29825145544246839061e-16, 2.31557953510408037008e-16,
+ 2.33279099280043561128e-16, 2.34989024534709550938e-16,
+ 2.36688152357916037468e-16, 2.38376888404542434981e-16,
+ 2.40055621981350627349e-16, 2.41724727046750252175e-16,
+ 2.43384563137110286400e-16, 2.45035476226149539878e-16,
+ 2.46677799523270498158e-16, 2.48311854216108767769e-16,
+ 2.49937950162045242375e-16, 2.51556386532965786439e-16,
+ 2.53167452417135826983e-16, 2.54771427381694417303e-16,
+ 2.56368581998939683749e-16, 2.57959178339286723500e-16,
+ 2.59543470433517070146e-16, 2.61121704706701939097e-16,
+ 2.62694120385972564623e-16, 2.64260949884118951286e-16,
+ 2.65822419160830680292e-16, 2.67378748063236329361e-16,
+ 2.68930150647261591777e-16, 2.70476835481199518794e-16,
+ 2.72019005932773206655e-16, 2.73556860440867908686e-16,
+ 2.75090592773016664571e-16, 2.76620392269639032183e-16,
+ 2.78146444075954410103e-16, 2.79668929362423005309e-16,
+ 2.81188025534502074329e-16, 2.82703906432447923059e-16,
+ 2.84216742521840606520e-16, 2.85726701075460149289e-16,
+ 2.87233946347097994381e-16, 2.88738639737848191815e-16,
+ 2.90240939955384233230e-16, 2.91741003166694553259e-16,
+ 2.93238983144718163965e-16, 2.94735031409293489611e-16,
+ 2.96229297362806647792e-16, 2.97721928420902891115e-16,
+ 2.99213070138601307081e-16, 3.00702866332133102993e-16,
+ 3.02191459196806151971e-16, 3.03678989421180184427e-16,
+ 3.05165596297821922381e-16, 3.06651417830895451744e-16,
+ 3.08136590840829717032e-16, 3.09621251066292253306e-16,
+ 3.11105533263689296831e-16, 3.12589571304399892784e-16,
+ 3.14073498269944617203e-16, 3.15557446545280064031e-16,
+ 3.17041547910402852545e-16, 3.18525933630440648871e-16,
+ 3.20010734544401137886e-16, 3.21496081152744704901e-16,
+ 3.22982103703941557538e-16, 3.24468932280169778077e-16,
+ 3.25956696882307838340e-16, 3.27445527514370671802e-16,
+ 3.28935554267536967851e-16, 3.30426907403912838589e-16,
+ 3.31919717440175233652e-16, 3.33414115231237245918e-16,
+ 3.34910232054077845412e-16, 3.36408199691876507948e-16,
+ 3.37908150518594979994e-16, 3.39410217584148914282e-16,
+ 3.40914534700312603713e-16, 3.42421236527501816058e-16,
+ 3.43930458662583133920e-16, 3.45442337727858401604e-16,
+ 3.46957011461378353333e-16, 3.48474618808741370700e-16,
+ 3.49995300016538099813e-16, 3.51519196727607440975e-16,
+ 3.53046452078274009054e-16, 3.54577210797743572160e-16,
+ 3.56111619309838843415e-16, 3.57649825837265051035e-16,
+ 3.59191980508602994994e-16, 3.60738235468235137839e-16,
+ 3.62288744989419151904e-16, 3.63843665590734438546e-16,
+ 3.65403156156136995766e-16, 3.66967378058870090021e-16,
+ 3.68536495289491401456e-16, 3.70110674588289834952e-16,
+ 3.71690085582382297792e-16, 3.73274900927794352614e-16,
+ 3.74865296456848868882e-16, 3.76461451331202869131e-16,
+ 3.78063548200896037651e-16, 3.79671773369794425924e-16,
+ 3.81286316967837738238e-16, 3.82907373130524317507e-16,
+ 3.84535140186095955858e-16, 3.86169820850914927119e-16,
+ 3.87811622433558721164e-16, 3.89460757048192620674e-16,
+ 3.91117441837820542060e-16, 3.92781899208054153270e-16,
+ 3.94454357072087711446e-16, 3.96135049107613542983e-16,
+ 3.97824215026468259474e-16, 3.99522100857856502444e-16,
+ 4.01228959246062907451e-16, 4.02945049763632792393e-16,
+ 4.04670639241074995115e-16, 4.06406002114225038723e-16,
+ 4.08151420790493873480e-16, 4.09907186035326643447e-16,
+ 4.11673597380302570170e-16, 4.13450963554423599878e-16,
+ 4.15239602940268833891e-16, 4.17039844056831587498e-16,
+ 4.18852026071011229572e-16, 4.20676499339901510978e-16,
+ 4.22513625986204937320e-16, 4.24363780509307796137e-16,
+ 4.26227350434779809917e-16, 4.28104737005311666397e-16,
+ 4.29996355916383230161e-16, 4.31902638100262944617e-16,
+ 4.33824030562279080411e-16, 4.35760997273684900553e-16,
+ 4.37714020125858747008e-16, 4.39683599951052137423e-16,
+ 4.41670257615420348435e-16, 4.43674535190656726604e-16,
+ 4.45696997211204306674e-16, 4.47738232024753387312e-16,
+ 4.49798853244554968009e-16, 4.51879501313005876278e-16,
+ 4.53980845187003400947e-16, 4.56103584156742206384e-16,
+ 4.58248449810956667052e-16, 4.60416208163115281428e-16,
+ 4.62607661954784567754e-16, 4.64823653154320737780e-16,
+ 4.67065065671263059081e-16, 4.69332828309332890697e-16,
+ 4.71627917983835129766e-16, 4.73951363232586715165e-16,
+ 4.76304248053313737663e-16, 4.78687716104872284247e-16,
+ 4.81102975314741720538e-16, 4.83551302941152515162e-16,
+ 4.86034051145081195402e-16, 4.88552653135360343280e-16,
+ 4.91108629959526955862e-16, 4.93703598024033454728e-16,
+ 4.96339277440398725619e-16, 4.99017501309182245754e-16,
+ 5.01740226071808946011e-16, 5.04509543081872748637e-16,
+ 5.07327691573354207058e-16, 5.10197073234156184149e-16,
+ 5.13120268630678373200e-16, 5.16100055774322824569e-16,
+ 5.19139431175769859873e-16, 5.22241633800023428760e-16,
+ 5.25410172417759732697e-16, 5.28648856950494511482e-16,
+ 5.31961834533840037535e-16, 5.35353631181649688145e-16,
+ 5.38829200133405320160e-16, 5.42393978220171234073e-16,
+ 5.46053951907478041166e-16, 5.49815735089281410703e-16,
+ 5.53686661246787600374e-16, 5.57674893292657647836e-16,
+ 5.61789555355541665830e-16, 5.66040892008242216739e-16,
+ 5.70440462129138908417e-16, 5.75001376891989523684e-16,
+ 5.79738594572459365014e-16, 5.84669289345547900201e-16,
+ 5.89813317647789942685e-16, 5.95193814964144415532e-16,
+ 6.00837969627190832234e-16, 6.06778040933344851394e-16,
+ 6.13052720872528159123e-16, 6.19708989458162555387e-16,
+ 6.26804696330128439415e-16, 6.34412240712750598627e-16,
+ 6.42623965954805540945e-16, 6.51560331734499356881e-16,
+ 6.61382788509766415145e-16, 6.72315046250558662913e-16,
+ 6.84680341756425875856e-16, 6.98971833638761995415e-16,
+ 7.15999493483066421560e-16, 7.37242430179879890722e-16,
+ 7.65893637080557275482e-16, 8.11384933765648418565e-16};
+
+static const double fi_double[] = {
+ 1.00000000000000000000e+00, 9.77101701267671596263e-01,
+ 9.59879091800106665211e-01, 9.45198953442299649730e-01,
+ 9.32060075959230460718e-01, 9.19991505039347012840e-01,
+ 9.08726440052130879366e-01, 8.98095921898343418910e-01,
+ 8.87984660755833377088e-01, 8.78309655808917399966e-01,
+ 8.69008688036857046555e-01, 8.60033621196331532488e-01,
+ 8.51346258458677951353e-01, 8.42915653112204177333e-01,
+ 8.34716292986883434679e-01, 8.26726833946221373317e-01,
+ 8.18929191603702366642e-01, 8.11307874312656274185e-01,
+ 8.03849483170964274059e-01, 7.96542330422958966274e-01,
+ 7.89376143566024590648e-01, 7.82341832654802504798e-01,
+ 7.75431304981187174974e-01, 7.68637315798486264740e-01,
+ 7.61953346836795386565e-01, 7.55373506507096115214e-01,
+ 7.48892447219156820459e-01, 7.42505296340151055290e-01,
+ 7.36207598126862650112e-01, 7.29995264561476231435e-01,
+ 7.23864533468630222401e-01, 7.17811932630721960535e-01,
+ 7.11834248878248421200e-01, 7.05928501332754310127e-01,
+ 7.00091918136511615067e-01, 6.94321916126116711609e-01,
+ 6.88616083004671808432e-01, 6.82972161644994857355e-01,
+ 6.77388036218773526009e-01, 6.71861719897082099173e-01,
+ 6.66391343908750100056e-01, 6.60975147776663107813e-01,
+ 6.55611470579697264149e-01, 6.50298743110816701574e-01,
+ 6.45035480820822293424e-01, 6.39820277453056585060e-01,
+ 6.34651799287623608059e-01, 6.29528779924836690007e-01,
+ 6.24450015547026504592e-01, 6.19414360605834324325e-01,
+ 6.14420723888913888899e-01, 6.09468064925773433949e-01,
+ 6.04555390697467776029e-01, 5.99681752619125263415e-01,
+ 5.94846243767987448159e-01, 5.90047996332826008015e-01,
+ 5.85286179263371453274e-01, 5.80559996100790898232e-01,
+ 5.75868682972353718164e-01, 5.71211506735253227163e-01,
+ 5.66587763256164445025e-01, 5.61996775814524340831e-01,
+ 5.57437893618765945014e-01, 5.52910490425832290562e-01,
+ 5.48413963255265812791e-01, 5.43947731190026262382e-01,
+ 5.39511234256952132426e-01, 5.35103932380457614215e-01,
+ 5.30725304403662057062e-01, 5.26374847171684479008e-01,
+ 5.22052074672321841931e-01, 5.17756517229756352272e-01,
+ 5.13487720747326958914e-01, 5.09245245995747941592e-01,
+ 5.05028667943468123624e-01, 5.00837575126148681903e-01,
+ 4.96671569052489714213e-01, 4.92530263643868537748e-01,
+ 4.88413284705458028423e-01, 4.84320269426683325253e-01,
+ 4.80250865909046753544e-01, 4.76204732719505863248e-01,
+ 4.72181538467730199660e-01, 4.68180961405693596422e-01,
+ 4.64202689048174355069e-01, 4.60246417812842867345e-01,
+ 4.56311852678716434184e-01, 4.52398706861848520777e-01,
+ 4.48506701507203064949e-01, 4.44635565395739396077e-01,
+ 4.40785034665803987508e-01, 4.36954852547985550526e-01,
+ 4.33144769112652261445e-01, 4.29354541029441427735e-01,
+ 4.25583931338021970170e-01, 4.21832709229495894654e-01,
+ 4.18100649837848226120e-01, 4.14387534040891125642e-01,
+ 4.10693148270188157500e-01, 4.07017284329473372217e-01,
+ 4.03359739221114510510e-01, 3.99720314980197222177e-01,
+ 3.96098818515832451492e-01, 3.92495061459315619512e-01,
+ 3.88908860018788715696e-01, 3.85340034840077283462e-01,
+ 3.81788410873393657674e-01, 3.78253817245619183840e-01,
+ 3.74736087137891138443e-01, 3.71235057668239498696e-01,
+ 3.67750569779032587814e-01, 3.64282468129004055601e-01,
+ 3.60830600989648031529e-01, 3.57394820145780500731e-01,
+ 3.53974980800076777232e-01, 3.50570941481406106455e-01,
+ 3.47182563956793643900e-01, 3.43809713146850715049e-01,
+ 3.40452257044521866547e-01, 3.37110066637006045021e-01,
+ 3.33783015830718454708e-01, 3.30470981379163586400e-01,
+ 3.27173842813601400970e-01, 3.23891482376391093290e-01,
+ 3.20623784956905355514e-01, 3.17370638029913609834e-01,
+ 3.14131931596337177215e-01, 3.10907558126286509559e-01,
+ 3.07697412504292056035e-01, 3.04501391976649993243e-01,
+ 3.01319396100803049698e-01, 2.98151326696685481377e-01,
+ 2.94997087799961810184e-01, 2.91856585617095209972e-01,
+ 2.88729728482182923521e-01, 2.85616426815501756042e-01,
+ 2.82516593083707578948e-01, 2.79430141761637940157e-01,
+ 2.76356989295668320494e-01, 2.73297054068577072172e-01,
+ 2.70250256365875463072e-01, 2.67216518343561471038e-01,
+ 2.64195763997261190426e-01, 2.61187919132721213522e-01,
+ 2.58192911337619235290e-01, 2.55210669954661961700e-01,
+ 2.52241126055942177508e-01, 2.49284212418528522415e-01,
+ 2.46339863501263828249e-01, 2.43408015422750312329e-01,
+ 2.40488605940500588254e-01, 2.37581574431238090606e-01,
+ 2.34686861872330010392e-01, 2.31804410824338724684e-01,
+ 2.28934165414680340644e-01, 2.26076071322380278694e-01,
+ 2.23230075763917484855e-01, 2.20396127480151998723e-01,
+ 2.17574176724331130872e-01, 2.14764175251173583536e-01,
+ 2.11966076307030182324e-01, 2.09179834621125076977e-01,
+ 2.06405406397880797353e-01, 2.03642749310334908452e-01,
+ 2.00891822494656591136e-01, 1.98152586545775138971e-01,
+ 1.95425003514134304483e-01, 1.92709036903589175926e-01,
+ 1.90004651670464985713e-01, 1.87311814223800304768e-01,
+ 1.84630492426799269756e-01, 1.81960655599522513892e-01,
+ 1.79302274522847582272e-01, 1.76655321443734858455e-01,
+ 1.74019770081838553999e-01, 1.71395595637505754327e-01,
+ 1.68782774801211288285e-01, 1.66181285764481906364e-01,
+ 1.63591108232365584074e-01, 1.61012223437511009516e-01,
+ 1.58444614155924284882e-01, 1.55888264724479197465e-01,
+ 1.53343161060262855866e-01, 1.50809290681845675763e-01,
+ 1.48286642732574552861e-01, 1.45775208005994028060e-01,
+ 1.43274978973513461566e-01, 1.40785949814444699690e-01,
+ 1.38308116448550733057e-01, 1.35841476571253755301e-01,
+ 1.33386029691669155683e-01, 1.30941777173644358090e-01,
+ 1.28508722279999570981e-01, 1.26086870220185887081e-01,
+ 1.23676228201596571932e-01, 1.21276805484790306533e-01,
+ 1.18888613442910059947e-01, 1.16511665625610869035e-01,
+ 1.14145977827838487895e-01, 1.11791568163838089811e-01,
+ 1.09448457146811797824e-01, 1.07116667774683801961e-01,
+ 1.04796225622487068629e-01, 1.02487158941935246892e-01,
+ 1.00189498768810017482e-01, 9.79032790388624646338e-02,
+ 9.56285367130089991594e-02, 9.33653119126910124859e-02,
+ 9.11136480663737591268e-02, 8.88735920682758862021e-02,
+ 8.66451944505580717859e-02, 8.44285095703534715916e-02,
+ 8.22235958132029043366e-02, 8.00305158146630696292e-02,
+ 7.78493367020961224423e-02, 7.56801303589271778804e-02,
+ 7.35229737139813238622e-02, 7.13779490588904025339e-02,
+ 6.92451443970067553879e-02, 6.71246538277884968737e-02,
+ 6.50165779712428976156e-02, 6.29210244377581412456e-02,
+ 6.08381083495398780614e-02, 5.87679529209337372930e-02,
+ 5.67106901062029017391e-02, 5.46664613248889208474e-02,
+ 5.26354182767921896513e-02, 5.06177238609477817000e-02,
+ 4.86135532158685421122e-02, 4.66230949019303814174e-02,
+ 4.46465522512944634759e-02, 4.26841449164744590750e-02,
+ 4.07361106559409394401e-02, 3.88027074045261474722e-02,
+ 3.68842156885673053135e-02, 3.49809414617161251737e-02,
+ 3.30932194585785779961e-02, 3.12214171919203004046e-02,
+ 2.93659397581333588001e-02, 2.75272356696031131329e-02,
+ 2.57058040085489103443e-02, 2.39022033057958785407e-02,
+ 2.21170627073088502113e-02, 2.03510962300445102935e-02,
+ 1.86051212757246224594e-02, 1.68800831525431419000e-02,
+ 1.51770883079353092332e-02, 1.34974506017398673818e-02,
+ 1.18427578579078790488e-02, 1.02149714397014590439e-02,
+ 8.61658276939872638800e-03, 7.05087547137322242369e-03,
+ 5.52240329925099155545e-03, 4.03797259336302356153e-03,
+ 2.60907274610215926189e-03, 1.26028593049859797236e-03};
+
+static const uint32_t ki_float[] = {
+ 0x007799ECUL, 0x00000000UL, 0x006045F5UL, 0x006D1AA8UL, 0x00728FB4UL,
+ 0x007592AFUL, 0x00777A5CUL, 0x0078CA38UL, 0x0079BF6BUL, 0x007A7A35UL,
+ 0x007B0D2FUL, 0x007B83D4UL, 0x007BE597UL, 0x007C3788UL, 0x007C7D33UL,
+ 0x007CB926UL, 0x007CED48UL, 0x007D1B08UL, 0x007D437FUL, 0x007D678BUL,
+ 0x007D87DBUL, 0x007DA4FCUL, 0x007DBF61UL, 0x007DD767UL, 0x007DED5DUL,
+ 0x007E0183UL, 0x007E1411UL, 0x007E2534UL, 0x007E3515UL, 0x007E43D5UL,
+ 0x007E5193UL, 0x007E5E67UL, 0x007E6A69UL, 0x007E75AAUL, 0x007E803EUL,
+ 0x007E8A32UL, 0x007E9395UL, 0x007E9C72UL, 0x007EA4D5UL, 0x007EACC6UL,
+ 0x007EB44EUL, 0x007EBB75UL, 0x007EC243UL, 0x007EC8BCUL, 0x007ECEE8UL,
+ 0x007ED4CCUL, 0x007EDA6BUL, 0x007EDFCBUL, 0x007EE4EFUL, 0x007EE9DCUL,
+ 0x007EEE94UL, 0x007EF31BUL, 0x007EF774UL, 0x007EFBA0UL, 0x007EFFA3UL,
+ 0x007F037FUL, 0x007F0736UL, 0x007F0ACAUL, 0x007F0E3CUL, 0x007F118FUL,
+ 0x007F14C4UL, 0x007F17DCUL, 0x007F1ADAUL, 0x007F1DBDUL, 0x007F2087UL,
+ 0x007F233AUL, 0x007F25D7UL, 0x007F285DUL, 0x007F2AD0UL, 0x007F2D2EUL,
+ 0x007F2F7AUL, 0x007F31B3UL, 0x007F33DCUL, 0x007F35F3UL, 0x007F37FBUL,
+ 0x007F39F3UL, 0x007F3BDCUL, 0x007F3DB7UL, 0x007F3F84UL, 0x007F4145UL,
+ 0x007F42F8UL, 0x007F449FUL, 0x007F463AUL, 0x007F47CAUL, 0x007F494EUL,
+ 0x007F4AC8UL, 0x007F4C38UL, 0x007F4D9DUL, 0x007F4EF9UL, 0x007F504CUL,
+ 0x007F5195UL, 0x007F52D5UL, 0x007F540DUL, 0x007F553DUL, 0x007F5664UL,
+ 0x007F5784UL, 0x007F589CUL, 0x007F59ACUL, 0x007F5AB5UL, 0x007F5BB8UL,
+ 0x007F5CB3UL, 0x007F5DA8UL, 0x007F5E96UL, 0x007F5F7EUL, 0x007F605FUL,
+ 0x007F613BUL, 0x007F6210UL, 0x007F62E0UL, 0x007F63AAUL, 0x007F646FUL,
+ 0x007F652EUL, 0x007F65E8UL, 0x007F669CUL, 0x007F674CUL, 0x007F67F6UL,
+ 0x007F689CUL, 0x007F693CUL, 0x007F69D9UL, 0x007F6A70UL, 0x007F6B03UL,
+ 0x007F6B91UL, 0x007F6C1BUL, 0x007F6CA0UL, 0x007F6D21UL, 0x007F6D9EUL,
+ 0x007F6E17UL, 0x007F6E8CUL, 0x007F6EFCUL, 0x007F6F68UL, 0x007F6FD1UL,
+ 0x007F7035UL, 0x007F7096UL, 0x007F70F3UL, 0x007F714CUL, 0x007F71A1UL,
+ 0x007F71F2UL, 0x007F723FUL, 0x007F7289UL, 0x007F72CFUL, 0x007F7312UL,
+ 0x007F7350UL, 0x007F738BUL, 0x007F73C3UL, 0x007F73F6UL, 0x007F7427UL,
+ 0x007F7453UL, 0x007F747CUL, 0x007F74A1UL, 0x007F74C3UL, 0x007F74E0UL,
+ 0x007F74FBUL, 0x007F7511UL, 0x007F7524UL, 0x007F7533UL, 0x007F753FUL,
+ 0x007F7546UL, 0x007F754AUL, 0x007F754BUL, 0x007F7547UL, 0x007F753FUL,
+ 0x007F7534UL, 0x007F7524UL, 0x007F7511UL, 0x007F74F9UL, 0x007F74DEUL,
+ 0x007F74BEUL, 0x007F749AUL, 0x007F7472UL, 0x007F7445UL, 0x007F7414UL,
+ 0x007F73DFUL, 0x007F73A5UL, 0x007F7366UL, 0x007F7323UL, 0x007F72DAUL,
+ 0x007F728DUL, 0x007F723AUL, 0x007F71E3UL, 0x007F7186UL, 0x007F7123UL,
+ 0x007F70BBUL, 0x007F704DUL, 0x007F6FD9UL, 0x007F6F5FUL, 0x007F6EDFUL,
+ 0x007F6E58UL, 0x007F6DCBUL, 0x007F6D37UL, 0x007F6C9CUL, 0x007F6BF9UL,
+ 0x007F6B4FUL, 0x007F6A9CUL, 0x007F69E2UL, 0x007F691FUL, 0x007F6854UL,
+ 0x007F677FUL, 0x007F66A1UL, 0x007F65B8UL, 0x007F64C6UL, 0x007F63C8UL,
+ 0x007F62C0UL, 0x007F61ABUL, 0x007F608AUL, 0x007F5F5DUL, 0x007F5E21UL,
+ 0x007F5CD8UL, 0x007F5B7FUL, 0x007F5A17UL, 0x007F589EUL, 0x007F5713UL,
+ 0x007F5575UL, 0x007F53C4UL, 0x007F51FEUL, 0x007F5022UL, 0x007F4E2FUL,
+ 0x007F4C22UL, 0x007F49FAUL, 0x007F47B6UL, 0x007F4553UL, 0x007F42CFUL,
+ 0x007F4028UL, 0x007F3D5AUL, 0x007F3A64UL, 0x007F3741UL, 0x007F33EDUL,
+ 0x007F3065UL, 0x007F2CA4UL, 0x007F28A4UL, 0x007F245FUL, 0x007F1FCEUL,
+ 0x007F1AEAUL, 0x007F15A9UL, 0x007F1000UL, 0x007F09E4UL, 0x007F0346UL,
+ 0x007EFC16UL, 0x007EF43EUL, 0x007EEBA8UL, 0x007EE237UL, 0x007ED7C8UL,
+ 0x007ECC2FUL, 0x007EBF37UL, 0x007EB09DUL, 0x007EA00AUL, 0x007E8D0DUL,
+ 0x007E7710UL, 0x007E5D47UL, 0x007E3E93UL, 0x007E1959UL, 0x007DEB2CUL,
+ 0x007DB036UL, 0x007D6203UL, 0x007CF4B9UL, 0x007C4FD2UL, 0x007B3630UL,
+ 0x0078D2D2UL};
+
+static const float wi_float[] = {
+ 4.66198677960027669255e-07f, 2.56588335019207033255e-08f,
+ 3.41146697750176784592e-08f, 4.00230311410932959821e-08f,
+ 4.47179475877737745459e-08f, 4.86837785973537366722e-08f,
+ 5.21562578925932412861e-08f, 5.52695199001886257153e-08f,
+ 5.81078488992733116465e-08f, 6.07279932024587421409e-08f,
+ 6.31701613261172047795e-08f, 6.54639842900233842742e-08f,
+ 6.76319905583641815324e-08f, 6.96917493470166688656e-08f,
+ 7.16572544283857476692e-08f, 7.35398519048393832969e-08f,
+ 7.53488822443557479279e-08f, 7.70921367281667127885e-08f,
+ 7.87761895947956022626e-08f, 8.04066446825615346857e-08f,
+ 8.19883218760237408659e-08f, 8.35254002936857088917e-08f,
+ 8.50215298165053411740e-08f, 8.64799190652369040985e-08f,
+ 8.79034055989140110861e-08f, 8.92945125124233511541e-08f,
+ 9.06554945027956262312e-08f, 9.19883756905278607229e-08f,
+ 9.32949809202232869780e-08f, 9.45769618559625849039e-08f,
+ 9.58358188855612866442e-08f, 9.70729196232813152662e-08f,
+ 9.82895146313061088986e-08f, 9.94867508514382224721e-08f,
+ 1.00665683139461669691e-07f, 1.01827284217853923044e-07f,
+ 1.02972453302539369464e-07f, 1.04102023612124921572e-07f,
+ 1.05216768930574060431e-07f, 1.06317409364335657741e-07f,
+ 1.07404616410877866490e-07f, 1.08479017436113134283e-07f,
+ 1.09541199642370962438e-07f, 1.10591713595628691212e-07f,
+ 1.11631076370069356306e-07f, 1.12659774359245895023e-07f,
+ 1.13678265795837113569e-07f, 1.14686983015899673063e-07f,
+ 1.15686334498432158725e-07f, 1.16676706706789039179e-07f,
+ 1.17658465754873988919e-07f, 1.18631958917986203582e-07f,
+ 1.19597516005596215528e-07f, 1.20555450611113917226e-07f,
+ 1.21506061251817163689e-07f, 1.22449632410483948386e-07f,
+ 1.23386435488872536840e-07f, 1.24316729681986364321e-07f,
+ 1.25240762781015530062e-07f, 1.26158771911939892267e-07f,
+ 1.27070984215989333455e-07f, 1.27977617477468922011e-07f,
+ 1.28878880703854958297e-07f, 1.29774974662539874521e-07f,
+ 1.30666092378141980504e-07f, 1.31552419593887221722e-07f,
+ 1.32434135200211397569e-07f, 1.33311411633413359243e-07f,
+ 1.34184415246907777059e-07f, 1.35053306657377859830e-07f,
+ 1.35918241067904315860e-07f, 1.36779368569952053923e-07f,
+ 1.37636834425917531047e-07f, 1.38490779333783508675e-07f,
+ 1.39341339675287344817e-07f, 1.40188647748881762555e-07f,
+ 1.41032831988654882776e-07f, 1.41874017170273235693e-07f,
+ 1.42712324604921442006e-07f, 1.43547872322127921816e-07f,
+ 1.44380775242292721080e-07f, 1.45211145339665544509e-07f,
+ 1.46039091796461362146e-07f, 1.46864721148745476208e-07f,
+ 1.47688137424670065700e-07f, 1.48509442275598857119e-07f,
+ 1.49328735100614641423e-07f, 1.50146113164867617390e-07f,
+ 1.50961671712187416111e-07f, 1.51775504072350982845e-07f,
+ 1.52587701763369746341e-07f, 1.53398354589133671168e-07f,
+ 1.54207550732725568797e-07f, 1.55015376845697999657e-07f,
+ 1.55821918133584372604e-07f, 1.56627258437898192833e-07f,
+ 1.57431480314857468671e-07f, 1.58234665111056041043e-07f,
+ 1.59036893036289199880e-07f, 1.59838243233728855017e-07f,
+ 1.60638793847630850137e-07f, 1.61438622088746393909e-07f,
+ 1.62237804297600106296e-07f, 1.63036416005787357730e-07f,
+ 1.63834531995435479082e-07f, 1.64632226356965902954e-07f,
+ 1.65429572545287097020e-07f, 1.66226643434541294491e-07f,
+ 1.67023511371523209274e-07f, 1.67820248227882200051e-07f,
+ 1.68616925451215588827e-07f, 1.69413614115155757272e-07f,
+ 1.70210384968549673733e-07f, 1.71007308483826142122e-07f,
+ 1.71804454904642543391e-07f, 1.72601894292900061024e-07f,
+ 1.73399696575213681990e-07f, 1.74197931588920988271e-07f,
+ 1.74996669127712165834e-07f, 1.75795978986961275677e-07f,
+ 1.76595931008838063924e-07f, 1.77396595127278238022e-07f,
+ 1.78198041412889183130e-07f, 1.79000340117867431104e-07f,
+ 1.79803561721004406185e-07f, 1.80607776972855859813e-07f,
+ 1.81413056941151359868e-07f, 1.82219473056520464354e-07f,
+ 1.83027097158612474240e-07f, 1.83836001542687613069e-07f,
+ 1.84646259006759307383e-07f, 1.85457942899367347876e-07f,
+ 1.86271127168064649331e-07f, 1.87085886408701333260e-07f,
+ 1.87902295915592424729e-07f, 1.88720431732658022414e-07f,
+ 1.89540370705627262627e-07f, 1.90362190535400839128e-07f,
+ 1.91185969832669990437e-07f, 1.92011788173893651535e-07f,
+ 1.92839726158739913768e-07f, 1.93669865469102145482e-07f,
+ 1.94502288929804890433e-07f, 1.95337080571120616772e-07f,
+ 1.96174325693223683314e-07f, 1.97014110932714374919e-07f,
+ 1.97856524331352952716e-07f, 1.98701655407150388211e-07f,
+ 1.99549595227971635348e-07f, 2.00400436487814600236e-07f,
+ 2.01254273585938820883e-07f, 2.02111202709026498408e-07f,
+ 2.02971321916571014951e-07f, 2.03834731229698846698e-07f,
+ 2.04701532723644121196e-07f, 2.05571830624108885378e-07f,
+ 2.06445731407757185541e-07f, 2.07323343907107312957e-07f,
+ 2.08204779420104330037e-07f, 2.09090151824673600213e-07f,
+ 2.09979577698577670508e-07f, 2.10873176444920111011e-07f,
+ 2.11771070423665379388e-07f, 2.12673385089569268965e-07f,
+ 2.13580249136944118603e-07f, 2.14491794651713402832e-07f,
+ 2.15408157271244625533e-07f, 2.16329476352486921685e-07f,
+ 2.17255895148978920488e-07f, 2.18187560997337924713e-07f,
+ 2.19124625513888206785e-07f, 2.20067244802139479285e-07f,
+ 2.21015579671883851683e-07f, 2.21969795870742159701e-07f,
+ 2.22930064329060010376e-07f, 2.23896561419128954210e-07f,
+ 2.24869469229791575583e-07f, 2.25848975857580322189e-07f,
+ 2.26835275715640744118e-07f, 2.27828569861799901001e-07f,
+ 2.28829066347263833069e-07f, 2.29836980587561823183e-07f,
+ 2.30852535757505260518e-07f, 2.31875963212094114516e-07f,
+ 2.32907502935486642699e-07f, 2.33947404020352726160e-07f,
+ 2.34995925180156140289e-07f, 2.36053335297164516378e-07f,
+ 2.37119914009265667728e-07f, 2.38195952338983970691e-07f,
+ 2.39281753368440712742e-07f, 2.40377632964396957621e-07f,
+ 2.41483920557958384709e-07f, 2.42600959984018662258e-07f,
+ 2.43729110386077326413e-07f, 2.44868747192698939290e-07f,
+ 2.46020263172594533433e-07f, 2.47184069576113545901e-07f,
+ 2.48360597371852893654e-07f, 2.49550298588131851232e-07f,
+ 2.50753647770270890721e-07f, 2.51971143565970967140e-07f,
+ 2.53203310452642767375e-07f, 2.54450700622322097890e-07f,
+ 2.55713896041856770961e-07f, 2.56993510708419870887e-07f,
+ 2.58290193123138874550e-07f, 2.59604629008804833146e-07f,
+ 2.60937544301314385690e-07f, 2.62289708448800566945e-07f,
+ 2.63661938057441759882e-07f, 2.65055100928844238758e-07f,
+ 2.66470120540847889467e-07f, 2.67907981031821866252e-07f,
+ 2.69369732758258246335e-07f, 2.70856498507068313229e-07f,
+ 2.72369480457841388042e-07f, 2.73909968006952220135e-07f,
+ 2.75479346585437289399e-07f, 2.77079107626811561009e-07f,
+ 2.78710859870496796972e-07f, 2.80376342222588603820e-07f,
+ 2.82077438439999912690e-07f, 2.83816193958769527230e-07f,
+ 2.85594835255375795814e-07f, 2.87415792215003905739e-07f,
+ 2.89281724087851835900e-07f, 2.91195549750371467233e-07f,
+ 2.93160483161771875581e-07f, 2.95180075129332912389e-07f,
+ 2.97258262785797916083e-07f, 2.99399428561531794298e-07f,
+ 3.01608470935804138388e-07f, 3.03890889921758510417e-07f,
+ 3.06252891144972267537e-07f, 3.08701513613258141075e-07f,
+ 3.11244787989714509378e-07f, 3.13891934589336184321e-07f,
+ 3.16653613755314681314e-07f, 3.19542246256559459667e-07f,
+ 3.22572428717978242099e-07f, 3.25761480217458181578e-07f,
+ 3.29130173358915628534e-07f, 3.32703730345002116955e-07f,
+ 3.36513208964639108346e-07f, 3.40597478255417943913e-07f,
+ 3.45006114675213401550e-07f, 3.49803789521323211592e-07f,
+ 3.55077180848341416206e-07f, 3.60946392031859609868e-07f,
+ 3.67584959507244041831e-07f, 3.75257645787954431030e-07f,
+ 3.84399301057791926300e-07f, 3.95804015855768440983e-07f,
+ 4.11186015434435801956e-07f, 4.35608969373823260746e-07f};
+
+static const float fi_float[] = {
+ 1.00000000000000000000e+00f, 9.77101701267671596263e-01f,
+ 9.59879091800106665211e-01f, 9.45198953442299649730e-01f,
+ 9.32060075959230460718e-01f, 9.19991505039347012840e-01f,
+ 9.08726440052130879366e-01f, 8.98095921898343418910e-01f,
+ 8.87984660755833377088e-01f, 8.78309655808917399966e-01f,
+ 8.69008688036857046555e-01f, 8.60033621196331532488e-01f,
+ 8.51346258458677951353e-01f, 8.42915653112204177333e-01f,
+ 8.34716292986883434679e-01f, 8.26726833946221373317e-01f,
+ 8.18929191603702366642e-01f, 8.11307874312656274185e-01f,
+ 8.03849483170964274059e-01f, 7.96542330422958966274e-01f,
+ 7.89376143566024590648e-01f, 7.82341832654802504798e-01f,
+ 7.75431304981187174974e-01f, 7.68637315798486264740e-01f,
+ 7.61953346836795386565e-01f, 7.55373506507096115214e-01f,
+ 7.48892447219156820459e-01f, 7.42505296340151055290e-01f,
+ 7.36207598126862650112e-01f, 7.29995264561476231435e-01f,
+ 7.23864533468630222401e-01f, 7.17811932630721960535e-01f,
+ 7.11834248878248421200e-01f, 7.05928501332754310127e-01f,
+ 7.00091918136511615067e-01f, 6.94321916126116711609e-01f,
+ 6.88616083004671808432e-01f, 6.82972161644994857355e-01f,
+ 6.77388036218773526009e-01f, 6.71861719897082099173e-01f,
+ 6.66391343908750100056e-01f, 6.60975147776663107813e-01f,
+ 6.55611470579697264149e-01f, 6.50298743110816701574e-01f,
+ 6.45035480820822293424e-01f, 6.39820277453056585060e-01f,
+ 6.34651799287623608059e-01f, 6.29528779924836690007e-01f,
+ 6.24450015547026504592e-01f, 6.19414360605834324325e-01f,
+ 6.14420723888913888899e-01f, 6.09468064925773433949e-01f,
+ 6.04555390697467776029e-01f, 5.99681752619125263415e-01f,
+ 5.94846243767987448159e-01f, 5.90047996332826008015e-01f,
+ 5.85286179263371453274e-01f, 5.80559996100790898232e-01f,
+ 5.75868682972353718164e-01f, 5.71211506735253227163e-01f,
+ 5.66587763256164445025e-01f, 5.61996775814524340831e-01f,
+ 5.57437893618765945014e-01f, 5.52910490425832290562e-01f,
+ 5.48413963255265812791e-01f, 5.43947731190026262382e-01f,
+ 5.39511234256952132426e-01f, 5.35103932380457614215e-01f,
+ 5.30725304403662057062e-01f, 5.26374847171684479008e-01f,
+ 5.22052074672321841931e-01f, 5.17756517229756352272e-01f,
+ 5.13487720747326958914e-01f, 5.09245245995747941592e-01f,
+ 5.05028667943468123624e-01f, 5.00837575126148681903e-01f,
+ 4.96671569052489714213e-01f, 4.92530263643868537748e-01f,
+ 4.88413284705458028423e-01f, 4.84320269426683325253e-01f,
+ 4.80250865909046753544e-01f, 4.76204732719505863248e-01f,
+ 4.72181538467730199660e-01f, 4.68180961405693596422e-01f,
+ 4.64202689048174355069e-01f, 4.60246417812842867345e-01f,
+ 4.56311852678716434184e-01f, 4.52398706861848520777e-01f,
+ 4.48506701507203064949e-01f, 4.44635565395739396077e-01f,
+ 4.40785034665803987508e-01f, 4.36954852547985550526e-01f,
+ 4.33144769112652261445e-01f, 4.29354541029441427735e-01f,
+ 4.25583931338021970170e-01f, 4.21832709229495894654e-01f,
+ 4.18100649837848226120e-01f, 4.14387534040891125642e-01f,
+ 4.10693148270188157500e-01f, 4.07017284329473372217e-01f,
+ 4.03359739221114510510e-01f, 3.99720314980197222177e-01f,
+ 3.96098818515832451492e-01f, 3.92495061459315619512e-01f,
+ 3.88908860018788715696e-01f, 3.85340034840077283462e-01f,
+ 3.81788410873393657674e-01f, 3.78253817245619183840e-01f,
+ 3.74736087137891138443e-01f, 3.71235057668239498696e-01f,
+ 3.67750569779032587814e-01f, 3.64282468129004055601e-01f,
+ 3.60830600989648031529e-01f, 3.57394820145780500731e-01f,
+ 3.53974980800076777232e-01f, 3.50570941481406106455e-01f,
+ 3.47182563956793643900e-01f, 3.43809713146850715049e-01f,
+ 3.40452257044521866547e-01f, 3.37110066637006045021e-01f,
+ 3.33783015830718454708e-01f, 3.30470981379163586400e-01f,
+ 3.27173842813601400970e-01f, 3.23891482376391093290e-01f,
+ 3.20623784956905355514e-01f, 3.17370638029913609834e-01f,
+ 3.14131931596337177215e-01f, 3.10907558126286509559e-01f,
+ 3.07697412504292056035e-01f, 3.04501391976649993243e-01f,
+ 3.01319396100803049698e-01f, 2.98151326696685481377e-01f,
+ 2.94997087799961810184e-01f, 2.91856585617095209972e-01f,
+ 2.88729728482182923521e-01f, 2.85616426815501756042e-01f,
+ 2.82516593083707578948e-01f, 2.79430141761637940157e-01f,
+ 2.76356989295668320494e-01f, 2.73297054068577072172e-01f,
+ 2.70250256365875463072e-01f, 2.67216518343561471038e-01f,
+ 2.64195763997261190426e-01f, 2.61187919132721213522e-01f,
+ 2.58192911337619235290e-01f, 2.55210669954661961700e-01f,
+ 2.52241126055942177508e-01f, 2.49284212418528522415e-01f,
+ 2.46339863501263828249e-01f, 2.43408015422750312329e-01f,
+ 2.40488605940500588254e-01f, 2.37581574431238090606e-01f,
+ 2.34686861872330010392e-01f, 2.31804410824338724684e-01f,
+ 2.28934165414680340644e-01f, 2.26076071322380278694e-01f,
+ 2.23230075763917484855e-01f, 2.20396127480151998723e-01f,
+ 2.17574176724331130872e-01f, 2.14764175251173583536e-01f,
+ 2.11966076307030182324e-01f, 2.09179834621125076977e-01f,
+ 2.06405406397880797353e-01f, 2.03642749310334908452e-01f,
+ 2.00891822494656591136e-01f, 1.98152586545775138971e-01f,
+ 1.95425003514134304483e-01f, 1.92709036903589175926e-01f,
+ 1.90004651670464985713e-01f, 1.87311814223800304768e-01f,
+ 1.84630492426799269756e-01f, 1.81960655599522513892e-01f,
+ 1.79302274522847582272e-01f, 1.76655321443734858455e-01f,
+ 1.74019770081838553999e-01f, 1.71395595637505754327e-01f,
+ 1.68782774801211288285e-01f, 1.66181285764481906364e-01f,
+ 1.63591108232365584074e-01f, 1.61012223437511009516e-01f,
+ 1.58444614155924284882e-01f, 1.55888264724479197465e-01f,
+ 1.53343161060262855866e-01f, 1.50809290681845675763e-01f,
+ 1.48286642732574552861e-01f, 1.45775208005994028060e-01f,
+ 1.43274978973513461566e-01f, 1.40785949814444699690e-01f,
+ 1.38308116448550733057e-01f, 1.35841476571253755301e-01f,
+ 1.33386029691669155683e-01f, 1.30941777173644358090e-01f,
+ 1.28508722279999570981e-01f, 1.26086870220185887081e-01f,
+ 1.23676228201596571932e-01f, 1.21276805484790306533e-01f,
+ 1.18888613442910059947e-01f, 1.16511665625610869035e-01f,
+ 1.14145977827838487895e-01f, 1.11791568163838089811e-01f,
+ 1.09448457146811797824e-01f, 1.07116667774683801961e-01f,
+ 1.04796225622487068629e-01f, 1.02487158941935246892e-01f,
+ 1.00189498768810017482e-01f, 9.79032790388624646338e-02f,
+ 9.56285367130089991594e-02f, 9.33653119126910124859e-02f,
+ 9.11136480663737591268e-02f, 8.88735920682758862021e-02f,
+ 8.66451944505580717859e-02f, 8.44285095703534715916e-02f,
+ 8.22235958132029043366e-02f, 8.00305158146630696292e-02f,
+ 7.78493367020961224423e-02f, 7.56801303589271778804e-02f,
+ 7.35229737139813238622e-02f, 7.13779490588904025339e-02f,
+ 6.92451443970067553879e-02f, 6.71246538277884968737e-02f,
+ 6.50165779712428976156e-02f, 6.29210244377581412456e-02f,
+ 6.08381083495398780614e-02f, 5.87679529209337372930e-02f,
+ 5.67106901062029017391e-02f, 5.46664613248889208474e-02f,
+ 5.26354182767921896513e-02f, 5.06177238609477817000e-02f,
+ 4.86135532158685421122e-02f, 4.66230949019303814174e-02f,
+ 4.46465522512944634759e-02f, 4.26841449164744590750e-02f,
+ 4.07361106559409394401e-02f, 3.88027074045261474722e-02f,
+ 3.68842156885673053135e-02f, 3.49809414617161251737e-02f,
+ 3.30932194585785779961e-02f, 3.12214171919203004046e-02f,
+ 2.93659397581333588001e-02f, 2.75272356696031131329e-02f,
+ 2.57058040085489103443e-02f, 2.39022033057958785407e-02f,
+ 2.21170627073088502113e-02f, 2.03510962300445102935e-02f,
+ 1.86051212757246224594e-02f, 1.68800831525431419000e-02f,
+ 1.51770883079353092332e-02f, 1.34974506017398673818e-02f,
+ 1.18427578579078790488e-02f, 1.02149714397014590439e-02f,
+ 8.61658276939872638800e-03f, 7.05087547137322242369e-03f,
+ 5.52240329925099155545e-03f, 4.03797259336302356153e-03f,
+ 2.60907274610215926189e-03f, 1.26028593049859797236e-03f};
+
+static const uint64_t ke_double[] = {
+ 0x001C5214272497C6, 0x0000000000000000, 0x00137D5BD79C317E,
+ 0x00186EF58E3F3C10, 0x001A9BB7320EB0AE, 0x001BD127F719447C,
+ 0x001C951D0F88651A, 0x001D1BFE2D5C3972, 0x001D7E5BD56B18B2,
+ 0x001DC934DD172C70, 0x001E0409DFAC9DC8, 0x001E337B71D47836,
+ 0x001E5A8B177CB7A2, 0x001E7B42096F046C, 0x001E970DAF08AE3E,
+ 0x001EAEF5B14EF09E, 0x001EC3BD07B46556, 0x001ED5F6F08799CE,
+ 0x001EE614AE6E5688, 0x001EF46ECA361CD0, 0x001F014B76DDD4A4,
+ 0x001F0CE313A796B6, 0x001F176369F1F77A, 0x001F20F20C452570,
+ 0x001F29AE1951A874, 0x001F31B18FB95532, 0x001F39125157C106,
+ 0x001F3FE2EB6E694C, 0x001F463332D788FA, 0x001F4C10BF1D3A0E,
+ 0x001F51874C5C3322, 0x001F56A109C3ECC0, 0x001F5B66D9099996,
+ 0x001F5FE08210D08C, 0x001F6414DD445772, 0x001F6809F6859678,
+ 0x001F6BC52A2B02E6, 0x001F6F4B3D32E4F4, 0x001F72A07190F13A,
+ 0x001F75C8974D09D6, 0x001F78C71B045CC0, 0x001F7B9F12413FF4,
+ 0x001F7E5346079F8A, 0x001F80E63BE21138, 0x001F835A3DAD9162,
+ 0x001F85B16056B912, 0x001F87ED89B24262, 0x001F8A10759374FA,
+ 0x001F8C1BBA3D39AC, 0x001F8E10CC45D04A, 0x001F8FF102013E16,
+ 0x001F91BD968358E0, 0x001F9377AC47AFD8, 0x001F95204F8B64DA,
+ 0x001F96B878633892, 0x001F98410C968892, 0x001F99BAE146BA80,
+ 0x001F9B26BC697F00, 0x001F9C85561B717A, 0x001F9DD759CFD802,
+ 0x001F9F1D6761A1CE, 0x001FA058140936C0, 0x001FA187EB3A3338,
+ 0x001FA2AD6F6BC4FC, 0x001FA3C91ACE0682, 0x001FA4DB5FEE6AA2,
+ 0x001FA5E4AA4D097C, 0x001FA6E55EE46782, 0x001FA7DDDCA51EC4,
+ 0x001FA8CE7CE6A874, 0x001FA9B793CE5FEE, 0x001FAA9970ADB858,
+ 0x001FAB745E588232, 0x001FAC48A3740584, 0x001FAD1682BF9FE8,
+ 0x001FADDE3B5782C0, 0x001FAEA008F21D6C, 0x001FAF5C2418B07E,
+ 0x001FB012C25B7A12, 0x001FB0C41681DFF4, 0x001FB17050B6F1FA,
+ 0x001FB2179EB2963A, 0x001FB2BA2BDFA84A, 0x001FB358217F4E18,
+ 0x001FB3F1A6C9BE0C, 0x001FB486E10CACD6, 0x001FB517F3C793FC,
+ 0x001FB5A500C5FDAA, 0x001FB62E2837FE58, 0x001FB6B388C9010A,
+ 0x001FB7353FB50798, 0x001FB7B368DC7DA8, 0x001FB82E1ED6BA08,
+ 0x001FB8A57B0347F6, 0x001FB919959A0F74, 0x001FB98A85BA7204,
+ 0x001FB9F861796F26, 0x001FBA633DEEE286, 0x001FBACB2F41EC16,
+ 0x001FBB3048B49144, 0x001FBB929CAEA4E2, 0x001FBBF23CC8029E,
+ 0x001FBC4F39D22994, 0x001FBCA9A3E140D4, 0x001FBD018A548F9E,
+ 0x001FBD56FBDE729C, 0x001FBDAA068BD66A, 0x001FBDFAB7CB3F40,
+ 0x001FBE491C7364DE, 0x001FBE9540C9695E, 0x001FBEDF3086B128,
+ 0x001FBF26F6DE6174, 0x001FBF6C9E828AE2, 0x001FBFB031A904C4,
+ 0x001FBFF1BA0FFDB0, 0x001FC03141024588, 0x001FC06ECF5B54B2,
+ 0x001FC0AA6D8B1426, 0x001FC0E42399698A, 0x001FC11BF9298A64,
+ 0x001FC151F57D1942, 0x001FC1861F770F4A, 0x001FC1B87D9E74B4,
+ 0x001FC1E91620EA42, 0x001FC217EED505DE, 0x001FC2450D3C83FE,
+ 0x001FC27076864FC2, 0x001FC29A2F90630E, 0x001FC2C23CE98046,
+ 0x001FC2E8A2D2C6B4, 0x001FC30D654122EC, 0x001FC33087DE9C0E,
+ 0x001FC3520E0B7EC6, 0x001FC371FADF66F8, 0x001FC390512A2886,
+ 0x001FC3AD137497FA, 0x001FC3C844013348, 0x001FC3E1E4CCAB40,
+ 0x001FC3F9F78E4DA8, 0x001FC4107DB85060, 0x001FC4257877FD68,
+ 0x001FC438E8B5BFC6, 0x001FC44ACF15112A, 0x001FC45B2BF447E8,
+ 0x001FC469FF6C4504, 0x001FC477495001B2, 0x001FC483092BFBB8,
+ 0x001FC48D3E457FF6, 0x001FC495E799D21A, 0x001FC49D03DD30B0,
+ 0x001FC4A29179B432, 0x001FC4A68E8E07FC, 0x001FC4A8F8EBFB8C,
+ 0x001FC4A9CE16EA9E, 0x001FC4A90B41FA34, 0x001FC4A6AD4E28A0,
+ 0x001FC4A2B0C82E74, 0x001FC49D11E62DE2, 0x001FC495CC852DF4,
+ 0x001FC48CDC265EC0, 0x001FC4823BEC237A, 0x001FC475E696DEE6,
+ 0x001FC467D6817E82, 0x001FC458059DC036, 0x001FC4466D702E20,
+ 0x001FC433070BCB98, 0x001FC41DCB0D6E0E, 0x001FC406B196BBF6,
+ 0x001FC3EDB248CB62, 0x001FC3D2C43E593C, 0x001FC3B5DE0591B4,
+ 0x001FC396F599614C, 0x001FC376005A4592, 0x001FC352F3069370,
+ 0x001FC32DC1B22818, 0x001FC3065FBD7888, 0x001FC2DCBFCBF262,
+ 0x001FC2B0D3B99F9E, 0x001FC2828C8FFCF0, 0x001FC251DA79F164,
+ 0x001FC21EACB6D39E, 0x001FC1E8F18C6756, 0x001FC1B09637BB3C,
+ 0x001FC17586DCCD10, 0x001FC137AE74D6B6, 0x001FC0F6F6BB2414,
+ 0x001FC0B348184DA4, 0x001FC06C898BAFF0, 0x001FC022A092F364,
+ 0x001FBFD5710F72B8, 0x001FBF84DD29488E, 0x001FBF30C52FC60A,
+ 0x001FBED907770CC6, 0x001FBE7D80327DDA, 0x001FBE1E094BA614,
+ 0x001FBDBA7A354408, 0x001FBD52A7B9F826, 0x001FBCE663C6201A,
+ 0x001FBC757D2C4DE4, 0x001FBBFFBF63B7AA, 0x001FBB84F23FE6A2,
+ 0x001FBB04D9A0D18C, 0x001FBA7F351A70AC, 0x001FB9F3BF92B618,
+ 0x001FB9622ED4ABFC, 0x001FB8CA33174A16, 0x001FB82B76765B54,
+ 0x001FB7859C5B895C, 0x001FB6D840D55594, 0x001FB622F7D96942,
+ 0x001FB5654C6F37E0, 0x001FB49EBFBF69D2, 0x001FB3CEC803E746,
+ 0x001FB2F4CF539C3E, 0x001FB21032442852, 0x001FB1203E5A9604,
+ 0x001FB0243042E1C2, 0x001FAF1B31C479A6, 0x001FAE045767E104,
+ 0x001FACDE9DBF2D72, 0x001FABA8E640060A, 0x001FAA61F399FF28,
+ 0x001FA908656F66A2, 0x001FA79AB3508D3C, 0x001FA61726D1F214,
+ 0x001FA47BD48BEA00, 0x001FA2C693C5C094, 0x001FA0F4F47DF314,
+ 0x001F9F04336BBE0A, 0x001F9CF12B79F9BC, 0x001F9AB84415ABC4,
+ 0x001F98555B782FB8, 0x001F95C3ABD03F78, 0x001F92FDA9CEF1F2,
+ 0x001F8FFCDA9AE41C, 0x001F8CB99E7385F8, 0x001F892AEC479606,
+ 0x001F8545F904DB8E, 0x001F80FDC336039A, 0x001F7C427839E926,
+ 0x001F7700A3582ACC, 0x001F71200F1A241C, 0x001F6A8234B7352A,
+ 0x001F630000A8E266, 0x001F5A66904FE3C4, 0x001F50724ECE1172,
+ 0x001F44C7665C6FDA, 0x001F36E5A38A59A2, 0x001F26143450340A,
+ 0x001F113E047B0414, 0x001EF6AEFA57CBE6, 0x001ED38CA188151E,
+ 0x001EA2A61E122DB0, 0x001E5961C78B267C, 0x001DDDF62BAC0BB0,
+ 0x001CDB4DD9E4E8C0};
+
+static const double we_double[] = {
+ 9.655740063209182975e-16, 7.089014243955414331e-18,
+ 1.163941249669122378e-17, 1.524391512353216015e-17,
+ 1.833284885723743916e-17, 2.108965109464486630e-17,
+ 2.361128077843138196e-17, 2.595595772310893952e-17,
+ 2.816173554197752338e-17, 3.025504130321382330e-17,
+ 3.225508254836375280e-17, 3.417632340185027033e-17,
+ 3.602996978734452488e-17, 3.782490776869649048e-17,
+ 3.956832198097553231e-17, 4.126611778175946428e-17,
+ 4.292321808442525631e-17, 4.454377743282371417e-17,
+ 4.613133981483185932e-17, 4.768895725264635940e-17,
+ 4.921928043727962847e-17, 5.072462904503147014e-17,
+ 5.220704702792671737e-17, 5.366834661718192181e-17,
+ 5.511014372835094717e-17, 5.653388673239667134e-17,
+ 5.794088004852766616e-17, 5.933230365208943081e-17,
+ 6.070922932847179572e-17, 6.207263431163193485e-17,
+ 6.342341280303076511e-17, 6.476238575956142121e-17,
+ 6.609030925769405241e-17, 6.740788167872722244e-17,
+ 6.871574991183812442e-17, 7.001451473403929616e-17,
+ 7.130473549660643409e-17, 7.258693422414648352e-17,
+ 7.386159921381791997e-17, 7.512918820723728089e-17,
+ 7.639013119550825792e-17, 7.764483290797848102e-17,
+ 7.889367502729790548e-17, 8.013701816675454434e-17,
+ 8.137520364041762206e-17, 8.260855505210038174e-17,
+ 8.383737972539139383e-17, 8.506196999385323132e-17,
+ 8.628260436784112996e-17, 8.749954859216182511e-17,
+ 8.871305660690252281e-17, 8.992337142215357066e-17,
+ 9.113072591597909173e-17, 9.233534356381788123e-17,
+ 9.353743910649128938e-17, 9.473721916312949566e-17,
+ 9.593488279457997317e-17, 9.713062202221521206e-17,
+ 9.832462230649511362e-17, 9.951706298915071878e-17,
+ 1.007081177024294931e-16, 1.018979547484694078e-16,
+ 1.030867374515421954e-16, 1.042746244856188556e-16,
+ 1.054617701794576406e-16, 1.066483248011914702e-16,
+ 1.078344348241948498e-16, 1.090202431758350473e-16,
+ 1.102058894705578110e-16, 1.113915102286197502e-16,
+ 1.125772390816567488e-16, 1.137632069661684705e-16,
+ 1.149495423059009298e-16, 1.161363711840218308e-16,
+ 1.173238175059045788e-16, 1.185120031532669434e-16,
+ 1.197010481303465158e-16, 1.208910707027385520e-16,
+ 1.220821875294706151e-16, 1.232745137888415193e-16,
+ 1.244681632985112523e-16, 1.256632486302898513e-16,
+ 1.268598812200397542e-16, 1.280581714730749379e-16,
+ 1.292582288654119552e-16, 1.304601620412028847e-16,
+ 1.316640789066572582e-16, 1.328700867207380889e-16,
+ 1.340782921828999433e-16, 1.352888015181175458e-16,
+ 1.365017205594397770e-16, 1.377171548282880964e-16,
+ 1.389352096127063919e-16, 1.401559900437571538e-16,
+ 1.413796011702485188e-16, 1.426061480319665444e-16,
+ 1.438357357315790180e-16, 1.450684695053687684e-16,
+ 1.463044547929475721e-16, 1.475437973060951633e-16,
+ 1.487866030968626066e-16, 1.500329786250736949e-16,
+ 1.512830308253539427e-16, 1.525368671738125550e-16,
+ 1.537945957544996933e-16, 1.550563253257577148e-16,
+ 1.563221653865837505e-16, 1.575922262431176140e-16,
+ 1.588666190753684151e-16, 1.601454560042916733e-16,
+ 1.614288501593278662e-16, 1.627169157465130500e-16,
+ 1.640097681172717950e-16, 1.653075238380036909e-16,
+ 1.666103007605742067e-16, 1.679182180938228863e-16,
+ 1.692313964762022267e-16, 1.705499580496629830e-16,
+ 1.718740265349031656e-16, 1.732037273081008369e-16,
+ 1.745391874792533975e-16, 1.758805359722491379e-16,
+ 1.772279036068006489e-16, 1.785814231823732619e-16,
+ 1.799412295642463721e-16, 1.813074597718501559e-16,
+ 1.826802530695252266e-16, 1.840597510598587828e-16,
+ 1.854460977797569461e-16, 1.868394397994192684e-16,
+ 1.882399263243892051e-16, 1.896477093008616722e-16,
+ 1.910629435244376536e-16, 1.924857867525243818e-16,
+ 1.939163998205899420e-16, 1.953549467624909132e-16,
+ 1.968015949351037382e-16, 1.982565151475019047e-16,
+ 1.997198817949342081e-16, 2.011918729978734671e-16,
+ 2.026726707464198289e-16, 2.041624610503588774e-16,
+ 2.056614340951917875e-16, 2.071697844044737034e-16,
+ 2.086877110088159721e-16, 2.102154176219292789e-16,
+ 2.117531128241075913e-16, 2.133010102535779087e-16,
+ 2.148593288061663316e-16, 2.164282928437604723e-16,
+ 2.180081324120784027e-16, 2.195990834682870728e-16,
+ 2.212013881190495942e-16, 2.228152948696180545e-16,
+ 2.244410588846308588e-16, 2.260789422613173739e-16,
+ 2.277292143158621037e-16, 2.293921518837311354e-16,
+ 2.310680396348213318e-16, 2.327571704043534613e-16,
+ 2.344598455404957859e-16, 2.361763752697773994e-16,
+ 2.379070790814276700e-16, 2.396522861318623520e-16,
+ 2.414123356706293277e-16, 2.431875774892255956e-16,
+ 2.449783723943070217e-16, 2.467850927069288738e-16,
+ 2.486081227895851719e-16, 2.504478596029557040e-16,
+ 2.523047132944217013e-16, 2.541791078205812227e-16,
+ 2.560714816061770759e-16, 2.579822882420530896e-16,
+ 2.599119972249746917e-16, 2.618610947423924219e-16,
+ 2.638300845054942823e-16, 2.658194886341845120e-16,
+ 2.678298485979525166e-16, 2.698617262169488933e-16,
+ 2.719157047279818500e-16, 2.739923899205814823e-16,
+ 2.760924113487617126e-16, 2.782164236246436081e-16,
+ 2.803651078006983464e-16, 2.825391728480253184e-16,
+ 2.847393572388174091e-16, 2.869664306419817679e-16,
+ 2.892211957417995598e-16, 2.915044901905293183e-16,
+ 2.938171887070028633e-16, 2.961602053345465687e-16,
+ 2.985344958730045276e-16, 3.009410605012618141e-16,
+ 3.033809466085003416e-16, 3.058552518544860874e-16,
+ 3.083651274815310004e-16, 3.109117819034266344e-16,
+ 3.134964845996663118e-16, 3.161205703467105734e-16,
+ 3.187854438219713117e-16, 3.214925846206797361e-16,
+ 3.242435527309451638e-16, 3.270399945182240440e-16,
+ 3.298836492772283149e-16, 3.327763564171671408e-16,
+ 3.357200633553244075e-16, 3.387168342045505162e-16,
+ 3.417688593525636996e-16, 3.448784660453423890e-16,
+ 3.480481301037442286e-16, 3.512804889222979418e-16,
+ 3.545783559224791863e-16, 3.579447366604276541e-16,
+ 3.613828468219060593e-16, 3.648961323764542545e-16,
+ 3.684882922095621322e-16, 3.721633036080207290e-16,
+ 3.759254510416256532e-16, 3.797793587668874387e-16,
+ 3.837300278789213687e-16, 3.877828785607895292e-16,
+ 3.919437984311428867e-16, 3.962191980786774996e-16,
+ 4.006160751056541688e-16, 4.051420882956573177e-16,
+ 4.098056438903062509e-16, 4.146159964290904582e-16,
+ 4.195833672073398926e-16, 4.247190841824385048e-16,
+ 4.300357481667470702e-16, 4.355474314693952008e-16,
+ 4.412699169036069903e-16, 4.472209874259932285e-16,
+ 4.534207798565834480e-16, 4.598922204905932469e-16,
+ 4.666615664711475780e-16, 4.737590853262492027e-16,
+ 4.812199172829237933e-16, 4.890851827392209900e-16,
+ 4.974034236191939753e-16, 5.062325072144159699e-16,
+ 5.156421828878082953e-16, 5.257175802022274839e-16,
+ 5.365640977112021618e-16, 5.483144034258703912e-16,
+ 5.611387454675159622e-16, 5.752606481503331688e-16,
+ 5.909817641652102998e-16, 6.087231416180907671e-16,
+ 6.290979034877557049e-16, 6.530492053564040799e-16,
+ 6.821393079028928626e-16, 7.192444966089361564e-16,
+ 7.706095350032096755e-16, 8.545517038584027421e-16};
+
+static const double fe_double[] = {
+ 1.000000000000000000e+00, 9.381436808621747003e-01,
+ 9.004699299257464817e-01, 8.717043323812035949e-01,
+ 8.477855006239896074e-01, 8.269932966430503241e-01,
+ 8.084216515230083777e-01, 7.915276369724956185e-01,
+ 7.759568520401155522e-01, 7.614633888498962833e-01,
+ 7.478686219851951034e-01, 7.350380924314234843e-01,
+ 7.228676595935720206e-01, 7.112747608050760117e-01,
+ 7.001926550827881623e-01, 6.895664961170779872e-01,
+ 6.793505722647653622e-01, 6.695063167319247333e-01,
+ 6.600008410789997004e-01, 6.508058334145710999e-01,
+ 6.418967164272660897e-01, 6.332519942143660652e-01,
+ 6.248527387036659775e-01, 6.166821809152076561e-01,
+ 6.087253820796220127e-01, 6.009689663652322267e-01,
+ 5.934009016917334289e-01, 5.860103184772680329e-01,
+ 5.787873586028450257e-01, 5.717230486648258170e-01,
+ 5.648091929124001709e-01, 5.580382822625874484e-01,
+ 5.514034165406412891e-01, 5.448982376724396115e-01,
+ 5.385168720028619127e-01, 5.322538802630433219e-01,
+ 5.261042139836197284e-01, 5.200631773682335979e-01,
+ 5.141263938147485613e-01, 5.082897764106428795e-01,
+ 5.025495018413477233e-01, 4.969019872415495476e-01,
+ 4.913438695940325340e-01, 4.858719873418849144e-01,
+ 4.804833639304542103e-01, 4.751751930373773747e-01,
+ 4.699448252839599771e-01, 4.647897562504261781e-01,
+ 4.597076156421376902e-01, 4.546961574746155033e-01,
+ 4.497532511627549967e-01, 4.448768734145485126e-01,
+ 4.400651008423538957e-01, 4.353161032156365740e-01,
+ 4.306281372884588343e-01, 4.259995411430343437e-01,
+ 4.214287289976165751e-01, 4.169141864330028757e-01,
+ 4.124544659971611793e-01, 4.080481831520323954e-01,
+ 4.036940125305302773e-01, 3.993906844752310725e-01,
+ 3.951369818332901573e-01, 3.909317369847971069e-01,
+ 3.867738290841376547e-01, 3.826621814960098344e-01,
+ 3.785957594095807899e-01, 3.745735676159021588e-01,
+ 3.705946484351460013e-01, 3.666580797815141568e-01,
+ 3.627629733548177748e-01, 3.589084729487497794e-01,
+ 3.550937528667874599e-01, 3.513180164374833381e-01,
+ 3.475804946216369817e-01, 3.438804447045024082e-01,
+ 3.402171490667800224e-01, 3.365899140286776059e-01,
+ 3.329980687618089852e-01, 3.294409642641363267e-01,
+ 3.259179723935561879e-01, 3.224284849560891675e-01,
+ 3.189719128449572394e-01, 3.155476852271289490e-01,
+ 3.121552487741795501e-01, 3.087940669345601852e-01,
+ 3.054636192445902565e-01, 3.021634006756935276e-01,
+ 2.988929210155817917e-01, 2.956517042812611962e-01,
+ 2.924392881618925744e-01, 2.892552234896777485e-01,
+ 2.860990737370768255e-01, 2.829704145387807457e-01,
+ 2.798688332369729248e-01, 2.767939284485173568e-01,
+ 2.737453096528029706e-01, 2.707225967990600224e-01,
+ 2.677254199320447947e-01, 2.647534188350622042e-01,
+ 2.618062426893629779e-01, 2.588835497490162285e-01,
+ 2.559850070304153791e-01, 2.531102900156294577e-01,
+ 2.502590823688622956e-01, 2.474310756653276266e-01,
+ 2.446259691318921070e-01, 2.418434693988772144e-01,
+ 2.390832902624491774e-01, 2.363451524570596429e-01,
+ 2.336287834374333461e-01, 2.309339171696274118e-01,
+ 2.282602939307167011e-01, 2.256076601166840667e-01,
+ 2.229757680581201940e-01, 2.203643758433594946e-01,
+ 2.177732471487005272e-01, 2.152021510753786837e-01,
+ 2.126508619929782795e-01, 2.101191593889882581e-01,
+ 2.076068277242220372e-01, 2.051136562938377095e-01,
+ 2.026394390937090173e-01, 2.001839746919112650e-01,
+ 1.977470661050988732e-01, 1.953285206795632167e-01,
+ 1.929281499767713515e-01, 1.905457696631953912e-01,
+ 1.881811994042543179e-01, 1.858342627621971110e-01,
+ 1.835047870977674633e-01, 1.811926034754962889e-01,
+ 1.788975465724783054e-01, 1.766194545904948843e-01,
+ 1.743581691713534942e-01, 1.721135353153200598e-01,
+ 1.698854013025276610e-01, 1.676736186172501919e-01,
+ 1.654780418749360049e-01, 1.632985287519018169e-01,
+ 1.611349399175920349e-01, 1.589871389693142123e-01,
+ 1.568549923693652315e-01, 1.547383693844680830e-01,
+ 1.526371420274428570e-01, 1.505511850010398944e-01,
+ 1.484803756438667910e-01, 1.464245938783449441e-01,
+ 1.443837221606347754e-01, 1.423576454324722018e-01,
+ 1.403462510748624548e-01, 1.383494288635802039e-01,
+ 1.363670709264288572e-01, 1.343990717022136294e-01,
+ 1.324453279013875218e-01, 1.305057384683307731e-01,
+ 1.285802045452281717e-01, 1.266686294375106714e-01,
+ 1.247709185808309612e-01, 1.228869795095451356e-01,
+ 1.210167218266748335e-01, 1.191600571753276827e-01,
+ 1.173168992115555670e-01, 1.154871635786335338e-01,
+ 1.136707678827443141e-01, 1.118676316700562973e-01,
+ 1.100776764051853845e-01, 1.083008254510337970e-01,
+ 1.065370040500016602e-01, 1.047861393065701724e-01,
+ 1.030481601712577161e-01, 1.013229974259536315e-01,
+ 9.961058367063713170e-02, 9.791085331149219917e-02,
+ 9.622374255043279756e-02, 9.454918937605585882e-02,
+ 9.288713355604354127e-02, 9.123751663104015530e-02,
+ 8.960028191003285847e-02, 8.797537446727021759e-02,
+ 8.636274114075691288e-02, 8.476233053236811865e-02,
+ 8.317409300963238272e-02, 8.159798070923741931e-02,
+ 8.003394754231990538e-02, 7.848194920160642130e-02,
+ 7.694194317048050347e-02, 7.541388873405840965e-02,
+ 7.389774699236474620e-02, 7.239348087570873780e-02,
+ 7.090105516237182881e-02, 6.942043649872875477e-02,
+ 6.795159342193660135e-02, 6.649449638533977414e-02,
+ 6.504911778675374900e-02, 6.361543199980733421e-02,
+ 6.219341540854099459e-02, 6.078304644547963265e-02,
+ 5.938430563342026597e-02, 5.799717563120065922e-02,
+ 5.662164128374287675e-02, 5.525768967669703741e-02,
+ 5.390531019604608703e-02, 5.256449459307169225e-02,
+ 5.123523705512628146e-02, 4.991753428270637172e-02,
+ 4.861138557337949667e-02, 4.731679291318154762e-02,
+ 4.603376107617516977e-02, 4.476229773294328196e-02,
+ 4.350241356888818328e-02, 4.225412241331623353e-02,
+ 4.101744138041481941e-02, 3.979239102337412542e-02,
+ 3.857899550307485742e-02, 3.737728277295936097e-02,
+ 3.618728478193142251e-02, 3.500903769739741045e-02,
+ 3.384258215087432992e-02, 3.268796350895953468e-02,
+ 3.154523217289360859e-02, 3.041444391046660423e-02,
+ 2.929566022463739317e-02, 2.818894876397863569e-02,
+ 2.709438378095579969e-02, 2.601204664513421735e-02,
+ 2.494202641973178314e-02, 2.388442051155817078e-02,
+ 2.283933540638524023e-02, 2.180688750428358066e-02,
+ 2.078720407257811723e-02, 1.978042433800974303e-02,
+ 1.878670074469603046e-02, 1.780620041091136169e-02,
+ 1.683910682603994777e-02, 1.588562183997316302e-02,
+ 1.494596801169114850e-02, 1.402039140318193759e-02,
+ 1.310916493125499106e-02, 1.221259242625538123e-02,
+ 1.133101359783459695e-02, 1.046481018102997894e-02,
+ 9.614413642502209895e-03, 8.780314985808975251e-03,
+ 7.963077438017040002e-03, 7.163353183634983863e-03,
+ 6.381905937319179087e-03, 5.619642207205483020e-03,
+ 4.877655983542392333e-03, 4.157295120833795314e-03,
+ 3.460264777836904049e-03, 2.788798793574076128e-03,
+ 2.145967743718906265e-03, 1.536299780301572356e-03,
+ 9.672692823271745359e-04, 4.541343538414967652e-04};
+
+static const uint32_t ke_float[] = {
+ 0x00714851UL, 0x00000000UL, 0x004DF56FUL, 0x0061BBD6UL, 0x006A6EDDUL,
+ 0x006F44A0UL, 0x00725474UL, 0x00746FF9UL, 0x0075F96FUL, 0x007724D3UL,
+ 0x00781027UL, 0x0078CDEEUL, 0x00796A2CUL, 0x0079ED08UL, 0x007A5C37UL,
+ 0x007ABBD7UL, 0x007B0EF4UL, 0x007B57DCUL, 0x007B9853UL, 0x007BD1BBUL,
+ 0x007C052EUL, 0x007C338CUL, 0x007C5D8EUL, 0x007C83C8UL, 0x007CA6B8UL,
+ 0x007CC6C6UL, 0x007CE449UL, 0x007CFF8CUL, 0x007D18CDUL, 0x007D3043UL,
+ 0x007D461DUL, 0x007D5A84UL, 0x007D6D9BUL, 0x007D7F82UL, 0x007D9053UL,
+ 0x007DA028UL, 0x007DAF15UL, 0x007DBD2DUL, 0x007DCA82UL, 0x007DD722UL,
+ 0x007DE31CUL, 0x007DEE7CUL, 0x007DF94DUL, 0x007E0399UL, 0x007E0D69UL,
+ 0x007E16C6UL, 0x007E1FB6UL, 0x007E2842UL, 0x007E306FUL, 0x007E3843UL,
+ 0x007E3FC4UL, 0x007E46F6UL, 0x007E4DDFUL, 0x007E5481UL, 0x007E5AE2UL,
+ 0x007E6104UL, 0x007E66ECUL, 0x007E6C9BUL, 0x007E7215UL, 0x007E775DUL,
+ 0x007E7C76UL, 0x007E8160UL, 0x007E8620UL, 0x007E8AB6UL, 0x007E8F24UL,
+ 0x007E936DUL, 0x007E9793UL, 0x007E9B95UL, 0x007E9F77UL, 0x007EA33AUL,
+ 0x007EA6DEUL, 0x007EAA66UL, 0x007EADD1UL, 0x007EB123UL, 0x007EB45AUL,
+ 0x007EB779UL, 0x007EBA80UL, 0x007EBD71UL, 0x007EC04BUL, 0x007EC310UL,
+ 0x007EC5C1UL, 0x007EC85EUL, 0x007ECAE9UL, 0x007ECD61UL, 0x007ECFC7UL,
+ 0x007ED21CUL, 0x007ED460UL, 0x007ED694UL, 0x007ED8B9UL, 0x007EDACEUL,
+ 0x007EDCD5UL, 0x007EDECEUL, 0x007EE0B8UL, 0x007EE296UL, 0x007EE466UL,
+ 0x007EE62AUL, 0x007EE7E2UL, 0x007EE98DUL, 0x007EEB2DUL, 0x007EECC1UL,
+ 0x007EEE4AUL, 0x007EEFC9UL, 0x007EF13DUL, 0x007EF2A7UL, 0x007EF406UL,
+ 0x007EF55CUL, 0x007EF6A8UL, 0x007EF7EBUL, 0x007EF924UL, 0x007EFA55UL,
+ 0x007EFB7DUL, 0x007EFC9CUL, 0x007EFDB2UL, 0x007EFEC1UL, 0x007EFFC7UL,
+ 0x007F00C5UL, 0x007F01BBUL, 0x007F02AAUL, 0x007F0391UL, 0x007F0470UL,
+ 0x007F0548UL, 0x007F0618UL, 0x007F06E2UL, 0x007F07A4UL, 0x007F0860UL,
+ 0x007F0914UL, 0x007F09C2UL, 0x007F0A69UL, 0x007F0B09UL, 0x007F0BA3UL,
+ 0x007F0C36UL, 0x007F0CC2UL, 0x007F0D48UL, 0x007F0DC8UL, 0x007F0E41UL,
+ 0x007F0EB4UL, 0x007F0F21UL, 0x007F0F88UL, 0x007F0FE8UL, 0x007F1042UL,
+ 0x007F1096UL, 0x007F10E4UL, 0x007F112BUL, 0x007F116DUL, 0x007F11A8UL,
+ 0x007F11DDUL, 0x007F120CUL, 0x007F1235UL, 0x007F1258UL, 0x007F1274UL,
+ 0x007F128AUL, 0x007F129AUL, 0x007F12A4UL, 0x007F12A7UL, 0x007F12A4UL,
+ 0x007F129BUL, 0x007F128BUL, 0x007F1274UL, 0x007F1257UL, 0x007F1233UL,
+ 0x007F1209UL, 0x007F11D8UL, 0x007F119FUL, 0x007F1160UL, 0x007F111AUL,
+ 0x007F10CCUL, 0x007F1077UL, 0x007F101BUL, 0x007F0FB7UL, 0x007F0F4BUL,
+ 0x007F0ED7UL, 0x007F0E5CUL, 0x007F0DD8UL, 0x007F0D4CUL, 0x007F0CB7UL,
+ 0x007F0C19UL, 0x007F0B73UL, 0x007F0AC3UL, 0x007F0A0AUL, 0x007F0947UL,
+ 0x007F087BUL, 0x007F07A4UL, 0x007F06C2UL, 0x007F05D6UL, 0x007F04DFUL,
+ 0x007F03DCUL, 0x007F02CDUL, 0x007F01B2UL, 0x007F008BUL, 0x007EFF56UL,
+ 0x007EFE13UL, 0x007EFCC3UL, 0x007EFB64UL, 0x007EF9F6UL, 0x007EF878UL,
+ 0x007EF6EAUL, 0x007EF54BUL, 0x007EF39AUL, 0x007EF1D6UL, 0x007EEFFFUL,
+ 0x007EEE14UL, 0x007EEC13UL, 0x007EE9FDUL, 0x007EE7CFUL, 0x007EE589UL,
+ 0x007EE329UL, 0x007EE0AEUL, 0x007EDE16UL, 0x007EDB61UL, 0x007ED88CUL,
+ 0x007ED595UL, 0x007ED27BUL, 0x007ECF3BUL, 0x007ECBD3UL, 0x007EC841UL,
+ 0x007EC481UL, 0x007EC091UL, 0x007EBC6DUL, 0x007EB811UL, 0x007EB37AUL,
+ 0x007EAEA4UL, 0x007EA988UL, 0x007EA422UL, 0x007E9E6BUL, 0x007E985DUL,
+ 0x007E91EFUL, 0x007E8B1AUL, 0x007E83D4UL, 0x007E7C11UL, 0x007E73C5UL,
+ 0x007E6AE1UL, 0x007E6155UL, 0x007E570FUL, 0x007E4BF7UL, 0x007E3FF3UL,
+ 0x007E32E6UL, 0x007E24ACUL, 0x007E1518UL, 0x007E03F7UL, 0x007DF10AUL,
+ 0x007DDC03UL, 0x007DC480UL, 0x007DAA09UL, 0x007D8C00UL, 0x007D699AUL,
+ 0x007D41C9UL, 0x007D131EUL, 0x007CDB97UL, 0x007C9851UL, 0x007C44F8UL,
+ 0x007BDABCUL, 0x007B4E33UL, 0x007A8A98UL, 0x00796587UL, 0x007777D9UL,
+ 0x00736D37UL,
+};
+static const float we_float[] = {
+ 1.03677719e-06F, 7.61177108e-09F, 1.24977240e-08F, 1.63680292e-08F,
+ 1.96847466e-08F, 2.26448404e-08F, 2.53524197e-08F, 2.78699974e-08F,
+ 3.02384333e-08F, 3.24861032e-08F, 3.46336312e-08F, 3.66965478e-08F,
+ 3.86868855e-08F, 4.06141855e-08F, 4.24861622e-08F, 4.43091566e-08F,
+ 4.60884545e-08F, 4.78285168e-08F, 4.95331490e-08F, 5.12056279e-08F,
+ 5.28488000e-08F, 5.44651557e-08F, 5.60568899e-08F, 5.76259484e-08F,
+ 5.91740662e-08F, 6.07027987e-08F, 6.22135462e-08F, 6.37075759e-08F,
+ 6.51860386e-08F, 6.66499836e-08F, 6.81003709e-08F, 6.95380822e-08F,
+ 7.09639292e-08F, 7.23786618e-08F, 7.37829746e-08F, 7.51775128e-08F,
+ 7.65628768e-08F, 7.79396272e-08F, 7.93082883e-08F, 8.06693516e-08F,
+ 8.20232788e-08F, 8.33705045e-08F, 8.47114385e-08F, 8.60464681e-08F,
+ 8.73759596e-08F, 8.87002606e-08F, 9.00197010e-08F, 9.13345948e-08F,
+ 9.26452410e-08F, 9.39519249e-08F, 9.52549192e-08F, 9.65544849e-08F,
+ 9.78508719e-08F, 9.91443202e-08F, 1.00435060e-07F, 1.01723315e-07F,
+ 1.03009296e-07F, 1.04293211e-07F, 1.05575259e-07F, 1.06855633e-07F,
+ 1.08134518e-07F, 1.09412096e-07F, 1.10688542e-07F, 1.11964025e-07F,
+ 1.13238713e-07F, 1.14512767e-07F, 1.15786343e-07F, 1.17059595e-07F,
+ 1.18332673e-07F, 1.19605723e-07F, 1.20878890e-07F, 1.22152313e-07F,
+ 1.23426131e-07F, 1.24700479e-07F, 1.25975490e-07F, 1.27251294e-07F,
+ 1.28528022e-07F, 1.29805799e-07F, 1.31084751e-07F, 1.32365001e-07F,
+ 1.33646673e-07F, 1.34929886e-07F, 1.36214760e-07F, 1.37501415e-07F,
+ 1.38789966e-07F, 1.40080532e-07F, 1.41373228e-07F, 1.42668169e-07F,
+ 1.43965470e-07F, 1.45265245e-07F, 1.46567606e-07F, 1.47872669e-07F,
+ 1.49180545e-07F, 1.50491348e-07F, 1.51805191e-07F, 1.53122186e-07F,
+ 1.54442445e-07F, 1.55766083e-07F, 1.57093212e-07F, 1.58423946e-07F,
+ 1.59758399e-07F, 1.61096684e-07F, 1.62438917e-07F, 1.63785214e-07F,
+ 1.65135690e-07F, 1.66490462e-07F, 1.67849647e-07F, 1.69213364e-07F,
+ 1.70581733e-07F, 1.71954874e-07F, 1.73332908e-07F, 1.74715958e-07F,
+ 1.76104148e-07F, 1.77497602e-07F, 1.78896448e-07F, 1.80300814e-07F,
+ 1.81710828e-07F, 1.83126623e-07F, 1.84548331e-07F, 1.85976086e-07F,
+ 1.87410026e-07F, 1.88850288e-07F, 1.90297012e-07F, 1.91750343e-07F,
+ 1.93210424e-07F, 1.94677403e-07F, 1.96151428e-07F, 1.97632653e-07F,
+ 1.99121231e-07F, 2.00617321e-07F, 2.02121082e-07F, 2.03632677e-07F,
+ 2.05152273e-07F, 2.06680040e-07F, 2.08216149e-07F, 2.09760777e-07F,
+ 2.11314104e-07F, 2.12876312e-07F, 2.14447590e-07F, 2.16028129e-07F,
+ 2.17618123e-07F, 2.19217773e-07F, 2.20827283e-07F, 2.22446862e-07F,
+ 2.24076723e-07F, 2.25717086e-07F, 2.27368174e-07F, 2.29030216e-07F,
+ 2.30703448e-07F, 2.32388110e-07F, 2.34084450e-07F, 2.35792720e-07F,
+ 2.37513182e-07F, 2.39246101e-07F, 2.40991752e-07F, 2.42750416e-07F,
+ 2.44522382e-07F, 2.46307948e-07F, 2.48107418e-07F, 2.49921109e-07F,
+ 2.51749342e-07F, 2.53592452e-07F, 2.55450781e-07F, 2.57324683e-07F,
+ 2.59214522e-07F, 2.61120673e-07F, 2.63043524e-07F, 2.64983476e-07F,
+ 2.66940939e-07F, 2.68916342e-07F, 2.70910123e-07F, 2.72922739e-07F,
+ 2.74954660e-07F, 2.77006373e-07F, 2.79078382e-07F, 2.81171210e-07F,
+ 2.83285396e-07F, 2.85421503e-07F, 2.87580110e-07F, 2.89761822e-07F,
+ 2.91967265e-07F, 2.94197089e-07F, 2.96451969e-07F, 2.98732610e-07F,
+ 3.01039742e-07F, 3.03374127e-07F, 3.05736557e-07F, 3.08127859e-07F,
+ 3.10548894e-07F, 3.13000563e-07F, 3.15483804e-07F, 3.17999599e-07F,
+ 3.20548974e-07F, 3.23133003e-07F, 3.25752811e-07F, 3.28409576e-07F,
+ 3.31104534e-07F, 3.33838984e-07F, 3.36614287e-07F, 3.39431878e-07F,
+ 3.42293264e-07F, 3.45200034e-07F, 3.48153864e-07F, 3.51156520e-07F,
+ 3.54209871e-07F, 3.57315892e-07F, 3.60476673e-07F, 3.63694431e-07F,
+ 3.66971518e-07F, 3.70310433e-07F, 3.73713834e-07F, 3.77184553e-07F,
+ 3.80725611e-07F, 3.84340234e-07F, 3.88031877e-07F, 3.91804239e-07F,
+ 3.95661291e-07F, 3.99607304e-07F, 4.03646879e-07F, 4.07784981e-07F,
+ 4.12026980e-07F, 4.16378695e-07F, 4.20846449e-07F, 4.25437124e-07F,
+ 4.30158235e-07F, 4.35018005e-07F, 4.40025460e-07F, 4.45190536e-07F,
+ 4.50524210e-07F, 4.56038644e-07F, 4.61747369e-07F, 4.67665494e-07F,
+ 4.73809965e-07F, 4.80199879e-07F, 4.86856855e-07F, 4.93805512e-07F,
+ 5.01074042e-07F, 5.08694944e-07F, 5.16705952e-07F, 5.25151216e-07F,
+ 5.34082859e-07F, 5.43563016e-07F, 5.53666578e-07F, 5.64484953e-07F,
+ 5.76131313e-07F, 5.88748108e-07F, 6.02518140e-07F, 6.17681418e-07F,
+ 6.34561837e-07F, 6.53611496e-07F, 6.75488730e-07F, 7.01206245e-07F,
+ 7.32441505e-07F, 7.72282898e-07F, 8.27435688e-07F, 9.17567905e-07F,
+};
+static const float fe_float[] = {
+ 1.00000000e+00F, 9.38143681e-01F, 9.00469930e-01F, 8.71704332e-01F,
+ 8.47785501e-01F, 8.26993297e-01F, 8.08421652e-01F, 7.91527637e-01F,
+ 7.75956852e-01F, 7.61463389e-01F, 7.47868622e-01F, 7.35038092e-01F,
+ 7.22867660e-01F, 7.11274761e-01F, 7.00192655e-01F, 6.89566496e-01F,
+ 6.79350572e-01F, 6.69506317e-01F, 6.60000841e-01F, 6.50805833e-01F,
+ 6.41896716e-01F, 6.33251994e-01F, 6.24852739e-01F, 6.16682181e-01F,
+ 6.08725382e-01F, 6.00968966e-01F, 5.93400902e-01F, 5.86010318e-01F,
+ 5.78787359e-01F, 5.71723049e-01F, 5.64809193e-01F, 5.58038282e-01F,
+ 5.51403417e-01F, 5.44898238e-01F, 5.38516872e-01F, 5.32253880e-01F,
+ 5.26104214e-01F, 5.20063177e-01F, 5.14126394e-01F, 5.08289776e-01F,
+ 5.02549502e-01F, 4.96901987e-01F, 4.91343870e-01F, 4.85871987e-01F,
+ 4.80483364e-01F, 4.75175193e-01F, 4.69944825e-01F, 4.64789756e-01F,
+ 4.59707616e-01F, 4.54696157e-01F, 4.49753251e-01F, 4.44876873e-01F,
+ 4.40065101e-01F, 4.35316103e-01F, 4.30628137e-01F, 4.25999541e-01F,
+ 4.21428729e-01F, 4.16914186e-01F, 4.12454466e-01F, 4.08048183e-01F,
+ 4.03694013e-01F, 3.99390684e-01F, 3.95136982e-01F, 3.90931737e-01F,
+ 3.86773829e-01F, 3.82662181e-01F, 3.78595759e-01F, 3.74573568e-01F,
+ 3.70594648e-01F, 3.66658080e-01F, 3.62762973e-01F, 3.58908473e-01F,
+ 3.55093753e-01F, 3.51318016e-01F, 3.47580495e-01F, 3.43880445e-01F,
+ 3.40217149e-01F, 3.36589914e-01F, 3.32998069e-01F, 3.29440964e-01F,
+ 3.25917972e-01F, 3.22428485e-01F, 3.18971913e-01F, 3.15547685e-01F,
+ 3.12155249e-01F, 3.08794067e-01F, 3.05463619e-01F, 3.02163401e-01F,
+ 2.98892921e-01F, 2.95651704e-01F, 2.92439288e-01F, 2.89255223e-01F,
+ 2.86099074e-01F, 2.82970415e-01F, 2.79868833e-01F, 2.76793928e-01F,
+ 2.73745310e-01F, 2.70722597e-01F, 2.67725420e-01F, 2.64753419e-01F,
+ 2.61806243e-01F, 2.58883550e-01F, 2.55985007e-01F, 2.53110290e-01F,
+ 2.50259082e-01F, 2.47431076e-01F, 2.44625969e-01F, 2.41843469e-01F,
+ 2.39083290e-01F, 2.36345152e-01F, 2.33628783e-01F, 2.30933917e-01F,
+ 2.28260294e-01F, 2.25607660e-01F, 2.22975768e-01F, 2.20364376e-01F,
+ 2.17773247e-01F, 2.15202151e-01F, 2.12650862e-01F, 2.10119159e-01F,
+ 2.07606828e-01F, 2.05113656e-01F, 2.02639439e-01F, 2.00183975e-01F,
+ 1.97747066e-01F, 1.95328521e-01F, 1.92928150e-01F, 1.90545770e-01F,
+ 1.88181199e-01F, 1.85834263e-01F, 1.83504787e-01F, 1.81192603e-01F,
+ 1.78897547e-01F, 1.76619455e-01F, 1.74358169e-01F, 1.72113535e-01F,
+ 1.69885401e-01F, 1.67673619e-01F, 1.65478042e-01F, 1.63298529e-01F,
+ 1.61134940e-01F, 1.58987139e-01F, 1.56854992e-01F, 1.54738369e-01F,
+ 1.52637142e-01F, 1.50551185e-01F, 1.48480376e-01F, 1.46424594e-01F,
+ 1.44383722e-01F, 1.42357645e-01F, 1.40346251e-01F, 1.38349429e-01F,
+ 1.36367071e-01F, 1.34399072e-01F, 1.32445328e-01F, 1.30505738e-01F,
+ 1.28580205e-01F, 1.26668629e-01F, 1.24770919e-01F, 1.22886980e-01F,
+ 1.21016722e-01F, 1.19160057e-01F, 1.17316899e-01F, 1.15487164e-01F,
+ 1.13670768e-01F, 1.11867632e-01F, 1.10077676e-01F, 1.08300825e-01F,
+ 1.06537004e-01F, 1.04786139e-01F, 1.03048160e-01F, 1.01322997e-01F,
+ 9.96105837e-02F, 9.79108533e-02F, 9.62237426e-02F, 9.45491894e-02F,
+ 9.28871336e-02F, 9.12375166e-02F, 8.96002819e-02F, 8.79753745e-02F,
+ 8.63627411e-02F, 8.47623305e-02F, 8.31740930e-02F, 8.15979807e-02F,
+ 8.00339475e-02F, 7.84819492e-02F, 7.69419432e-02F, 7.54138887e-02F,
+ 7.38977470e-02F, 7.23934809e-02F, 7.09010552e-02F, 6.94204365e-02F,
+ 6.79515934e-02F, 6.64944964e-02F, 6.50491178e-02F, 6.36154320e-02F,
+ 6.21934154e-02F, 6.07830464e-02F, 5.93843056e-02F, 5.79971756e-02F,
+ 5.66216413e-02F, 5.52576897e-02F, 5.39053102e-02F, 5.25644946e-02F,
+ 5.12352371e-02F, 4.99175343e-02F, 4.86113856e-02F, 4.73167929e-02F,
+ 4.60337611e-02F, 4.47622977e-02F, 4.35024136e-02F, 4.22541224e-02F,
+ 4.10174414e-02F, 3.97923910e-02F, 3.85789955e-02F, 3.73772828e-02F,
+ 3.61872848e-02F, 3.50090377e-02F, 3.38425822e-02F, 3.26879635e-02F,
+ 3.15452322e-02F, 3.04144439e-02F, 2.92956602e-02F, 2.81889488e-02F,
+ 2.70943838e-02F, 2.60120466e-02F, 2.49420264e-02F, 2.38844205e-02F,
+ 2.28393354e-02F, 2.18068875e-02F, 2.07872041e-02F, 1.97804243e-02F,
+ 1.87867007e-02F, 1.78062004e-02F, 1.68391068e-02F, 1.58856218e-02F,
+ 1.49459680e-02F, 1.40203914e-02F, 1.31091649e-02F, 1.22125924e-02F,
+ 1.13310136e-02F, 1.04648102e-02F, 9.61441364e-03F, 8.78031499e-03F,
+ 7.96307744e-03F, 7.16335318e-03F, 6.38190594e-03F, 5.61964221e-03F,
+ 4.87765598e-03F, 4.15729512e-03F, 3.46026478e-03F, 2.78879879e-03F,
+ 2.14596774e-03F, 1.53629978e-03F, 9.67269282e-04F, 4.54134354e-04F,
+};
+
+
+static const double ziggurat_nor_r = 3.6541528853610087963519472518;
+static const double ziggurat_nor_inv_r =
+ 0.27366123732975827203338247596; // 1.0 / ziggurat_nor_r;
+static const double ziggurat_exp_r = 7.6971174701310497140446280481;
+
+static const float ziggurat_nor_r_f = 3.6541528853610087963519472518f;
+static const float ziggurat_nor_inv_r_f = 0.27366123732975827203338247596f;
+static const float ziggurat_exp_r_f = 7.6971174701310497140446280481f;
diff --git a/numpy/random/src/legacy/legacy-distributions.c b/numpy/random/src/legacy/legacy-distributions.c
new file mode 100644
index 000000000..684b3d762
--- /dev/null
+++ b/numpy/random/src/legacy/legacy-distributions.c
@@ -0,0 +1,392 @@
+#include "legacy-distributions.h"
+
+
+static NPY_INLINE double legacy_double(aug_bitgen_t *aug_state) {
+ return aug_state->bit_generator->next_double(aug_state->bit_generator->state);
+}
+
+double legacy_gauss(aug_bitgen_t *aug_state) {
+ if (aug_state->has_gauss) {
+ const double temp = aug_state->gauss;
+ aug_state->has_gauss = false;
+ aug_state->gauss = 0.0;
+ return temp;
+ } else {
+ double f, x1, x2, r2;
+
+ do {
+ x1 = 2.0 * legacy_double(aug_state) - 1.0;
+ x2 = 2.0 * legacy_double(aug_state) - 1.0;
+ r2 = x1 * x1 + x2 * x2;
+ } while (r2 >= 1.0 || r2 == 0.0);
+
+ /* Polar method, a more efficient version of the Box-Muller approach. */
+ f = sqrt(-2.0 * log(r2) / r2);
+ /* Keep for next call */
+ aug_state->gauss = f * x1;
+ aug_state->has_gauss = true;
+ return f * x2;
+ }
+}
+
+double legacy_standard_exponential(aug_bitgen_t *aug_state) {
+ /* We use -log(1-U) since U is [0, 1) */
+ return -log(1.0 - legacy_double(aug_state));
+}
+
+double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape) {
+ double b, c;
+ double U, V, X, Y;
+
+ if (shape == 1.0) {
+ return legacy_standard_exponential(aug_state);
+ }
+ else if (shape == 0.0) {
+ return 0.0;
+ } else if (shape < 1.0) {
+ for (;;) {
+ U = legacy_double(aug_state);
+ V = legacy_standard_exponential(aug_state);
+ if (U <= 1.0 - shape) {
+ X = pow(U, 1. / shape);
+ if (X <= V) {
+ return X;
+ }
+ } else {
+ Y = -log((1 - U) / shape);
+ X = pow(1.0 - shape + shape * Y, 1. / shape);
+ if (X <= (V + Y)) {
+ return X;
+ }
+ }
+ }
+ } else {
+ b = shape - 1. / 3.;
+ c = 1. / sqrt(9 * b);
+ for (;;) {
+ do {
+ X = legacy_gauss(aug_state);
+ V = 1.0 + c * X;
+ } while (V <= 0.0);
+
+ V = V * V * V;
+ U = legacy_double(aug_state);
+ if (U < 1.0 - 0.0331 * (X * X) * (X * X))
+ return (b * V);
+ if (log(U) < 0.5 * X * X + b * (1. - V + log(V)))
+ return (b * V);
+ }
+ }
+}
+
+double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale) {
+ return scale * legacy_standard_gamma(aug_state, shape);
+}
+
+double legacy_pareto(aug_bitgen_t *aug_state, double a) {
+ return exp(legacy_standard_exponential(aug_state) / a) - 1;
+}
+
+double legacy_weibull(aug_bitgen_t *aug_state, double a) {
+ if (a == 0.0) {
+ return 0.0;
+ }
+ return pow(legacy_standard_exponential(aug_state), 1. / a);
+}
+
+double legacy_power(aug_bitgen_t *aug_state, double a) {
+ return pow(1 - exp(-legacy_standard_exponential(aug_state)), 1. / a);
+}
+
+double legacy_chisquare(aug_bitgen_t *aug_state, double df) {
+ return 2.0 * legacy_standard_gamma(aug_state, df / 2.0);
+}
+
+double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
+ double nonc) {
+ double out;
+ if (nonc == 0) {
+ return legacy_chisquare(aug_state, df);
+ }
+ if (1 < df) {
+ const double Chi2 = legacy_chisquare(aug_state, df - 1);
+ const double n = legacy_gauss(aug_state) + sqrt(nonc);
+ return Chi2 + n * n;
+ } else {
+ const long i = random_poisson(aug_state->bit_generator, nonc / 2.0);
+ out = legacy_chisquare(aug_state, df + 2 * i);
+ /* Insert nan guard here to avoid changing the stream */
+ if (npy_isnan(nonc)){
+ return NPY_NAN;
+ } else {
+ return out;
+ }
+ }
+}
+
+double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum, double dfden,
+ double nonc) {
+ double t = legacy_noncentral_chisquare(aug_state, dfnum, nonc) * dfden;
+ return t / (legacy_chisquare(aug_state, dfden) * dfnum);
+}
+
+double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale) {
+ double U, X, Y;
+ double mu_2l;
+
+ mu_2l = mean / (2 * scale);
+ Y = legacy_gauss(aug_state);
+ Y = mean * Y * Y;
+ X = mean + mu_2l * (Y - sqrt(4 * scale * Y + Y * Y));
+ U = legacy_double(aug_state);
+ if (U <= mean / (mean + X)) {
+ return X;
+ } else {
+ return mean * mean / X;
+ }
+}
+
+double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale) {
+ return loc + scale * legacy_gauss(aug_state);
+}
+
+double legacy_lognormal(aug_bitgen_t *aug_state, double mean, double sigma) {
+ return exp(legacy_normal(aug_state, mean, sigma));
+}
+
+double legacy_standard_t(aug_bitgen_t *aug_state, double df) {
+ double num, denom;
+
+ num = legacy_gauss(aug_state);
+ denom = legacy_standard_gamma(aug_state, df / 2);
+ return sqrt(df / 2) * num / sqrt(denom);
+}
+
+int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n, double p) {
+ double Y = legacy_gamma(aug_state, n, (1 - p) / p);
+ return (int64_t)random_poisson(aug_state->bit_generator, Y);
+}
+
+double legacy_standard_cauchy(aug_bitgen_t *aug_state) {
+ return legacy_gauss(aug_state) / legacy_gauss(aug_state);
+}
+
+double legacy_beta(aug_bitgen_t *aug_state, double a, double b) {
+ double Ga, Gb;
+
+ if ((a <= 1.0) && (b <= 1.0)) {
+ double U, V, X, Y;
+ /* Use Johnk's algorithm */
+
+ while (1) {
+ U = legacy_double(aug_state);
+ V = legacy_double(aug_state);
+ X = pow(U, 1.0 / a);
+ Y = pow(V, 1.0 / b);
+
+ if ((X + Y) <= 1.0) {
+ if (X + Y > 0) {
+ return X / (X + Y);
+ } else {
+ double logX = log(U) / a;
+ double logY = log(V) / b;
+ double logM = logX > logY ? logX : logY;
+ logX -= logM;
+ logY -= logM;
+
+ return exp(logX - log(exp(logX) + exp(logY)));
+ }
+ }
+ }
+ } else {
+ Ga = legacy_standard_gamma(aug_state, a);
+ Gb = legacy_standard_gamma(aug_state, b);
+ return Ga / (Ga + Gb);
+ }
+}
+
+double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden) {
+ return ((legacy_chisquare(aug_state, dfnum) * dfden) /
+ (legacy_chisquare(aug_state, dfden) * dfnum));
+}
+
+double legacy_exponential(aug_bitgen_t *aug_state, double scale) {
+ return scale * legacy_standard_exponential(aug_state);
+}
+
+
+static RAND_INT_TYPE legacy_random_binomial_original(bitgen_t *bitgen_state,
+ double p,
+ RAND_INT_TYPE n,
+ binomial_t *binomial) {
+ double q;
+
+ if (p <= 0.5) {
+ if (p * n <= 30.0) {
+ return random_binomial_inversion(bitgen_state, n, p, binomial);
+ } else {
+ return random_binomial_btpe(bitgen_state, n, p, binomial);
+ }
+ } else {
+ q = 1.0 - p;
+ if (q * n <= 30.0) {
+ return n - random_binomial_inversion(bitgen_state, n, q, binomial);
+ } else {
+ return n - random_binomial_btpe(bitgen_state, n, q, binomial);
+ }
+ }
+}
+
+
+int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial) {
+ return (int64_t) legacy_random_binomial_original(bitgen_state, p,
+ (RAND_INT_TYPE) n,
+ binomial);
+}
+
+
+static RAND_INT_TYPE random_hypergeometric_hyp(bitgen_t *bitgen_state,
+ RAND_INT_TYPE good,
+ RAND_INT_TYPE bad,
+ RAND_INT_TYPE sample) {
+ RAND_INT_TYPE d1, k, z;
+ double d2, u, y;
+
+ d1 = bad + good - sample;
+ d2 = (double)MIN(bad, good);
+
+ y = d2;
+ k = sample;
+ while (y > 0.0) {
+ u = next_double(bitgen_state);
+ y -= (RAND_INT_TYPE)floor(u + y / (d1 + k));
+ k--;
+ if (k == 0)
+ break;
+ }
+ z = (RAND_INT_TYPE)(d2 - y);
+ if (good > bad)
+ z = sample - z;
+ return z;
+}
+
+/* D1 = 2*sqrt(2/e) */
+/* D2 = 3 - 2*sqrt(3/e) */
+#define D1 1.7155277699214135
+#define D2 0.8989161620588988
+static RAND_INT_TYPE random_hypergeometric_hrua(bitgen_t *bitgen_state,
+ RAND_INT_TYPE good,
+ RAND_INT_TYPE bad,
+ RAND_INT_TYPE sample) {
+ RAND_INT_TYPE mingoodbad, maxgoodbad, popsize, m, d9;
+ double d4, d5, d6, d7, d8, d10, d11;
+ RAND_INT_TYPE Z;
+ double T, W, X, Y;
+
+ mingoodbad = MIN(good, bad);
+ popsize = good + bad;
+ maxgoodbad = MAX(good, bad);
+ m = MIN(sample, popsize - sample);
+ d4 = ((double)mingoodbad) / popsize;
+ d5 = 1.0 - d4;
+ d6 = m * d4 + 0.5;
+ d7 = sqrt((double)(popsize - m) * sample * d4 * d5 / (popsize - 1) + 0.5);
+ d8 = D1 * d7 + D2;
+ d9 = (RAND_INT_TYPE)floor((double)(m + 1) * (mingoodbad + 1) / (popsize + 2));
+ d10 = (loggam(d9 + 1) + loggam(mingoodbad - d9 + 1) + loggam(m - d9 + 1) +
+ loggam(maxgoodbad - m + d9 + 1));
+ d11 = MIN(MIN(m, mingoodbad) + 1.0, floor(d6 + 16 * d7));
+ /* 16 for 16-decimal-digit precision in D1 and D2 */
+
+ while (1) {
+ X = next_double(bitgen_state);
+ Y = next_double(bitgen_state);
+ W = d6 + d8 * (Y - 0.5) / X;
+
+ /* fast rejection: */
+ if ((W < 0.0) || (W >= d11))
+ continue;
+
+ Z = (RAND_INT_TYPE)floor(W);
+ T = d10 - (loggam(Z + 1) + loggam(mingoodbad - Z + 1) + loggam(m - Z + 1) +
+ loggam(maxgoodbad - m + Z + 1));
+
+ /* fast acceptance: */
+ if ((X * (4.0 - X) - 3.0) <= T)
+ break;
+
+ /* fast rejection: */
+ if (X * (X - T) >= 1)
+ continue;
+ /* log(0.0) is ok here, since always accept */
+ if (2.0 * log(X) <= T)
+ break; /* acceptance */
+ }
+
+ /* this is a correction to HRUA* by Ivan Frohne in rv.py */
+ if (good > bad)
+ Z = m - Z;
+
+ /* another fix from rv.py to allow sample to exceed popsize/2 */
+ if (m < sample)
+ Z = good - Z;
+
+ return Z;
+}
+#undef D1
+#undef D2
+
+static RAND_INT_TYPE random_hypergeometric_original(bitgen_t *bitgen_state,
+ RAND_INT_TYPE good,
+ RAND_INT_TYPE bad,
+ RAND_INT_TYPE sample)
+{
+ if (sample > 10) {
+ return random_hypergeometric_hrua(bitgen_state, good, bad, sample);
+ } else if (sample > 0) {
+ return random_hypergeometric_hyp(bitgen_state, good, bad, sample);
+ } else {
+ return 0;
+ }
+}
+
+
+/*
+ * This is a wrapper function that matches the expected template. In the legacy
+ * generator, all int types are long, so this accepts int64 and then converts
+ * them to longs. These values must be in bounds for long and this is checked
+ * outside this function
+ *
+ * The remaining are included for the return type only
+ */
+int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state, int64_t good,
+ int64_t bad, int64_t sample) {
+ return (int64_t)random_hypergeometric_original(bitgen_state,
+ (RAND_INT_TYPE)good,
+ (RAND_INT_TYPE)bad,
+ (RAND_INT_TYPE)sample);
+}
+
+
+int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p) {
+ return (int64_t)random_logseries(bitgen_state, p);
+}
+
+ int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam) {
+ return (int64_t)random_poisson(bitgen_state, lam);
+}
+
+ int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a) {
+ return (int64_t)random_zipf(bitgen_state, a);
+}
+
+ int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p) {
+ return (int64_t)random_geometric(bitgen_state, p);
+}
+
+ void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n,
+ RAND_INT_TYPE *mnix, double *pix, npy_intp d,
+ binomial_t *binomial) {
+ return random_multinomial(bitgen_state, n, mnix, pix, d, binomial);
+}
diff --git a/numpy/random/src/legacy/legacy-distributions.h b/numpy/random/src/legacy/legacy-distributions.h
new file mode 100644
index 000000000..4bc15d58e
--- /dev/null
+++ b/numpy/random/src/legacy/legacy-distributions.h
@@ -0,0 +1,49 @@
+#ifndef _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
+#define _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
+
+
+#include "../distributions/distributions.h"
+
+typedef struct aug_bitgen {
+ bitgen_t *bit_generator;
+ int has_gauss;
+ double gauss;
+} aug_bitgen_t;
+
+extern double legacy_gauss(aug_bitgen_t *aug_state);
+extern double legacy_standard_exponential(aug_bitgen_t *aug_state);
+extern double legacy_pareto(aug_bitgen_t *aug_state, double a);
+extern double legacy_weibull(aug_bitgen_t *aug_state, double a);
+extern double legacy_power(aug_bitgen_t *aug_state, double a);
+extern double legacy_gamma(aug_bitgen_t *aug_state, double shape, double scale);
+extern double legacy_chisquare(aug_bitgen_t *aug_state, double df);
+extern double legacy_noncentral_chisquare(aug_bitgen_t *aug_state, double df,
+ double nonc);
+extern double legacy_noncentral_f(aug_bitgen_t *aug_state, double dfnum,
+ double dfden, double nonc);
+extern double legacy_wald(aug_bitgen_t *aug_state, double mean, double scale);
+extern double legacy_lognormal(aug_bitgen_t *aug_state, double mean,
+ double sigma);
+extern double legacy_standard_t(aug_bitgen_t *aug_state, double df);
+extern double legacy_standard_cauchy(aug_bitgen_t *state);
+extern double legacy_beta(aug_bitgen_t *aug_state, double a, double b);
+extern double legacy_f(aug_bitgen_t *aug_state, double dfnum, double dfden);
+extern double legacy_normal(aug_bitgen_t *aug_state, double loc, double scale);
+extern double legacy_standard_gamma(aug_bitgen_t *aug_state, double shape);
+extern double legacy_exponential(aug_bitgen_t *aug_state, double scale);
+extern int64_t legacy_random_binomial(bitgen_t *bitgen_state, double p,
+ int64_t n, binomial_t *binomial);
+extern int64_t legacy_negative_binomial(aug_bitgen_t *aug_state, double n,
+ double p);
+extern int64_t legacy_random_hypergeometric(bitgen_t *bitgen_state,
+ int64_t good, int64_t bad,
+ int64_t sample);
+extern int64_t legacy_random_logseries(bitgen_t *bitgen_state, double p);
+extern int64_t legacy_random_poisson(bitgen_t *bitgen_state, double lam);
+extern int64_t legacy_random_zipf(bitgen_t *bitgen_state, double a);
+extern int64_t legacy_random_geometric(bitgen_t *bitgen_state, double p);
+void legacy_random_multinomial(bitgen_t *bitgen_state, RAND_INT_TYPE n,
+ RAND_INT_TYPE *mnix, double *pix, npy_intp d,
+ binomial_t *binomial);
+
+#endif
diff --git a/numpy/random/src/mt19937/LICENSE.md b/numpy/random/src/mt19937/LICENSE.md
new file mode 100644
index 000000000..f65c3d46e
--- /dev/null
+++ b/numpy/random/src/mt19937/LICENSE.md
@@ -0,0 +1,61 @@
+# MT19937
+
+Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org)
+
+The rk_random and rk_seed functions algorithms and the original design of
+the Mersenne Twister RNG:
+
+ Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. The names of its contributors may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Original algorithm for the implementation of rk_interval function from
+Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by
+Magnus Jonsson.
+
+Constants used in the rk_double implementation by Isaku Wada.
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be included
+in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/numpy/random/src/mt19937/mt19937-benchmark.c b/numpy/random/src/mt19937/mt19937-benchmark.c
new file mode 100644
index 000000000..039f8030a
--- /dev/null
+++ b/numpy/random/src/mt19937/mt19937-benchmark.c
@@ -0,0 +1,31 @@
+/*
+ * cl mt19937-benchmark.c mt19937.c /Ox
+ * Measure-Command { .\mt19937-benchmark.exe }
+ *
+ * gcc mt19937-benchmark.c mt19937.c -O3 -o mt19937-benchmark
+ * time ./mt19937-benchmark
+ */
+#include "mt19937.h"
+#include <inttypes.h>
+#include <stdio.h>
+#include <time.h>
+
+#define Q 1000000000
+
+int main() {
+ int i;
+ uint32_t seed = 0x0;
+ uint64_t sum = 0, count = 0;
+ mt19937_state state;
+ mt19937_seed(&state, seed);
+ clock_t begin = clock();
+ for (i = 0; i < Q; i++) {
+ sum += mt19937_next64(&state);
+ count++;
+ }
+ clock_t end = clock();
+ double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
+ printf("0x%" PRIx64 "\ncount: %" PRIu64 "\n", sum, count);
+ printf("%" PRIu64 " randoms per second\n",
+ (uint64_t)(Q / time_spent) / 1000000 * 1000000);
+}
diff --git a/numpy/random/src/mt19937/mt19937-jump.c b/numpy/random/src/mt19937/mt19937-jump.c
new file mode 100644
index 000000000..46b28cf96
--- /dev/null
+++ b/numpy/random/src/mt19937/mt19937-jump.c
@@ -0,0 +1,224 @@
+#include "mt19937-jump.h"
+#include "mt19937.h"
+
+/* 32-bits function */
+/* return the i-th coefficient of the polynomial pf */
+unsigned long get_coef(unsigned long *pf, unsigned int deg) {
+ if ((pf[deg >> 5] & (LSB << (deg & 0x1ful))) != 0)
+ return (1);
+ else
+ return (0);
+}
+
+/* 32-bit function */
+/* set the coefficient of the polynomial pf with v */
+void set_coef(unsigned long *pf, unsigned int deg, unsigned long v) {
+ if (v != 0)
+ pf[deg >> 5] ^= (LSB << (deg & 0x1ful));
+ else
+ ;
+}
+
+void gray_code(unsigned long *h) {
+ unsigned int i, j = 1, l = 1, term = LL;
+
+ h[0] = 0;
+
+ for (i = 1; i <= QQ; i++) {
+ l = (l << 1);
+ term = (term >> 1);
+ for (; j < l; j++)
+ h[j] = h[l - j - 1] ^ term;
+ }
+}
+
+void copy_state(mt19937_state *target_state, mt19937_state *state) {
+ int i;
+
+ for (i = 0; i < N; i++)
+ target_state->key[i] = state->key[i];
+
+ target_state->pos = state->pos;
+}
+
+/* next state generating function */
+void gen_next(mt19937_state *state) {
+ int num;
+ unsigned long y;
+ static unsigned long mag02[2] = {0x0ul, MATRIX_A};
+
+ num = state->pos;
+ if (num < N - M) {
+ y = (state->key[num] & UPPER_MASK) | (state->key[num + 1] & LOWER_MASK);
+ state->key[num] = state->key[num + M] ^ (y >> 1) ^ mag02[y % 2];
+ state->pos++;
+ } else if (num < N - 1) {
+ y = (state->key[num] & UPPER_MASK) | (state->key[num + 1] & LOWER_MASK);
+ state->key[num] = state->key[num + (M - N)] ^ (y >> 1) ^ mag02[y % 2];
+ state->pos++;
+ } else if (num == N - 1) {
+ y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK);
+ state->key[N - 1] = state->key[M - 1] ^ (y >> 1) ^ mag02[y % 2];
+ state->pos = 0;
+ }
+}
+
+void add_state(mt19937_state *state1, mt19937_state *state2) {
+ int i, pt1 = state1->pos, pt2 = state2->pos;
+
+ if (pt2 - pt1 >= 0) {
+ for (i = 0; i < N - pt2; i++)
+ state1->key[i + pt1] ^= state2->key[i + pt2];
+ for (; i < N - pt1; i++)
+ state1->key[i + pt1] ^= state2->key[i + (pt2 - N)];
+ for (; i < N; i++)
+ state1->key[i + (pt1 - N)] ^= state2->key[i + (pt2 - N)];
+ } else {
+ for (i = 0; i < N - pt1; i++)
+ state1->key[i + pt1] ^= state2->key[i + pt2];
+ for (; i < N - pt2; i++)
+ state1->key[i + (pt1 - N)] ^= state2->key[i + pt2];
+ for (; i < N; i++)
+ state1->key[i + (pt1 - N)] ^= state2->key[i + (pt2 - N)];
+ }
+}
+
+/*
+void gen_vec_h(mt19937_state *state, mt19937_state *vec_h,
+ unsigned long *h) {
+ int i;
+ unsigned long k, g;
+ mt19937_state v;
+
+ gray_code(h);
+
+ copy_state(&vec_h[0], state);
+
+ for (i = 0; i < QQ; i++)
+ gen_next(&vec_h[0]);
+
+ for (i = 1; i < LL; i++) {
+ copy_state(&v, state);
+ g = h[i] ^ h[i - 1];
+ for (k = 1; k < g; k = (k << 1))
+ gen_next(&v);
+ copy_state(&vec_h[h[i]], &vec_h[h[i - 1]]);
+ add_state(&vec_h[h[i]], &v);
+ }
+}
+*/
+
+/* compute pf(ss) using Sliding window algorithm */
+/*
+void calc_state(unsigned long *pf, mt19937_state *state,
+ mt19937_state *vec_h) {
+ mt19937_state *temp1;
+ int i = MEXP - 1, j, digit, skip = 0;
+
+ temp1 = (mt19937_state *)calloc(1, sizeof(mt19937_state));
+
+ while (get_coef(pf, i) == 0)
+ i--;
+
+ for (; i >= QQ; i--) {
+ if (get_coef(pf, i) != 0) {
+ for (j = 0; j < QQ + 1; j++)
+ gen_next(temp1);
+ digit = 0;
+ for (j = 0; j < QQ; j++)
+ digit = (digit << 1) ^ get_coef(pf, i - j - 1);
+ add_state(temp1, &vec_h[digit]);
+ i -= QQ;
+ } else
+ gen_next(temp1);
+ }
+
+ for (; i > -1; i--) {
+ gen_next(temp1);
+ if (get_coef(pf, i) == 1)
+ add_state(temp1, state);
+ else
+ ;
+ }
+
+ copy_state(state, temp1);
+ free(temp1);
+}
+*/
+
+/* compute pf(ss) using standard Horner method */
+void horner1(unsigned long *pf, mt19937_state *state) {
+ int i = MEXP - 1;
+ mt19937_state *temp;
+
+ temp = (mt19937_state *)calloc(1, sizeof(mt19937_state));
+
+ while (get_coef(pf, i) == 0)
+ i--;
+
+ if (i > 0) {
+ copy_state(temp, state);
+ gen_next(temp);
+ i--;
+ for (; i > 0; i--) {
+ if (get_coef(pf, i) != 0)
+ add_state(temp, state);
+ else
+ ;
+ gen_next(temp);
+ }
+ if (get_coef(pf, 0) != 0)
+ add_state(temp, state);
+ else
+ ;
+ } else if (i == 0)
+ copy_state(temp, state);
+ else
+ ;
+
+ copy_state(state, temp);
+ free(temp);
+}
+
+void mt19937_jump_state(mt19937_state *state, const char *jump_str) {
+ unsigned long *pf;
+ int i;
+
+ pf = (unsigned long *)calloc(P_SIZE, sizeof(unsigned long));
+
+ for (i = MEXP - 1; i > -1; i--) {
+ if (jump_str[i] == '1')
+ set_coef(pf, i, 1);
+ }
+ /* TODO: Should generate the next set and start from 0, but doesn't matter ??
+ */
+ if (state->pos >= N) {
+ state->pos = 0;
+ }
+
+ horner1(pf, state);
+
+ free(pf);
+}
+/*
+void mt19937_jump(mt19937_state *state, const char *jump_str)
+{
+ unsigned long h[LL];
+ mt19937_state vec_h[LL];
+ unsigned long *pf;
+ int i;
+
+ pf = (unsigned long *)calloc(P_SIZE, sizeof(unsigned long));
+
+ for (i = MEXP - 1; i > -1; i--)
+ {
+ if (jump_str[i] == '1')
+ set_coef(pf, i, 1);
+ }
+
+ gen_vec_h(state, &vec_h, &h);
+ calc_state(pf, state, &vec_h);
+
+ free(pf);
+}
+*/ \ No newline at end of file
diff --git a/numpy/random/src/mt19937/mt19937-jump.h b/numpy/random/src/mt19937/mt19937-jump.h
new file mode 100644
index 000000000..394c150a0
--- /dev/null
+++ b/numpy/random/src/mt19937/mt19937-jump.h
@@ -0,0 +1,15 @@
+#pragma once
+#include "mt19937.h"
+#include <stdlib.h>
+
+/* parameters for computing Jump */
+#define W_SIZE 32 /* size of unsigned long */
+#define MEXP 19937
+#define P_SIZE ((MEXP / W_SIZE) + 1)
+#define LSB 0x00000001UL
+#define QQ 7
+#define LL 128 /* LL = 2^(QQ) */
+
+void mt19937_jump_state(mt19937_state *state, const char *jump_str);
+
+void set_coef(unsigned long *pf, unsigned int deg, unsigned long v); \ No newline at end of file
diff --git a/numpy/random/src/mt19937/mt19937-poly.h b/numpy/random/src/mt19937/mt19937-poly.h
new file mode 100644
index 000000000..b03747881
--- /dev/null
+++ b/numpy/random/src/mt19937/mt19937-poly.h
@@ -0,0 +1,207 @@
+static const char * poly =
+"0001000111110111011100100010101111000000010100100101000001110111100010101000110100101001011001010"
+"1110101101100101011100101101011001110011100011110100001000001011100101100010100000010011101110011"
+"0100001001111010000100100101001011100111101101001100000111001000011101100100010000001111110100010"
+"0000111101000101000101101111001011000011001001001011010011001000001000011100100010110101111111101"
+"0010001001100010011011101111101110111010111000010000011010110011111101100000100100101001010000001"
+"1001111000011010011101001101011000111001110010110000011000110101111010110011011000001110110010001"
+"1001101011011101000011001011111111100011001010111100000001111011111101000101000011000011111100101"
+"0100001111101010101100000110100110010010101011011100110011000101100101011110010101110000101011100"
+"0001010100010110100000111001100000011101011001101000001000101101010100010101100000100011110110011"
+"0101100110111101010111100010100110100011111011100111000001110110010000000100000110101010111001111"
+"0011110010000110101101010001110010100111111111100100101010010011101111011000010111101001110110110"
+"1011101101101100110111000100101100111001011111110101001000011111010011000111110011100100001101111"
+"1001010110110001000100001001000010000000001011011100101010010100011000110101001000010101100111101"
+"0011110101100110111100000111001011011001100101111011000101001011011111110110100010001100101001100"
+"1111110011111111110111011011100011000100110011011011011001101011100110010001111100001111100100001"
+"1000100011001010100101010100111110001100111111011111100100011110011101101000110100101110010111111"
+"1001010110000101001110010110001011011010101111111001110001100100011001000010111001011011000111100"
+"1101001011110111111010011000110100001010000000101010101001111101111110101111110101110101010010100"
+"1100100101010110011111001101100110001011000101010001000110011011111101111110001100000010110110101"
+"1111110100001011101011101110111101100001111000011100000110110100100100100101011000111000100110001"
+"0110110001001000111110101111000000100100010100100101101111100011010100111101110010000001011111111"
+"1101010000011001010101111001111110001111100010100010100001011001110001010010100001011111110110111"
+"1100100100001111000111110111000100010101010110100111100001011001101001111101001110010110110011010"
+"1000010011000110000000110110110000111010010000111001100010100101010101111100010111000000011101110"
+"1100011010110001101100110000001010001100111101101011100111110111000110010011011011001101001111100"
+"1011111001100011010110101111100110111101011100000011000010001010001101001011000001111000101000100"
+"0110001011001010110000001101100000011000011110010000101000011010011110001101111111010010101100100"
+"1111010100000011011001111111011011111001101110101010110111110110101000100001011110111010100111100"
+"0000001001111100111111111000100000100100010001011001100001111100100000001111011101100010011000111"
+"0011110110100011011001110011100011011000010000000101101101001010111000010000010101111110000000100"
+"1011010100001001000011001100011000000111100111100101010100000111000000110111011101011111100010101"
+"0011001100110000010101111001000111001001010100011000110010011011101001001100101100000000111000111"
+"0111111000010010010100000101010010000100101011111111111001100101101010011010100010111001011100011"
+"1001001011010000110000111100010110110100000100110010000010010000001000110010101000110101101100100"
+"0001100001100011110110010000100000100010011001010010110111100011011000101011001100001111110110110"
+"0001100110010100011001101000100001110011011111101001101011110011011011111110111110101110010011001"
+"1000000101100000101100100000100000001011000100100001100100101101010111101010111101010001001010110"
+"0011111011001101001110110010100100000011001001111010001001100101110000000010111101000111111101010"
+"0110101110101110001001110000111110100000101101100110010001111101111011001000101110111010110111110"
+"0011001101011010001011000010000111111111101001011100110101011000000001111000101100011101011011100"
+"1111101110000000000110001110011001101100111111010001110000111110100011000100001100110010000110111"
+"1001011011001111011100000000011011000100000011000010010111000111101000011001001100011010001111000"
+"0011110010100010001101011101010011001100000010101001001101111101000111001110110000000010111101001"
+"1110110011101110111010011100101001010101100100011111100110001111011111110010100000011100110110001"
+"1011100000101000010100011101000010111100101111101100110001010001010000101110000000110100010110011"
+"1111110100101010011010100001100110110110011111110010000100001010011110010110001000000100000111000"
+"0111001010011001000010111001100110100110110101111011110111001001000101010010010011000111110010101"
+"1100110001100101001000010001101010011001110011001110001110010100010000000000000110111001010101000"
+"0111111011011101000111011001011011000101110100010001111100101110000100001011111101111101010011001"
+"0010001100011011101100010010101011001000001001010101100110001111001110011100110111111010110010001"
+"1111111101111001001101101001001010011001110000101000110010111110010110111111000100101000101011010"
+"0000101101101100000110101000101000010001111000100000111110011111111110010010001010001111011001100"
+"0011110111000000111111000100001111101110100010101011001010110110011001010010001011100001010110101"
+"0100000010101101000011001101110010000010110011000101100100000111111100011001110011010011001110000"
+"1110011110000000001001001010100000111001010110001110011100011010010010001110010011001010111100000"
+"1110000101101001011010001001010000111000010011010100001010110000101101110110011000011100111100001"
+"1001000011010001110110111001100100001111110010110010011111000010100000001101110100000000101101000"
+"0011000000100011000111110001000011100111110110000110101111101100011110100111111000000011011110110"
+"1101011010111010010001001101000110110010000010101000000001100100100000001111011001001010110100011"
+"1011000010101111010111000001001100111110000010110010011011110011111001000101111011010011010100001"
+"0110011111100001011111101010010100110001001001001000100010101011011000011100111000110101110000001"
+"1100001111100011110010000101011000010101111010001101010101100001100101100000100100000101011001100"
+"0011001000101010101010100111000100100010101000111111101010000000101010101001000101010100100111001"
+"1001100001010001100110111101010001111010011100000001001110100010010011110100001000011111100010001"
+"0010001000100110101011001110100110101110110110100101111000110101101101001000001110011010110011001"
+"0111111101011011101001111001011100001010110111000001100010110110100011010111011000111010100011000"
+"1111010110001001010000110001000101101100010100000000100001111100000010111001000011000101010100001"
+"0001101100011100010100101110010100000010011011010100000111110110000110101011011010010001110000111"
+"0110101000110101110010011100010010100111001101110110010001101001101101010100001010001110111011011"
+"1010011001010111101001011000100111001110011000000001101000001111001100001100000011001110100110011"
+"0011000110001001010111111111110110111111000111100010010101110000101100101000001010001011010100010"
+"1010010100010011101111100111010010010001110101011110110100001000001001000111001110010001001100100"
+"1100100010001010011011110100000101101011101010110110100100010001110000111010111001111011111001011"
+"0000000000011000100100100111001000101111000000110001011110101111110111100000000100101011000111011"
+"1011010011101000001011001001110001111010000100101101010111001010001000100001000111011010000110111"
+"1010110001001110001100001110011000101100000101100000000110101000000110101100100101110001100100100"
+"0110000110101011100001010001010000011101111011111011011000100100101011110101111000001011110010110"
+"0111011011100111101010110001111011010011111000010111110100001001010001011001000110111100000101011"
+"0010111111010100000110111101001100000100001011101010100011010010000001101100100101001000100011000"
+"0101010111111100100000111011101111100000011011111111010001100011001100101101011110101011101100001"
+"0100010011101111111011000111111101001000101101111001111000101110010111001010101011010111000000101"
+"0110010000010010101111100010111110000000011101001000011111001011111100111100100101100101111010110"
+"1010101001110011111100111110100000111100100000111111000010100001111011111110110010001001000000000"
+"1110100110010111100101111111001010001111001101100001011000111011100010100001000010100000011001000"
+"0000111000110111001001100010111010100111111001111101100101000011001001110011100110101110001101110"
+"1110000010110110010110000111001110110000011011100111000101100101000000001110011011001001111001111"
+"0000101100001000000111100110110000110111111001101001111111010000001011110011011011100100110000110"
+"1001011111101100100111111000000010001110111011010011011101001100000011001010000010101111111010110"
+"0001000100101110101101100001001010100110010000110110100110011001000111011110110011001110111110101"
+"0000011111011011001111010010101011000010011101001011100001010001111001000110000010000101010011111"
+"0110011000001111101001110001101011111111001010010110100001101000000011101000101011101000110101111"
+"0000101110011010010000110100000101100011000100101111100011001111011101001010100111001110100001101"
+"0000110111011000000110011001101011110000101100110110000101100000110110100001001001110001110001001"
+"1100110111111100101001100010010110011011110001000111111111001101111110010000011001011010111101001"
+"1101111110101110110100101100110001101101001010111101101000000011111111100101000101110001000011001"
+"1000111110111011010010101011110110110001010001001001100111111010011101111000000111011000011010011"
+"0111010101001110010100101101000110000110001100010101001110101011010100000110110111111111110011110"
+"0100011110100011001000110101111010000001011011110101001100111100010100101100010000010110011001111"
+"0011011110001110010010100100011111110000110011011100010110110101001110011010101111011001010101011"
+"1001001111001000001100100111000001000110110101100111000101011000000100001000100010011000001110011"
+"0000111100000111001101011111010000010001100000010101101000111100001000010011110000001011001001100"
+"0011011011111011100000111101001011101000010010001001111110010101111010110101101110110111010000101"
+"1100011000000000110110100011010100100010001101010101101110110111111011010110011101011010110101011"
+"1101000000010010011111000000101000110001000011100001101111010101100000100000100111111111100000000"
+"0011100011100101110010111100010111110010101110101000011000111111001110111111000001101101011011111"
+"1100110101001000011111001111000000001010001001010101101000001100111010101100010111001001111100000"
+"1110101101110001011100011101101100001001001011100111100110011101111000100010010001111100001010010"
+"1011001001010100101100010010000110010000101010111111001000011100000000101101110010001101110101001"
+"1110000011100101010000011110000010001000001010110001010000100111001100110001111000100100011100110"
+"1100010011110111001001100000100111001010000000000011100011111111101110010101111010100010000100001"
+"0101101001010111111110000110110010100000001011110100010111110111010000001011110110111000000110010"
+"0001100100111110001100010101000010011111100000100010000101110000111001101100100000011111111100010"
+"1001101101001000001111000100100001010110111011110110001001010001110001001100011001001100000000101"
+"1100011110101101011001100001010110001010000111100000011011011001000010101100010101110011001101110"
+"0000101011010001010011111001011000010101010100110110111110101000111110001000010100000000100010100"
+"1000111111000110110010001111000010101011101101111101011110101111100111111100111101000101000010011"
+"0010111010100010011001000000010111100010000101001011001101100011100001001111010100100110101111111"
+"1000010011110101001010011111111011101001110100001001100010000100001001100101101111011100100011001"
+"1111010001011001111101011110101101000111110101001010011101010010010101001000000000011001100110001"
+"0001000010101010101000010100111000001110000111001110001101111111000010101010111001011101001001011"
+"0011001111011010101110101111110001001100100111010001011000010100000100000001001100000011000011101"
+"1100000110000001011001110000101001010111101000110101000011000000111011100101010000111000010010101"
+"1010100101100001011011011110110011000100100101010011111101000000100001001101000011000101010111101"
+"1110111111100010111000111000010110111010010110000000000100101001000111101101100000000110111011001"
+"0100000000100100011110111011101101101101010110001110100001100001001011000000111111110100011110011"
+"0010000010000000010100110011110000000010000011111000111101011110000000000010101101001100000010010"
+"1011001001101110110011100001100011101001101011110011010001011101000100011111001010100000011111111"
+"1010101100000010001000110000110000101000110100110011100000110010110100011111010001000011100001001"
+"1000101000010111111011100010111000111001010100110000000010011011101010101111000110001000110111011"
+"1011100001100011010001101011010100110110011100000010111001011111110010100110100010001100000011100"
+"0001011001011000101011010000001010011010001011000111000011000011110011111001111010001101011010010"
+"0010010001001001101000101001011011101110001100010001010100010111111001100100000010001111100010111"
+"0100001111001100101001011101010010110010100010001100011010100110000100011010111110001011011001000"
+"1001001111011010010011101110100001111100000110101001010111110001101100110010111010111001011111010"
+"1110111011111110000001110010000010011111000111011011000011000010011110011111111101100101111011100"
+"0101101100000110101110000111111111111010110101010100111000011111011001100000100000101011000101110"
+"1011010010100000000100100000010111101110111001000011111011111110100011010010000110001101111101100"
+"1100010111001011011001011001010100100110100101001000111011011001100011001010010101111001100100110"
+"1000110000111011100101110101101000011001010010100011000001111001110110101101010010110110001100100"
+"0100001011101100111001010001111011010110010010110010110111110001001001111001111010010001010001101"
+"1110100110101100011110100100110111000111010110011000100100110110001101111100111110100001000110000"
+"1110011011001101100101100000001010100011101000010100111011111100011010000110000001011100010000101"
+"0100101000010001110010001100010110011111111101111000011001110111011100110010010100100010001000010"
+"0100001110010000011000110001101011101001110100100011011001000111010101110100110011010111001100001"
+"0100001001101010010111110101110111000000010100111101011010101001000001001000001000101101111000000"
+"0110000101110100001111001101110111011110010111101000100101110111010101001101100001110001101101101"
+"0010101100100101000100100100110111000111000111100111000001100001000111101011000110111110001010000"
+"0100110010001101100011010111000111010111000111110000110000101111101110010110111001011000111010001"
+"1011000010010101010010011001000011010110111011010001001010100111001000010110110110101110000110000"
+"1110110010011001011011000100011101001001000111011100100000000000100001101101000101000100000111001"
+"0011100001100110101011011101110111101111000100100011100001010001011001110010101010001110101101110"
+"1011001110111111111010101101000010111111011011011100011100101010001011011100011111011100101011000"
+"1000110100101000011111010011110000000101101110010000101100001000100000000010010110000000000110011"
+"1000000000001111001001000100000111001110111111001111101100001100111000101100011000100111111110011"
+"1110010101011010111100110010110001010000101111111101001010100010001001111010111000010000010010001"
+"1111111101100100001101011011100001010101000111110111111101011010011111111101000111011001011011000"
+"0000101011100011101110110011101111011110011110010000011001111001110111011011111010011011001110111"
+"0101100111110100000100010110010010101001010100010111000101111001011011001001110010100011101111110"
+"1101011110010101101011010010011111110000011010011101000000010000111010100100111110111000001101010"
+"0101100001111001111010101011110001001010000011010110010100011100100100111110100110000010011111001"
+"0100010011001001010101110111111010011101101100000101011110111010011110001111110100111011110011010"
+"0111001010110101010110000011001010000000101101010101001101011000011011010110101010101111101101100"
+"1100100000111101010111011011011110011001100010010000010100101000111111101011100111010101011000111"
+"1100110010101100010011111100000110011111101011100100001110001100001010101001001100010011001000100"
+"1101101000101101110010000001101001001110101111000110111000011101111110100100110111000000101011110"
+"0001100100001010101001101111001000001100000011010000100101100000001110100010010000110110101010111"
+"1100010100000110011100101010111110010110111100000010110011011001011110111001010011011110010001110"
+"1101110000001011101101011111101011111110110110000111110011101100110100010000100000110100010010110"
+"0011000011000110101001110100111010110000100010110101110111100010110001000111100111001011011110010"
+"0001001110101001101101011010111001001101100011101001011011001110011010001010110100111001111100101"
+"1000111001010010000010111010101110001100110111111000011101001000001010010011101000111001100111110"
+"1110100100100110010111111101010011101111011011111011011010011110100101100001011000001001001010010"
+"1100001000000110110011011101010001011110010001001110110100100001101101001011101010001110111111010"
+"1100011100101000011110111110110011111111100010110010110111010010001111101110011011010110000001000"
+"0010110100010101110100001000010011100110001110001110010100010010010110011100100110010100001110011"
+"1100001011010000001101011011011110100000001110100111001000101000001000001001000010000111010000100"
+"0111100000101010110010111010010101100000001100110101001001000110001110111011110001010010010011000"
+"1100001111101101100001111000101100110010001000111001101101011110100110100011101000011111011010101"
+"0101000011111010010110001001100110110111000100100011011101000010001010110001111001111101110001111"
+"0100100000010111010011111110000101001001011110100100010011101110011010100101100001010000001110100"
+"0011111101111000100110011000011001100100001010110011111100111010100011110100010101011110011001000"
+"0000110000100100001011101110111010001001011110010101111100001111101101111011011110001010000100010"
+"1001100100100100110010010101100110000000100000000111110011100111101001010000010000000000101011100"
+"0011101011100110000001100101010101011111111011010011110010011011001010011101010010100010001011010"
+"1100010011101011010111110100001010100011000011001001011011101111110011001110010001100101011001101"
+"0100010001111111100000101000001011010100011100111011010111001100110110001100110101000011010001010"
+"1011100001001010011110001010100100001101110011101011100100101010001100110011110010001100100001000"
+"0110001001110110010111101011101101010111001010011010101110000010100010000111011000010110011000001"
+"1000110010100001110001100010010000001101111110000010010110100000000000001111110010001110111100001"
+"0100111101000011101110010101011011000101011010111100111111001011110001110011110011011010010111101"
+"1010111011101101000001110111001010011001110010100100100100001010001100101010111001110100000110111"
+"1010000111000011101101100101101001100000011100100111100110010110011100101000111110111000110111110"
+"1101100101011101100111011111111001111000011110111110101100000111000101100100110111000010100101000"
+"0110000011011101111101111000110101011000010111010000111011000000100011101010100111001111101010111"
+"0001110100001000100001011101001010001110100000101100001011101111100111101011111001111100101101111"
+"0101100001110011111110110100110010000011011111101101110110000110110011100110111000111101000010111"
+"0111101011100100000000011101111011000100001000111000000111011010101010110000111111101010110001111"
+"0000110100111101111011001010101110000011001101001101000010011001101011111110111101010111010011100"
+"0101010011001111101111001100101000101000111110111001011111100000001101111011000001001100111111111"
+"1010111101000001111011110010001001001110100111110010000011110000011000000101001100011110110011001"
+"1010101001000010001010110000010011110101011110010111010001010111101100001001100011101001111101001"
+"0110110100111001110011100011111010010010100010111000001100001011010010000100100110101010111001001"
+"0110000101011011011100110111111001010000001001011010101010010001011010111100111010101101000101101"
+"0100100001011101110111111001111111110110111011000101010000010000011111001000100101100100100110110"
+"1100000111110010110011010100000100011111110001110010110001000001001111001101110110110101101010111"
+"0000100111101100010001110010110111100011100101100011";
diff --git a/numpy/random/src/mt19937/mt19937-test-data-gen.c b/numpy/random/src/mt19937/mt19937-test-data-gen.c
new file mode 100644
index 000000000..4f4ec1d64
--- /dev/null
+++ b/numpy/random/src/mt19937/mt19937-test-data-gen.c
@@ -0,0 +1,59 @@
+/*
+ * Generate testing csv files
+ *
+ * cl mt19937-test-data-gen.c randomkit.c
+ * -IC:\Anaconda\Lib\site-packages\numpy\core\include -IC:\Anaconda\include
+ * Advapi32.lib Kernel32.lib C:\Anaconda\libs\python36.lib -DRK_NO_WINCRYPT=1
+ *
+ */
+#include "randomkit.h"
+#include <inttypes.h>
+#include <stdio.h>
+
+#define N 1000
+
+int main() {
+  uint64_t sum = 0; /* NOTE(review): written here but never read afterwards */
+  uint32_t seed = 0xDEADBEAF; /* (sic: "BEAF") arbitrary fixed seed for testset 1 */
+  int i;
+  rk_state state;
+  rk_seed(seed, &state);
+  uint64_t store[N]; /* N = 1000 draws per testset */
+  for (i = 0; i < N; i++) {
+    store[i] = (uint64_t)rk_random(&state);
+  }
+
+  FILE *fp;
+  fp = fopen("mt19937-testset-1.csv", "w");
+  if (fp == NULL) {
+    printf("Couldn't open file\n");
+    return -1;
+  }
+  fprintf(fp, "seed, 0x%" PRIx32 "\n", seed);
+  for (i = 0; i < N; i++) {
+    fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+    if (i == 999) { /* echo the final value to stdout as a quick sanity check */
+      printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+    }
+  }
+  fclose(fp);
+
+  seed = 0; /* testset 2 exercises the all-zero seed path */
+  rk_seed(seed, &state);
+  for (i = 0; i < N; i++) {
+    store[i] = (uint64_t)rk_random(&state);
+  }
+  fp = fopen("mt19937-testset-2.csv", "w");
+  if (fp == NULL) {
+    printf("Couldn't open file\n");
+    return -1;
+  }
+  fprintf(fp, "seed, 0x%" PRIx32 "\n", seed);
+  for (i = 0; i < N; i++) {
+    fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+    if (i == 999) { /* same stdout echo for testset 2 */
+      printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+    }
+  }
+  fclose(fp);
+}
diff --git a/numpy/random/src/mt19937/mt19937.c b/numpy/random/src/mt19937/mt19937.c
new file mode 100644
index 000000000..e5ca9e0cf
--- /dev/null
+++ b/numpy/random/src/mt19937/mt19937.c
@@ -0,0 +1,107 @@
+#include "mt19937.h"
+#include "mt19937-jump.h"
+#include "mt19937-poly.h"
+
+/* Seed the full 624-word state from a single 32-bit seed. */
+void mt19937_seed(mt19937_state *state, uint32_t seed) {
+  int pos;
+  seed &= 0xffffffffUL; /* no-op on 32-bit uint32_t; guards exotic platforms */
+
+  /* Knuth's PRNG as used in the Mersenne Twister reference implementation */
+  for (pos = 0; pos < RK_STATE_LEN; pos++) {
+    state->key[pos] = seed;
+    seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL;
+  }
+  state->pos = RK_STATE_LEN; /* mark buffer exhausted: first draw triggers mt19937_gen */
+}
+
+/* initializes mt[RK_STATE_LEN] with a seed */
+static void init_genrand(mt19937_state *state, uint32_t s) {
+ int mti;
+ uint32_t *mt = state->key;
+
+ mt[0] = s & 0xffffffffUL;
+ for (mti = 1; mti < RK_STATE_LEN; mti++) {
+ /*
+ * See Knuth TAOCP Vol2. 3rd Ed. P.106 for multiplier.
+ * In the previous versions, MSBs of the seed affect
+ * only MSBs of the array mt[].
+ * 2002/01/09 modified by Makoto Matsumoto
+ */
+ mt[mti] = (1812433253UL * (mt[mti - 1] ^ (mt[mti - 1] >> 30)) + mti);
+ /* for > 32 bit machines */
+ mt[mti] &= 0xffffffffUL;
+ }
+ state->pos = mti;
+ return;
+}
+
+/*
+ * initialize by an array with array-length
+ * init_key is the array for initializing keys
+ * key_length is its length
+ */
+void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key,
+                           int key_length) {
+  /* was signed in the original code. RDH 12/16/2002 */
+  int i = 1;
+  int j = 0;
+  uint32_t *mt = state->key;
+  int k;
+
+  init_genrand(state, 19650218UL); /* base state from a fixed seed, then mix the key in */
+  k = (RK_STATE_LEN > key_length ? RK_STATE_LEN : key_length); /* at least one full pass over mt */
+  for (; k; k--) {
+    /* non linear */
+    mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1664525UL)) +
+            init_key[j] + j;
+    /* for > 32 bit machines */
+    mt[i] &= 0xffffffffUL;
+    i++;
+    j++;
+    if (i >= RK_STATE_LEN) { /* wrap state index, carrying the last word into mt[0] */
+      mt[0] = mt[RK_STATE_LEN - 1];
+      i = 1;
+    }
+    if (j >= key_length) { /* cycle through the key as often as needed */
+      j = 0;
+    }
+  }
+  for (k = RK_STATE_LEN - 1; k; k--) { /* second pass: decouple state words from the key */
+    mt[i] = (mt[i] ^ ((mt[i - 1] ^ (mt[i - 1] >> 30)) * 1566083941UL)) -
+            i;             /* non linear */
+    mt[i] &= 0xffffffffUL; /* for WORDSIZE > 32 machines */
+    i++;
+    if (i >= RK_STATE_LEN) {
+      mt[0] = mt[RK_STATE_LEN - 1];
+      i = 1;
+    }
+  }
+
+  mt[0] = 0x80000000UL; /* MSB is 1; assuring non-zero initial array */
+}
+
+/* Regenerate all N words of the state (the "twist"), then reset pos. */
+void mt19937_gen(mt19937_state *state) {
+  uint32_t y;
+  int i;
+
+  for (i = 0; i < N - M; i++) { /* words whose partner i+M needs no wrap */
+    y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] & LOWER_MASK);
+    /* -(y & 1) & MATRIX_A == MATRIX_A when the low bit is set, else 0 */
+    state->key[i] = state->key[i + M] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
+  }
+  for (; i < N - 1; i++) { /* partner index wraps: i + M - N */
+    y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] & LOWER_MASK);
+    state->key[i] = state->key[i + (M - N)] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
+  }
+  y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK); /* last word pairs with key[0] */
+  state->key[N - 1] = state->key[M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
+
+  state->pos = 0; /* buffer refilled; consumers restart at index 0 */
+}
+
+/* Out-of-line instantiations for the header's inline generators.
+   NOTE(review): mt19937.h declares these `static inline`; pairing that with
+   `extern inline` here mixes linkage models -- confirm against the build. */
+extern inline uint64_t mt19937_next64(mt19937_state *state);
+
+extern inline uint32_t mt19937_next32(mt19937_state *state);
+
+extern inline double mt19937_next_double(mt19937_state *state);
+
+void mt19937_jump(mt19937_state *state) { mt19937_jump_state(state, poly); } /* poly comes from mt19937-poly.h */
diff --git a/numpy/random/src/mt19937/mt19937.h b/numpy/random/src/mt19937/mt19937.h
new file mode 100644
index 000000000..1b39e0b64
--- /dev/null
+++ b/numpy/random/src/mt19937/mt19937.h
@@ -0,0 +1,61 @@
+#pragma once
+#include <math.h>
+#include <stdint.h>
+
+#ifdef _WIN32
+#define inline __forceinline /* MSVC: force inlining of the generators below */
+#endif
+
+#define RK_STATE_LEN 624 /* MT19937 state size in 32-bit words */
+
+#define N 624 /* same as RK_STATE_LEN (reference-implementation naming) */
+#define M 397 /* recurrence offset */
+#define MATRIX_A 0x9908b0dfUL  /* twist matrix constant */
+#define UPPER_MASK 0x80000000UL /* most significant bit */
+#define LOWER_MASK 0x7fffffffUL /* low 31 bits */
+
+typedef struct s_mt19937_state {
+  uint32_t key[RK_STATE_LEN];
+  int pos; /* next index to read; RK_STATE_LEN means buffer exhausted */
+} mt19937_state;
+
+extern void mt19937_seed(mt19937_state *state, uint32_t seed);
+
+extern void mt19937_gen(mt19937_state *state);
+
+/* Slightly optimized reference implementation of the Mersenne Twister */
+static inline uint32_t mt19937_next(mt19937_state *state) {
+  uint32_t y;
+
+  if (state->pos == RK_STATE_LEN) {
+    // Move to function to help inlining
+    mt19937_gen(state);
+  }
+  y = state->key[state->pos++];
+
+  /* Tempering */
+  y ^= (y >> 11);
+  y ^= (y << 7) & 0x9d2c5680UL;
+  y ^= (y << 15) & 0xefc60000UL;
+  y ^= (y >> 18);
+
+  return y;
+}
+
+extern void mt19937_init_by_array(mt19937_state *state, uint32_t *init_key,
+                                  int key_length);
+
+static inline uint64_t mt19937_next64(mt19937_state *state) {
+  /* two 32-bit draws, first draw in the high word */
+  return (uint64_t)mt19937_next(state) << 32 | mt19937_next(state);
+}
+
+static inline uint32_t mt19937_next32(mt19937_state *state) {
+  return mt19937_next(state);
+}
+
+static inline double mt19937_next_double(mt19937_state *state) {
+  /* 53-bit-precision double in [0, 1): a supplies 27 high bits, b 26 low;
+     67108864 == 2^26, 9007199254740992 == 2^53 */
+  int32_t a = mt19937_next(state) >> 5, b = mt19937_next(state) >> 6;
+  return (a * 67108864.0 + b) / 9007199254740992.0;
+}
+
+void mt19937_jump(mt19937_state *state);
diff --git a/numpy/random/src/mt19937/randomkit.c b/numpy/random/src/mt19937/randomkit.c
new file mode 100644
index 000000000..f8ed4b49e
--- /dev/null
+++ b/numpy/random/src/mt19937/randomkit.c
@@ -0,0 +1,578 @@
+/* Random kit 1.3 */
+
+/*
+ * Copyright (c) 2003-2005, Jean-Sebastien Roy (js@jeannot.org)
+ *
+ * The rk_random and rk_seed functions algorithms and the original design of
+ * the Mersenne Twister RNG:
+ *
+ * Copyright (C) 1997 - 2002, Makoto Matsumoto and Takuji Nishimura,
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. The names of its contributors may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+ * OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Original algorithm for the implementation of rk_interval function from
+ * Richard J. Wagner's implementation of the Mersenne Twister RNG, optimised by
+ * Magnus Jonsson.
+ *
+ * Constants used in the rk_double implementation by Isaku Wada.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/* static char const rcsid[] =
+ "@(#) $Jeannot: randomkit.c,v 1.28 2005/07/21 22:14:09 js Exp $"; */
+
+#ifdef _WIN32
+/*
+ * Windows
+ * XXX: we have to use this ugly defined(__GNUC__) because it is not easy to
+ * detect the compiler used in distutils itself
+ */
+#if (defined(__GNUC__) && defined(NPY_NEEDS_MINGW_TIME_WORKAROUND))
+
+/*
+ * FIXME: ideally, we should set this to the real version of MSVCRT. We need
+ * something higher than 0x601 to enable _ftime64 and co
+ */
+#define __MSVCRT_VERSION__ 0x0700
+#include <sys/timeb.h>
+#include <time.h>
+
+/*
+ * mingw msvcr lib import wrongly export _ftime, which does not exist in the
+ * actual msvc runtime for version >= 8; we make it an alias to _ftime64, which
+ * is available in those versions of the runtime
+ */
+#define _FTIME(x) _ftime64((x))
+#else
+#include <sys/timeb.h>
+#include <time.h>
+
+#define _FTIME(x) _ftime((x))
+#endif
+
+#ifndef RK_NO_WINCRYPT
+/* Windows crypto */
+#ifndef _WIN32_WINNT
+#define _WIN32_WINNT 0x0400
+#endif
+#include <wincrypt.h>
+#include <windows.h>
+
+#endif
+
+/*
+ * Do not move this include. randomkit.h must be included
+ * after windows timeb.h is included.
+ */
+#include "randomkit.h"
+
+#else
+/* Unix */
+#include "randomkit.h"
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#endif
+
+#include <assert.h>
+#include <errno.h>
+#include <limits.h>
+#include <math.h>
+#include <stddef.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#ifndef RK_DEV_URANDOM
+#define RK_DEV_URANDOM "/dev/urandom"
+#endif
+
+#ifndef RK_DEV_RANDOM
+#define RK_DEV_RANDOM "/dev/random"
+#endif
+
+char *rk_strerror[RK_ERR_MAX] = {"no error", "random device unvavailable"};
+
+/* static functions */
+static unsigned long rk_hash(unsigned long key);
+
+void rk_seed(unsigned long seed, rk_state *state) {
+ int pos;
+ seed &= 0xffffffffUL;
+
+ /* Knuth's PRNG as used in the Mersenne Twister reference implementation */
+ for (pos = 0; pos < RK_STATE_LEN; pos++) {
+ state->key[pos] = seed;
+ seed = (1812433253UL * (seed ^ (seed >> 30)) + pos + 1) & 0xffffffffUL;
+ }
+ state->pos = RK_STATE_LEN;
+ state->gauss = 0;
+ state->has_gauss = 0;
+ state->has_binomial = 0;
+}
+
+/* Thomas Wang 32 bits integer hash function */
+unsigned long rk_hash(unsigned long key) {
+ key += ~(key << 15);
+ key ^= (key >> 10);
+ key += (key << 3);
+ key ^= (key >> 6);
+ key += ~(key << 11);
+ key ^= (key >> 16);
+ return key;
+}
+
+rk_error rk_randomseed(rk_state *state) {
+#ifndef _WIN32
+ struct timeval tv;
+#else
+ struct _timeb tv;
+#endif
+ int i;
+
+ if (rk_devfill(state->key, sizeof(state->key), 0) == RK_NOERR) {
+ /* ensures non-zero key */
+ state->key[0] |= 0x80000000UL;
+ state->pos = RK_STATE_LEN;
+ state->gauss = 0;
+ state->has_gauss = 0;
+ state->has_binomial = 0;
+
+ for (i = 0; i < 624; i++) {
+ state->key[i] &= 0xffffffffUL;
+ }
+ return RK_NOERR;
+ }
+
+#ifndef _WIN32
+ gettimeofday(&tv, NULL);
+ rk_seed(rk_hash(getpid()) ^ rk_hash(tv.tv_sec) ^ rk_hash(tv.tv_usec) ^
+ rk_hash(clock()),
+ state);
+#else
+ _FTIME(&tv);
+ rk_seed(rk_hash(tv.time) ^ rk_hash(tv.millitm) ^ rk_hash(clock()), state);
+#endif
+
+ return RK_ENODEV;
+}
+
+/* Magic Mersenne Twister constants */
+#define N 624
+#define M 397
+#define MATRIX_A 0x9908b0dfUL
+#define UPPER_MASK 0x80000000UL
+#define LOWER_MASK 0x7fffffffUL
+
+/*
+ * Slightly optimised reference implementation of the Mersenne Twister
+ * Note that regardless of the precision of long, only 32 bit random
+ * integers are produced
+ */
+unsigned long rk_random(rk_state *state) {
+ unsigned long y;
+
+ if (state->pos == RK_STATE_LEN) {
+ int i;
+
+ for (i = 0; i < N - M; i++) {
+ y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] & LOWER_MASK);
+ state->key[i] = state->key[i + M] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
+ }
+ for (; i < N - 1; i++) {
+ y = (state->key[i] & UPPER_MASK) | (state->key[i + 1] & LOWER_MASK);
+ state->key[i] =
+ state->key[i + (M - N)] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
+ }
+ y = (state->key[N - 1] & UPPER_MASK) | (state->key[0] & LOWER_MASK);
+ state->key[N - 1] = state->key[M - 1] ^ (y >> 1) ^ (-(y & 1) & MATRIX_A);
+
+ state->pos = 0;
+ }
+ y = state->key[state->pos++];
+
+ /* Tempering */
+ y ^= (y >> 11);
+ y ^= (y << 7) & 0x9d2c5680UL;
+ y ^= (y << 15) & 0xefc60000UL;
+ y ^= (y >> 18);
+
+ return y;
+}
+
+/*
+ * Returns an unsigned 64 bit random integer.
+ */
+NPY_INLINE static npy_uint64 rk_uint64(rk_state *state) {
+ npy_uint64 upper = (npy_uint64)rk_random(state) << 32;
+ npy_uint64 lower = (npy_uint64)rk_random(state);
+ return upper | lower;
+}
+
+/*
+ * Returns an unsigned 32 bit random integer.
+ */
+NPY_INLINE static npy_uint32 rk_uint32(rk_state *state) {
+ return (npy_uint32)rk_random(state);
+}
+
+/*
+ * Fills an array with cnt random npy_uint64 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void rk_random_uint64(npy_uint64 off, npy_uint64 rng, npy_intp cnt,
+ npy_uint64 *out, rk_state *state) {
+ npy_uint64 val, mask = rng;
+ npy_intp i;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+ mask |= mask >> 32;
+
+ for (i = 0; i < cnt; i++) {
+ if (rng <= 0xffffffffUL) {
+ while ((val = (rk_uint32(state) & mask)) > rng)
+ ;
+ } else {
+ while ((val = (rk_uint64(state) & mask)) > rng)
+ ;
+ }
+ out[i] = off + val;
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_uint32 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void rk_random_uint32(npy_uint32 off, npy_uint32 rng, npy_intp cnt,
+ npy_uint32 *out, rk_state *state) {
+ npy_uint32 val, mask = rng;
+ npy_intp i;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+
+ for (i = 0; i < cnt; i++) {
+ while ((val = (rk_uint32(state) & mask)) > rng)
+ ;
+ out[i] = off + val;
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_uint16 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void rk_random_uint16(npy_uint16 off, npy_uint16 rng, npy_intp cnt,
+ npy_uint16 *out, rk_state *state) {
+ npy_uint16 val, mask = rng;
+ npy_intp i;
+ npy_uint32 buf;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+
+ for (i = 0; i < cnt; i++) {
+ do {
+ if (!bcnt) {
+ buf = rk_uint32(state);
+ bcnt = 1;
+ } else {
+ buf >>= 16;
+ bcnt--;
+ }
+ val = (npy_uint16)buf & mask;
+ } while (val > rng);
+ out[i] = off + val;
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_uint8 between off and off + rng
+ * inclusive. The numbers wrap if rng is sufficiently large.
+ */
+void rk_random_uint8(npy_uint8 off, npy_uint8 rng, npy_intp cnt, npy_uint8 *out,
+ rk_state *state) {
+ npy_uint8 val, mask = rng;
+ npy_intp i;
+ npy_uint32 buf;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+
+ for (i = 0; i < cnt; i++) {
+ do {
+ if (!bcnt) {
+ buf = rk_uint32(state);
+ bcnt = 3;
+ } else {
+ buf >>= 8;
+ bcnt--;
+ }
+ val = (npy_uint8)buf & mask;
+ } while (val > rng);
+ out[i] = off + val;
+ }
+}
+
+/*
+ * Fills an array with cnt random npy_bool between off and off + rng
+ * inclusive.
+ */
+void rk_random_bool(npy_bool off, npy_bool rng, npy_intp cnt, npy_bool *out,
+ rk_state *state) {
+ npy_intp i;
+ npy_uint32 buf;
+ int bcnt = 0;
+
+ if (rng == 0) {
+ for (i = 0; i < cnt; i++) {
+ out[i] = off;
+ }
+ return;
+ }
+
+ /* If we reach here rng and mask are one and off is zero */
+ assert(rng == 1 && off == 0);
+ for (i = 0; i < cnt; i++) {
+ if (!bcnt) {
+ buf = rk_uint32(state);
+ bcnt = 31;
+ } else {
+ buf >>= 1;
+ bcnt--;
+ }
+ out[i] = (buf & 0x00000001) != 0;
+ }
+}
+
+long rk_long(rk_state *state) { return rk_ulong(state) >> 1; }
+
+unsigned long rk_ulong(rk_state *state) {
+#if ULONG_MAX <= 0xffffffffUL
+ return rk_random(state);
+#else
+ return (rk_random(state) << 32) | (rk_random(state));
+#endif
+}
+
+unsigned long rk_interval(unsigned long max, rk_state *state) {
+ unsigned long mask = max, value;
+
+ if (max == 0) {
+ return 0;
+ }
+ /* Smallest bit mask >= max */
+ mask |= mask >> 1;
+ mask |= mask >> 2;
+ mask |= mask >> 4;
+ mask |= mask >> 8;
+ mask |= mask >> 16;
+#if ULONG_MAX > 0xffffffffUL
+ mask |= mask >> 32;
+#endif
+
+ /* Search a random value in [0..mask] <= max */
+#if ULONG_MAX > 0xffffffffUL
+ if (max <= 0xffffffffUL) {
+ while ((value = (rk_random(state) & mask)) > max)
+ ;
+ } else {
+ while ((value = (rk_ulong(state) & mask)) > max)
+ ;
+ }
+#else
+ while ((value = (rk_ulong(state) & mask)) > max)
+ ;
+#endif
+ return value;
+}
+
+double rk_double(rk_state *state) {
+ /* shifts : 67108864 = 0x4000000, 9007199254740992 = 0x20000000000000 */
+ long a = rk_random(state) >> 5, b = rk_random(state) >> 6;
+ return (a * 67108864.0 + b) / 9007199254740992.0;
+}
+
+void rk_fill(void *buffer, size_t size, rk_state *state) {
+ unsigned long r;
+ unsigned char *buf = buffer;
+
+ for (; size >= 4; size -= 4) {
+ r = rk_random(state);
+ *(buf++) = r & 0xFF;
+ *(buf++) = (r >> 8) & 0xFF;
+ *(buf++) = (r >> 16) & 0xFF;
+ *(buf++) = (r >> 24) & 0xFF;
+ }
+
+ if (!size) {
+ return;
+ }
+ r = rk_random(state);
+ for (; size; r >>= 8, size--) {
+ *(buf++) = (unsigned char)(r & 0xFF);
+ }
+}
+
+rk_error rk_devfill(void *buffer, size_t size, int strong) {
+#ifndef _WIN32
+ FILE *rfile;
+ int done;
+
+ if (strong) {
+ rfile = fopen(RK_DEV_RANDOM, "rb");
+ } else {
+ rfile = fopen(RK_DEV_URANDOM, "rb");
+ }
+ if (rfile == NULL) {
+ return RK_ENODEV;
+ }
+ done = fread(buffer, size, 1, rfile);
+ fclose(rfile);
+ if (done) {
+ return RK_NOERR;
+ }
+#else
+
+#ifndef RK_NO_WINCRYPT
+ HCRYPTPROV hCryptProv;
+ BOOL done;
+
+ if (!CryptAcquireContext(&hCryptProv, NULL, NULL, PROV_RSA_FULL,
+ CRYPT_VERIFYCONTEXT) ||
+ !hCryptProv) {
+ return RK_ENODEV;
+ }
+ done = CryptGenRandom(hCryptProv, size, (unsigned char *)buffer);
+ CryptReleaseContext(hCryptProv, 0);
+ if (done) {
+ return RK_NOERR;
+ }
+#endif
+
+#endif
+ return RK_ENODEV;
+}
+
+rk_error rk_altfill(void *buffer, size_t size, int strong, rk_state *state) {
+ rk_error err;
+
+ err = rk_devfill(buffer, size, strong);
+ if (err) {
+ rk_fill(buffer, size, state);
+ }
+ return err;
+}
+
+double rk_gauss(rk_state *state) {
+ if (state->has_gauss) {
+ const double tmp = state->gauss;
+ state->gauss = 0;
+ state->has_gauss = 0;
+ return tmp;
+ } else {
+ double f, x1, x2, r2;
+
+ do {
+ x1 = 2.0 * rk_double(state) - 1.0;
+ x2 = 2.0 * rk_double(state) - 1.0;
+ r2 = x1 * x1 + x2 * x2;
+ } while (r2 >= 1.0 || r2 == 0.0);
+
+ /* Polar method, a more efficient version of the Box-Muller approach. */
+ f = sqrt(-2.0 * log(r2) / r2);
+ /* Keep for next call */
+ state->gauss = f * x1;
+ state->has_gauss = 1;
+ return f * x2;
+ }
+}
diff --git a/numpy/random/mtrand/randomkit.h b/numpy/random/src/mt19937/randomkit.h
index a24dabebf..abb082cb2 100644
--- a/numpy/random/mtrand/randomkit.h
+++ b/numpy/random/src/mt19937/randomkit.h
@@ -59,50 +59,47 @@
#ifndef _RANDOMKIT_
#define _RANDOMKIT_
-#include <stddef.h>
#include <numpy/npy_common.h>
-
+#include <stddef.h>
#define RK_STATE_LEN 624
-typedef struct rk_state_
-{
- unsigned long key[RK_STATE_LEN];
- int pos;
- int has_gauss; /* !=0: gauss contains a gaussian deviate */
- double gauss;
-
- /* The rk_state structure has been extended to store the following
- * information for the binomial generator. If the input values of n or p
- * are different than nsave and psave, then the other parameters will be
- * recomputed. RTK 2005-09-02 */
-
- int has_binomial; /* !=0: following parameters initialized for
- binomial */
- double psave;
- long nsave;
- double r;
- double q;
- double fm;
- long m;
- double p1;
- double xm;
- double xl;
- double xr;
- double c;
- double laml;
- double lamr;
- double p2;
- double p3;
- double p4;
-
-}
-rk_state;
+typedef struct rk_state_ {
+ unsigned long key[RK_STATE_LEN];
+ int pos;
+ int has_gauss; /* !=0: gauss contains a gaussian deviate */
+ double gauss;
+
+ /* The rk_state structure has been extended to store the following
+ * information for the binomial generator. If the input values of n or p
+ * are different than nsave and psave, then the other parameters will be
+ * recomputed. RTK 2005-09-02 */
+
+ int has_binomial; /* !=0: following parameters initialized for
+ binomial */
+ double psave;
+ long nsave;
+ double r;
+ double q;
+ double fm;
+ long m;
+ double p1;
+ double xm;
+ double xl;
+ double xr;
+ double c;
+ double laml;
+ double lamr;
+ double p2;
+ double p3;
+ double p4;
+
+} rk_state;
typedef enum {
- RK_NOERR = 0, /* no error */
- RK_ENODEV = 1, /* no RK_DEV_RANDOM device */
- RK_ERR_MAX = 2
+ RK_NOERR = 0, /* no error */
+ RK_ENODEV = 1, /* no RK_DEV_RANDOM device */
+ RK_ERR_MAX = 2
} rk_error;
/* error strings */
@@ -212,7 +209,7 @@ extern rk_error rk_devfill(void *buffer, size_t size, int strong);
* Returns RK_ENODEV if the device is unavailable, or RK_NOERR if it is
*/
extern rk_error rk_altfill(void *buffer, size_t size, int strong,
- rk_state *state);
+ rk_state *state);
/*
* return a random gaussian deviate with variance unity and zero mean.
diff --git a/numpy/random/src/pcg64/LICENSE.md b/numpy/random/src/pcg64/LICENSE.md
new file mode 100644
index 000000000..7aac7a51c
--- /dev/null
+++ b/numpy/random/src/pcg64/LICENSE.md
@@ -0,0 +1,22 @@
+# PCG64
+
+## The MIT License
+
+PCG Random Number Generation for C.
+
+Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/numpy/random/src/pcg64/pcg64-benchmark.c b/numpy/random/src/pcg64/pcg64-benchmark.c
new file mode 100644
index 000000000..76f3ec78c
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64-benchmark.c
@@ -0,0 +1,42 @@
+/*
+ * cl pcg64-benchmark.c pcg64.c ../splitmix64/splitmix64.c /Ox
+ * Measure-Command { .\xoroshiro128-benchmark.exe }
+ *
+ * gcc pcg64-benchmark.c pcg64.c ../splitmix64/splitmix64.c -O3 -o
+ * pcg64-benchmark
+ * time ./pcg64-benchmark
+ */
+#include "../splitmix64/splitmix64.h"
+#include "pcg64.h"
+#include <inttypes.h>
+#include <stdio.h>
+#include <time.h>
+
+#define N 1000000000
+
+int main() {
+ pcg64_random_t rng;
+ uint64_t sum = 0, count = 0;
+ uint64_t seed = 0xDEADBEAF;
+ int i;
+#if __SIZEOF_INT128__ && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+ rng.state = (__uint128_t)splitmix64_next(&seed) << 64;
+ rng.state |= splitmix64_next(&seed);
+ rng.inc = (__uint128_t)1;
+#else
+ rng.state.high = splitmix64_next(&seed);
+ rng.state.low = splitmix64_next(&seed);
+ rng.inc.high = 0;
+ rng.inc.low = 1;
+#endif
+ clock_t begin = clock();
+ for (i = 0; i < N; i++) {
+ sum += pcg64_random_r(&rng);
+ count++;
+ }
+ clock_t end = clock();
+ double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
+ printf("0x%" PRIx64 "\ncount: %" PRIu64 "\n", sum, count);
+ printf("%" PRIu64 " randoms per second\n",
+ (uint64_t)(N / time_spent) / 1000000 * 1000000);
+}
diff --git a/numpy/random/src/pcg64/pcg64-test-data-gen.c b/numpy/random/src/pcg64/pcg64-test-data-gen.c
new file mode 100644
index 000000000..0c2b079a3
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64-test-data-gen.c
@@ -0,0 +1,73 @@
+/*
+ * Generate testing csv files
+ *
+ * GCC only
+ *
+ * gcc pcg64-test-data-gen.c pcg64.orig.c ../splitmix64/splitmix64.c -o
+ * pgc64-test-data-gen
+ */
+
+#include "pcg64.orig.h"
+#include <inttypes.h>
+#include <stdio.h>
+
+#define N 1000
+
+int main() {
+ pcg64_random_t rng;
+ uint64_t state, seed = 0xDEADBEAF;
+ state = seed;
+ __uint128_t temp, s, inc;
+ int i;
+ uint64_t store[N];
+ s = (__uint128_t)seed;
+ inc = (__uint128_t)0;
+ pcg64_srandom_r(&rng, s, inc);
+ printf("0x%" PRIx64, (uint64_t)(rng.state >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.state);
+ printf("0x%" PRIx64, (uint64_t)(rng.inc >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.inc);
+ for (i = 0; i < N; i++) {
+ store[i] = pcg64_random_r(&rng);
+ }
+
+ FILE *fp;
+ fp = fopen("pcg64-testset-1.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+
+ state = seed = 0;
+ s = (__uint128_t)seed;
+ i = (__uint128_t)0;
+ pcg64_srandom_r(&rng, s, i);
+ printf("0x%" PRIx64, (uint64_t)(rng.state >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.state);
+ printf("0x%" PRIx64, (uint64_t)(rng.inc >> 64));
+ printf("%" PRIx64 "\n", (uint64_t)rng.inc);
+ for (i = 0; i < N; i++) {
+ store[i] = pcg64_random_r(&rng);
+ }
+ fp = fopen("pcg64-testset-2.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+}
diff --git a/numpy/random/src/pcg64/pcg64.c b/numpy/random/src/pcg64/pcg64.c
new file mode 100644
index 000000000..b15973aef
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.c
@@ -0,0 +1,187 @@
+/*
+ * PCG64 Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ * Copyright 2015 Robert Kern <robert.kern@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ *
+ * Relicensed MIT in May 2019
+ *
+ * The MIT License
+ *
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "pcg64.h"
+
+extern inline void pcg_setseq_128_step_r(pcg_state_setseq_128 *rng);
+extern inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state);
+extern inline void pcg_setseq_128_srandom_r(pcg_state_setseq_128 *rng,
+ pcg128_t initstate,
+ pcg128_t initseq);
+extern inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(pcg_state_setseq_128 *rng);
+extern inline uint64_t
+pcg_setseq_128_xsl_rr_64_boundedrand_r(pcg_state_setseq_128 *rng,
+ uint64_t bound);
+extern inline void pcg_setseq_128_advance_r(pcg_state_setseq_128 *rng,
+ pcg128_t delta);
+
+/* Multi-step advance functions (jump-ahead, jump-back)
+ *
+ * The method used here is based on Brown, "Random Number Generation
+ * with Arbitrary Stride,", Transactions of the American Nuclear
+ * Society (Nov. 1994). The algorithm is very similar to fast
+ * exponentiation.
+ *
+ * Even though delta is an unsigned integer, we can pass a
+ * signed integer to go backwards, it just goes "the long way round".
+ */
+
+#ifndef PCG_EMULATED_128BIT_MATH
+
+pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta, pcg128_t cur_mult,
+ pcg128_t cur_plus) {
+ pcg128_t acc_mult = 1u;
+ pcg128_t acc_plus = 0u;
+ while (delta > 0) {
+ if (delta & 1) {
+ acc_mult *= cur_mult;
+ acc_plus = acc_plus * cur_mult + cur_plus;
+ }
+ cur_plus = (cur_mult + 1) * cur_plus;
+ cur_mult *= cur_mult;
+ delta /= 2;
+ }
+ return acc_mult * state + acc_plus;
+}
+
+#else
+
+pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta, pcg128_t cur_mult,
+ pcg128_t cur_plus) {
+ pcg128_t acc_mult = PCG_128BIT_CONSTANT(0u, 1u);
+ pcg128_t acc_plus = PCG_128BIT_CONSTANT(0u, 0u);
+ while ((delta.high > 0) || (delta.low > 0)) {
+ if (delta.low & 1) {
+ acc_mult = pcg128_mult(acc_mult, cur_mult);
+ acc_plus = pcg128_add(pcg128_mult(acc_plus, cur_mult), cur_plus);
+ }
+ cur_plus = pcg128_mult(pcg128_add(cur_mult, PCG_128BIT_CONSTANT(0u, 1u)),
+ cur_plus);
+ cur_mult = pcg128_mult(cur_mult, cur_mult);
+ delta.low >>= 1;
+ delta.low += delta.high & 1;
+ delta.high >>= 1;
+ }
+ return pcg128_add(pcg128_mult(acc_mult, state), acc_plus);
+}
+
+#endif
+
+extern inline uint64_t pcg64_next64(pcg64_state *state);
+extern inline uint32_t pcg64_next32(pcg64_state *state);
+
+extern void pcg64_advance(pcg64_state *state, uint64_t *step) {
+ pcg128_t delta;
+#ifndef PCG_EMULATED_128BIT_MATH
+ delta = (((pcg128_t)step[0]) << 64) | step[1];
+#else
+ delta.high = step[0];
+ delta.low = step[1];
+#endif
+ pcg64_advance_r(state->pcg_state, delta);
+}
+
+extern void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc) {
+ pcg128_t s, i;
+#ifndef PCG_EMULATED_128BIT_MATH
+ s = (((pcg128_t)seed[0]) << 64) | seed[1];
+ i = (((pcg128_t)inc[0]) << 64) | inc[1];
+#else
+ s.high = seed[0];
+ s.low = seed[1];
+ i.high = inc[0];
+ i.low = inc[1];
+#endif
+ pcg64_srandom_r(state->pcg_state, s, i);
+}
+
+extern void pcg64_get_state(pcg64_state *state, uint64_t *state_arr,
+ int *has_uint32, uint32_t *uinteger) {
+ /*
+ * state_arr contains state.high, state.low, inc.high, inc.low
+ * which are interpreted as the upper 64 bits (high) or lower
+ * 64 bits of a uint128_t variable
+ *
+ */
+#ifndef PCG_EMULATED_128BIT_MATH
+ state_arr[0] = (uint64_t)(state->pcg_state->state >> 64);
+ state_arr[1] = (uint64_t)(state->pcg_state->state & 0xFFFFFFFFFFFFFFFFULL);
+ state_arr[2] = (uint64_t)(state->pcg_state->inc >> 64);
+ state_arr[3] = (uint64_t)(state->pcg_state->inc & 0xFFFFFFFFFFFFFFFFULL);
+#else
+ state_arr[0] = (uint64_t)state->pcg_state->state.high;
+ state_arr[1] = (uint64_t)state->pcg_state->state.low;
+ state_arr[2] = (uint64_t)state->pcg_state->inc.high;
+ state_arr[3] = (uint64_t)state->pcg_state->inc.low;
+#endif
+ has_uint32[0] = state->has_uint32;
+ uinteger[0] = state->uinteger;
+}
+
+extern void pcg64_set_state(pcg64_state *state, uint64_t *state_arr,
+ int has_uint32, uint32_t uinteger) {
+ /*
+ * state_arr contains state.high, state.low, inc.high, inc.low
+ * which are interpreted as the upper 64 bits (high) or lower
+ * 64 bits of a uint128_t variable
+ *
+ */
+#ifndef PCG_EMULATED_128BIT_MATH
+ state->pcg_state->state = (((pcg128_t)state_arr[0]) << 64) | state_arr[1];
+ state->pcg_state->inc = (((pcg128_t)state_arr[2]) << 64) | state_arr[3];
+#else
+ state->pcg_state->state.high = state_arr[0];
+ state->pcg_state->state.low = state_arr[1];
+ state->pcg_state->inc.high = state_arr[2];
+ state->pcg_state->inc.low = state_arr[3];
+#endif
+ state->has_uint32 = has_uint32;
+ state->uinteger = uinteger;
+}
diff --git a/numpy/random/src/pcg64/pcg64.h b/numpy/random/src/pcg64/pcg64.h
new file mode 100644
index 000000000..2a7217dd9
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.h
@@ -0,0 +1,294 @@
+/*
+ * PCG64 Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ * Copyright 2015 Robert Kern <robert.kern@gmail.com>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ *
+ * Relicensed MIT in May 2019
+ *
+ * The MIT License
+ *
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+ * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+ * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef PCG64_H_INCLUDED
+#define PCG64_H_INCLUDED 1
+
+#include <inttypes.h>
+
+#ifdef _WIN32
+#include <stdlib.h>
+#define inline __forceinline
+#endif
+
+#if __GNUC_GNU_INLINE__ && !defined(__cplusplus)
+#error Nonstandard GNU inlining semantics. Compile with -std=c99 or better.
+#endif
+
+#if __cplusplus
+extern "C" {
+#endif
+
+#if defined(__SIZEOF_INT128__) && !defined(PCG_FORCE_EMULATED_128BIT_MATH)
+typedef __uint128_t pcg128_t;
+#define PCG_128BIT_CONSTANT(high, low) (((pcg128_t)(high) << 64) + low)
+#else
+typedef struct {
+ uint64_t high;
+ uint64_t low;
+} pcg128_t;
+
+static inline pcg128_t PCG_128BIT_CONSTANT(uint64_t high, uint64_t low) {
+ pcg128_t result;
+ result.high = high;
+ result.low = low;
+ return result;
+}
+
+#define PCG_EMULATED_128BIT_MATH 1
+#endif
+
+typedef struct { pcg128_t state; } pcg_state_128;
+
+typedef struct {
+ pcg128_t state;
+ pcg128_t inc;
+} pcg_state_setseq_128;
+
+#define PCG_DEFAULT_MULTIPLIER_HIGH 2549297995355413924ULL
+#define PCG_DEFAULT_MULTIPLIER_LOW 4865540595714422341ULL
+
+#define PCG_DEFAULT_MULTIPLIER_128 \
+ PCG_128BIT_CONSTANT(PCG_DEFAULT_MULTIPLIER_HIGH, PCG_DEFAULT_MULTIPLIER_LOW)
+#define PCG_DEFAULT_INCREMENT_128 \
+ PCG_128BIT_CONSTANT(6364136223846793005ULL, 1442695040888963407ULL)
+#define PCG_STATE_SETSEQ_128_INITIALIZER \
+ { \
+ PCG_128BIT_CONSTANT(0x979c9a98d8462005ULL, 0x7d3e9cb6cfe0549bULL) \
+ , PCG_128BIT_CONSTANT(0x0000000000000001ULL, 0xda3e39cb94b95bdbULL) \
+ }
+
+static inline uint64_t pcg_rotr_64(uint64_t value, unsigned int rot) {
+#ifdef _WIN32
+ return _rotr64(value, rot);
+#else
+ return (value >> rot) | (value << ((-rot) & 63));
+#endif
+}
+
+#ifdef PCG_EMULATED_128BIT_MATH
+
+static inline pcg128_t pcg128_add(pcg128_t a, pcg128_t b) {
+ pcg128_t result;
+
+ result.low = a.low + b.low;
+ result.high = a.high + b.high + (result.low < b.low);
+ return result;
+}
+
+static inline void _pcg_mult64(uint64_t x, uint64_t y, uint64_t *z1,
+ uint64_t *z0) {
+
+#if defined _WIN32 && _MSC_VER >= 1900 && _M_AMD64
+ z0[0] = _umul128(x, y, z1);
+#else
+ uint64_t x0, x1, y0, y1;
+ uint64_t w0, w1, w2, t;
+ /* Lower 64 bits are straightforward clock-arithmetic. */
+ *z0 = x * y;
+
+ x0 = x & 0xFFFFFFFFULL;
+ x1 = x >> 32;
+ y0 = y & 0xFFFFFFFFULL;
+ y1 = y >> 32;
+ w0 = x0 * y0;
+ t = x1 * y0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFULL;
+ w2 = t >> 32;
+ w1 += x0 * y1;
+ *z1 = x1 * y1 + w2 + (w1 >> 32);
+#endif
+}
+
+static inline pcg128_t pcg128_mult(pcg128_t a, pcg128_t b) {
+ uint64_t h1;
+ pcg128_t result;
+
+ h1 = a.high * b.low + a.low * b.high;
+ _pcg_mult64(a.low, b.low, &(result.high), &(result.low));
+ result.high += h1;
+ return result;
+}
+
+static inline void pcg_setseq_128_step_r(pcg_state_setseq_128 *rng) {
+ rng->state = pcg128_add(pcg128_mult(rng->state, PCG_DEFAULT_MULTIPLIER_128),
+ rng->inc);
+}
+
+static inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(state.high ^ state.low, state.high >> 58u);
+}
+
+static inline void pcg_setseq_128_srandom_r(pcg_state_setseq_128 *rng,
+ pcg128_t initstate,
+ pcg128_t initseq) {
+ rng->state = PCG_128BIT_CONSTANT(0ULL, 0ULL);
+ rng->inc.high = initseq.high << 1u;
+ rng->inc.high |= initseq.low >> 63u;
+ rng->inc.low = (initseq.low << 1u) | 1u;
+ pcg_setseq_128_step_r(rng);
+ rng->state = pcg128_add(rng->state, initstate);
+ pcg_setseq_128_step_r(rng);
+}
+
+static inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(pcg_state_setseq_128 *rng) {
+#if defined _WIN32 && _MSC_VER >= 1900 && _M_AMD64
+ uint64_t h1;
+ pcg128_t product;
+
+ /* Manually inline the multiplication and addition using intrinsics */
+ h1 = rng->state.high * PCG_DEFAULT_MULTIPLIER_LOW +
+ rng->state.low * PCG_DEFAULT_MULTIPLIER_HIGH;
+ product.low =
+ _umul128(rng->state.low, PCG_DEFAULT_MULTIPLIER_LOW, &(product.high));
+ product.high += h1;
+ _addcarry_u64(_addcarry_u64(0, product.low, rng->inc.low, &(rng->state.low)),
+ product.high, rng->inc.high, &(rng->state.high));
+ return _rotr64(rng->state.high ^ rng->state.low, rng->state.high >> 58u);
+#else
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+#endif
+}
+
+#else /* PCG_EMULATED_128BIT_MATH */
+
+static inline void pcg_setseq_128_step_r(pcg_state_setseq_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128 + rng->inc;
+}
+
+static inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(((uint64_t)(state >> 64u)) ^ (uint64_t)state,
+ state >> 122u);
+}
+
+static inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(pcg_state_setseq_128* rng)
+{
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+
+static inline void pcg_setseq_128_srandom_r(pcg_state_setseq_128 *rng,
+ pcg128_t initstate,
+ pcg128_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_128_step_r(rng);
+}
+
+#endif /* PCG_EMULATED_128BIT_MATH */
+
+static inline uint64_t
+pcg_setseq_128_xsl_rr_64_boundedrand_r(pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+extern pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta,
+ pcg128_t cur_mult, pcg128_t cur_plus);
+
+static inline void pcg_setseq_128_advance_r(pcg_state_setseq_128 *rng,
+ pcg128_t delta) {
+ rng->state = pcg_advance_lcg_128(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_128, rng->inc);
+}
+
+typedef pcg_state_setseq_128 pcg64_random_t;
+#define pcg64_random_r pcg_setseq_128_xsl_rr_64_random_r
+#define pcg64_boundedrand_r pcg_setseq_128_xsl_rr_64_boundedrand_r
+#define pcg64_srandom_r pcg_setseq_128_srandom_r
+#define pcg64_advance_r pcg_setseq_128_advance_r
+#define PCG64_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+
+#if __cplusplus
+}
+#endif
+
+typedef struct s_pcg64_state {
+ pcg64_random_t *pcg_state;
+ int has_uint32;
+ uint32_t uinteger;
+} pcg64_state;
+
+static inline uint64_t pcg64_next64(pcg64_state *state) {
+ return pcg64_random_r(state->pcg_state);
+}
+
+static inline uint32_t pcg64_next32(pcg64_state *state) {
+ uint64_t next;
+ if (state->has_uint32) {
+ state->has_uint32 = 0;
+ return state->uinteger;
+ }
+ next = pcg64_random_r(state->pcg_state);
+ state->has_uint32 = 1;
+ state->uinteger = (uint32_t)(next >> 32);
+ return (uint32_t)(next & 0xffffffff);
+}
+
+void pcg64_advance(pcg64_state *state, uint64_t *step);
+
+void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc);
+
+void pcg64_get_state(pcg64_state *state, uint64_t *state_arr, int *has_uint32,
+ uint32_t *uinteger);
+
+void pcg64_set_state(pcg64_state *state, uint64_t *state_arr, int has_uint32,
+ uint32_t uinteger);
+
+#endif /* PCG64_H_INCLUDED */
diff --git a/numpy/random/src/pcg64/pcg64.orig.c b/numpy/random/src/pcg64/pcg64.orig.c
new file mode 100644
index 000000000..07e97e4b6
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.orig.c
@@ -0,0 +1,11 @@
+#include "pcg64.orig.h"
+
+extern inline void pcg_setseq_128_srandom_r(pcg64_random_t *rng,
+ pcg128_t initstate,
+ pcg128_t initseq);
+
+extern uint64_t pcg_rotr_64(uint64_t value, unsigned int rot);
+extern inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state);
+extern void pcg_setseq_128_step_r(struct pcg_state_setseq_128 *rng);
+extern uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(struct pcg_state_setseq_128 *rng);
diff --git a/numpy/random/src/pcg64/pcg64.orig.h b/numpy/random/src/pcg64/pcg64.orig.h
new file mode 100644
index 000000000..74be91f31
--- /dev/null
+++ b/numpy/random/src/pcg64/pcg64.orig.h
@@ -0,0 +1,2025 @@
+/*
+ * PCG Random Number Generation for C.
+ *
+ * Copyright 2014 Melissa O'Neill <oneill@pcg-random.org>
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For additional information about the PCG random number generation scheme,
+ * including its license and other licensing options, visit
+ *
+ * http://www.pcg-random.org
+ */
+
+/*
+ * This code is derived from the canonical C++ PCG implementation, which
+ * has many additional features and is preferable if you can use C++ in
+ * your project.
+ *
+ * Much of the derivation was performed mechanically. In particular, the
+ * output functions were generated by compiling the C++ output functions
+ * into LLVM bitcode and then transforming that using the LLVM C backend
+ * (from https://github.com/draperlaboratory/llvm-cbe), and then
+ * postprocessing and hand editing the output.
+ *
+ * Much of the remaining code was generated by C-preprocessor metaprogramming.
+ */
+
+#ifndef PCG_VARIANTS_H_INCLUDED
+#define PCG_VARIANTS_H_INCLUDED 1
+
+#include <inttypes.h>
+
+#if __SIZEOF_INT128__
+typedef __uint128_t pcg128_t;
+#define PCG_128BIT_CONSTANT(high, low) ((((pcg128_t)high) << 64) + low)
+#define PCG_HAS_128BIT_OPS 1
+#endif
+
+#if __GNUC_GNU_INLINE__ && !defined(__cplusplus)
+#error Nonstandard GNU inlining semantics. Compile with -std=c99 or better.
+// We could instead use macros PCG_INLINE and PCG_EXTERN_INLINE
+// but better to just reject ancient C code.
+#endif
+
+#if __cplusplus
+extern "C" {
+#endif
+
+/*
+ * Rotate helper functions.
+ */
+
+inline uint8_t pcg_rotr_8(uint8_t value, unsigned int rot) {
+/* Unfortunately, clang is kinda pathetic when it comes to properly
+ * recognizing idiomatic rotate code, so for clang we actually provide
+ * assembler directives (enabled with PCG_USE_INLINE_ASM). Boo, hiss.
+ */
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm("rorb %%cl, %0" : "=r"(value) : "0"(value), "c"(rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 7));
+#endif
+}
+
+inline uint16_t pcg_rotr_16(uint16_t value, unsigned int rot) {
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm("rorw %%cl, %0" : "=r"(value) : "0"(value), "c"(rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 15));
+#endif
+}
+
+inline uint32_t pcg_rotr_32(uint32_t value, unsigned int rot) {
+#if PCG_USE_INLINE_ASM && __clang__ && (__x86_64__ || __i386__)
+ asm("rorl %%cl, %0" : "=r"(value) : "0"(value), "c"(rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 31));
+#endif
+}
+
+inline uint64_t pcg_rotr_64(uint64_t value, unsigned int rot) {
+#if 0 && PCG_USE_INLINE_ASM && __clang__ && __x86_64__
+ // For whatever reason, clang actually *does* generate rotq by
+ // itself, so we don't need this code.
+ asm ("rorq %%cl, %0" : "=r" (value) : "0" (value), "c" (rot));
+ return value;
+#else
+ return (value >> rot) | (value << ((-rot) & 63));
+#endif
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_rotr_128(pcg128_t value, unsigned int rot) {
+ return (value >> rot) | (value << ((-rot) & 127));
+}
+#endif
+
+/*
+ * Output functions. These are the core of the PCG generation scheme.
+ */
+
+// XSH RS
+
+inline uint8_t pcg_output_xsh_rs_16_8(uint16_t state) {
+ return (uint8_t)(((state >> 7u) ^ state) >> ((state >> 14u) + 3u));
+}
+
+inline uint16_t pcg_output_xsh_rs_32_16(uint32_t state) {
+ return (uint16_t)(((state >> 11u) ^ state) >> ((state >> 30u) + 11u));
+}
+
+inline uint32_t pcg_output_xsh_rs_64_32(uint64_t state) {
+
+ return (uint32_t)(((state >> 22u) ^ state) >> ((state >> 61u) + 22u));
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsh_rs_128_64(pcg128_t state) {
+ return (uint64_t)(((state >> 43u) ^ state) >> ((state >> 124u) + 45u));
+}
+#endif
+
+// XSH RR
+
+inline uint8_t pcg_output_xsh_rr_16_8(uint16_t state) {
+ return pcg_rotr_8(((state >> 5u) ^ state) >> 5u, state >> 13u);
+}
+
+inline uint16_t pcg_output_xsh_rr_32_16(uint32_t state) {
+ return pcg_rotr_16(((state >> 10u) ^ state) >> 12u, state >> 28u);
+}
+
+inline uint32_t pcg_output_xsh_rr_64_32(uint64_t state) {
+ return pcg_rotr_32(((state >> 18u) ^ state) >> 27u, state >> 59u);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsh_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(((state >> 29u) ^ state) >> 58u, state >> 122u);
+}
+#endif
+
+// RXS M XS
+
+inline uint8_t pcg_output_rxs_m_xs_8_8(uint8_t state) {
+ uint8_t word = ((state >> ((state >> 6u) + 2u)) ^ state) * 217u;
+ return (word >> 6u) ^ word;
+}
+
+inline uint16_t pcg_output_rxs_m_xs_16_16(uint16_t state) {
+ uint16_t word = ((state >> ((state >> 13u) + 3u)) ^ state) * 62169u;
+ return (word >> 11u) ^ word;
+}
+
+inline uint32_t pcg_output_rxs_m_xs_32_32(uint32_t state) {
+ uint32_t word = ((state >> ((state >> 28u) + 4u)) ^ state) * 277803737u;
+ return (word >> 22u) ^ word;
+}
+
+inline uint64_t pcg_output_rxs_m_xs_64_64(uint64_t state) {
+ uint64_t word =
+ ((state >> ((state >> 59u) + 5u)) ^ state) * 12605985483714917081ull;
+ return (word >> 43u) ^ word;
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_output_rxs_m_xs_128_128(pcg128_t state) {
+ pcg128_t word =
+ ((state >> ((state >> 122u) + 6u)) ^ state) *
+ (PCG_128BIT_CONSTANT(17766728186571221404ULL, 12605985483714917081ULL));
+ // 327738287884841127335028083622016905945
+ return (word >> 86u) ^ word;
+}
+#endif
+
+// XSL RR (only defined for >= 64 bits)
+
+inline uint32_t pcg_output_xsl_rr_64_32(uint64_t state) {
+ return pcg_rotr_32(((uint32_t)(state >> 32u)) ^ (uint32_t)state,
+ state >> 59u);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_output_xsl_rr_128_64(pcg128_t state) {
+ return pcg_rotr_64(((uint64_t)(state >> 64u)) ^ (uint64_t)state,
+ state >> 122u);
+}
+#endif
+
+// XSL RR RR (only defined for >= 64 bits)
+
+inline uint64_t pcg_output_xsl_rr_rr_64_64(uint64_t state) {
+ uint32_t rot1 = (uint32_t)(state >> 59u);
+ uint32_t high = (uint32_t)(state >> 32u);
+ uint32_t low = (uint32_t)state;
+ uint32_t xored = high ^ low;
+ uint32_t newlow = pcg_rotr_32(xored, rot1);
+ uint32_t newhigh = pcg_rotr_32(high, newlow & 31u);
+ return (((uint64_t)newhigh) << 32u) | newlow;
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t pcg_output_xsl_rr_rr_128_128(pcg128_t state) {
+ uint32_t rot1 = (uint32_t)(state >> 122u);
+ uint64_t high = (uint64_t)(state >> 64u);
+ uint64_t low = (uint64_t)state;
+ uint64_t xored = high ^ low;
+ uint64_t newlow = pcg_rotr_64(xored, rot1);
+ uint64_t newhigh = pcg_rotr_64(high, newlow & 63u);
+ return (((pcg128_t)newhigh) << 64u) | newlow;
+}
+#endif
+
+#define PCG_DEFAULT_MULTIPLIER_8 141U
+#define PCG_DEFAULT_MULTIPLIER_16 12829U
+#define PCG_DEFAULT_MULTIPLIER_32 747796405U
+#define PCG_DEFAULT_MULTIPLIER_64 6364136223846793005ULL
+
+#define PCG_DEFAULT_INCREMENT_8 77U
+#define PCG_DEFAULT_INCREMENT_16 47989U
+#define PCG_DEFAULT_INCREMENT_32 2891336453U
+#define PCG_DEFAULT_INCREMENT_64 1442695040888963407ULL
+
+#if PCG_HAS_128BIT_OPS
+#define PCG_DEFAULT_MULTIPLIER_128 \
+ PCG_128BIT_CONSTANT(2549297995355413924ULL, 4865540595714422341ULL)
+#define PCG_DEFAULT_INCREMENT_128 \
+ PCG_128BIT_CONSTANT(6364136223846793005ULL, 1442695040888963407ULL)
+#endif
+
+ /*
+ * Static initialization constants (if you can't call srandom for some
+ * bizarre reason).
+ */
+
+#define PCG_STATE_ONESEQ_8_INITIALIZER \
+ { 0xd7U }
+#define PCG_STATE_ONESEQ_16_INITIALIZER \
+ { 0x20dfU }
+#define PCG_STATE_ONESEQ_32_INITIALIZER \
+ { 0x46b56677U }
+#define PCG_STATE_ONESEQ_64_INITIALIZER \
+ { 0x4d595df4d0f33173ULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_ONESEQ_128_INITIALIZER \
+ { PCG_128BIT_CONSTANT(0xb8dc10e158a92392ULL, 0x98046df007ec0a53ULL) }
+#endif
+
+#define PCG_STATE_UNIQUE_8_INITIALIZER PCG_STATE_ONESEQ_8_INITIALIZER
+#define PCG_STATE_UNIQUE_16_INITIALIZER PCG_STATE_ONESEQ_16_INITIALIZER
+#define PCG_STATE_UNIQUE_32_INITIALIZER PCG_STATE_ONESEQ_32_INITIALIZER
+#define PCG_STATE_UNIQUE_64_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_UNIQUE_128_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#endif
+
+#define PCG_STATE_MCG_8_INITIALIZER \
+ { 0xe5U }
+#define PCG_STATE_MCG_16_INITIALIZER \
+ { 0xa5e5U }
+#define PCG_STATE_MCG_32_INITIALIZER \
+ { 0xd15ea5e5U }
+#define PCG_STATE_MCG_64_INITIALIZER \
+ { 0xcafef00dd15ea5e5ULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_MCG_128_INITIALIZER \
+ { PCG_128BIT_CONSTANT(0x0000000000000000ULL, 0xcafef00dd15ea5e5ULL) }
+#endif
+
+#define PCG_STATE_SETSEQ_8_INITIALIZER \
+ { 0x9bU, 0xdbU }
+#define PCG_STATE_SETSEQ_16_INITIALIZER \
+ { 0xe39bU, 0x5bdbU }
+#define PCG_STATE_SETSEQ_32_INITIALIZER \
+ { 0xec02d89bU, 0x94b95bdbU }
+#define PCG_STATE_SETSEQ_64_INITIALIZER \
+ { 0x853c49e6748fea9bULL, 0xda3e39cb94b95bdbULL }
+#if PCG_HAS_128BIT_OPS
+#define PCG_STATE_SETSEQ_128_INITIALIZER \
+ { \
+ PCG_128BIT_CONSTANT(0x979c9a98d8462005ULL, 0x7d3e9cb6cfe0549bULL) \
+ , PCG_128BIT_CONSTANT(0x0000000000000001ULL, 0xda3e39cb94b95bdbULL) \
+ }
+#endif
+
+/* Representations for the oneseq, mcg, and unique variants */
+
+struct pcg_state_8 {
+ uint8_t state;
+};
+
+struct pcg_state_16 {
+ uint16_t state;
+};
+
+struct pcg_state_32 {
+ uint32_t state;
+};
+
+struct pcg_state_64 {
+ uint64_t state;
+};
+
+#if PCG_HAS_128BIT_OPS
+struct pcg_state_128 {
+ pcg128_t state;
+};
+#endif
+
+/* Representations setseq variants */
+
+struct pcg_state_setseq_8 {
+ uint8_t state;
+ uint8_t inc;
+};
+
+struct pcg_state_setseq_16 {
+ uint16_t state;
+ uint16_t inc;
+};
+
+struct pcg_state_setseq_32 {
+ uint32_t state;
+ uint32_t inc;
+};
+
+struct pcg_state_setseq_64 {
+ uint64_t state;
+ uint64_t inc;
+};
+
+#if PCG_HAS_128BIT_OPS
+struct pcg_state_setseq_128 {
+ pcg128_t state;
+ pcg128_t inc;
+};
+#endif
+
+/* Multi-step advance functions (jump-ahead, jump-back) */
+
+extern uint8_t pcg_advance_lcg_8(uint8_t state, uint8_t delta, uint8_t cur_mult,
+ uint8_t cur_plus);
+extern uint16_t pcg_advance_lcg_16(uint16_t state, uint16_t delta,
+ uint16_t cur_mult, uint16_t cur_plus);
+extern uint32_t pcg_advance_lcg_32(uint32_t state, uint32_t delta,
+ uint32_t cur_mult, uint32_t cur_plus);
+extern uint64_t pcg_advance_lcg_64(uint64_t state, uint64_t delta,
+ uint64_t cur_mult, uint64_t cur_plus);
+
+#if PCG_HAS_128BIT_OPS
+extern pcg128_t pcg_advance_lcg_128(pcg128_t state, pcg128_t delta,
+ pcg128_t cur_mult, pcg128_t cur_plus);
+#endif
+
+/* Functions to advance the underlying LCG, one version for each size and
+ * each style. These functions are considered semi-private. There is rarely
+ * a good reason to call them directly.
+ */
+
+inline void pcg_oneseq_8_step_r(struct pcg_state_8 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8 + PCG_DEFAULT_INCREMENT_8;
+}
+
+inline void pcg_oneseq_8_advance_r(struct pcg_state_8 *rng, uint8_t delta) {
+ rng->state = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8,
+ PCG_DEFAULT_INCREMENT_8);
+}
+
+inline void pcg_mcg_8_step_r(struct pcg_state_8 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8;
+}
+
+inline void pcg_mcg_8_advance_r(struct pcg_state_8 *rng, uint8_t delta) {
+ rng->state =
+ pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8, 0u);
+}
+
+inline void pcg_unique_8_step_r(struct pcg_state_8 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_8 + (uint8_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_8_advance_r(struct pcg_state_8 *rng, uint8_t delta) {
+ rng->state = pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8,
+ (uint8_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_8_step_r(struct pcg_state_setseq_8 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_8 + rng->inc;
+}
+
+inline void pcg_setseq_8_advance_r(struct pcg_state_setseq_8 *rng,
+ uint8_t delta) {
+ rng->state =
+ pcg_advance_lcg_8(rng->state, delta, PCG_DEFAULT_MULTIPLIER_8, rng->inc);
+}
+
+inline void pcg_oneseq_16_step_r(struct pcg_state_16 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_16 + PCG_DEFAULT_INCREMENT_16;
+}
+
+inline void pcg_oneseq_16_advance_r(struct pcg_state_16 *rng, uint16_t delta) {
+ rng->state = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16,
+ PCG_DEFAULT_INCREMENT_16);
+}
+
+inline void pcg_mcg_16_step_r(struct pcg_state_16 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16;
+}
+
+inline void pcg_mcg_16_advance_r(struct pcg_state_16 *rng, uint16_t delta) {
+ rng->state =
+ pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16, 0u);
+}
+
+inline void pcg_unique_16_step_r(struct pcg_state_16 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_16 + (uint16_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_16_advance_r(struct pcg_state_16 *rng, uint16_t delta) {
+ rng->state = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16,
+ (uint16_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_16_step_r(struct pcg_state_setseq_16 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_16 + rng->inc;
+}
+
+inline void pcg_setseq_16_advance_r(struct pcg_state_setseq_16 *rng,
+ uint16_t delta) {
+ rng->state = pcg_advance_lcg_16(rng->state, delta, PCG_DEFAULT_MULTIPLIER_16,
+ rng->inc);
+}
+
+inline void pcg_oneseq_32_step_r(struct pcg_state_32 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_32 + PCG_DEFAULT_INCREMENT_32;
+}
+
+inline void pcg_oneseq_32_advance_r(struct pcg_state_32 *rng, uint32_t delta) {
+ rng->state = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32,
+ PCG_DEFAULT_INCREMENT_32);
+}
+
+inline void pcg_mcg_32_step_r(struct pcg_state_32 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32;
+}
+
+inline void pcg_mcg_32_advance_r(struct pcg_state_32 *rng, uint32_t delta) {
+ rng->state =
+ pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32, 0u);
+}
+
+inline void pcg_unique_32_step_r(struct pcg_state_32 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_32 + (uint32_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_32_advance_r(struct pcg_state_32 *rng, uint32_t delta) {
+ rng->state = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32,
+ (uint32_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_32_step_r(struct pcg_state_setseq_32 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_32 + rng->inc;
+}
+
+inline void pcg_setseq_32_advance_r(struct pcg_state_setseq_32 *rng,
+ uint32_t delta) {
+ rng->state = pcg_advance_lcg_32(rng->state, delta, PCG_DEFAULT_MULTIPLIER_32,
+ rng->inc);
+}
+
+inline void pcg_oneseq_64_step_r(struct pcg_state_64 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_64 + PCG_DEFAULT_INCREMENT_64;
+}
+
+inline void pcg_oneseq_64_advance_r(struct pcg_state_64 *rng, uint64_t delta) {
+ rng->state = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ PCG_DEFAULT_INCREMENT_64);
+}
+
+inline void pcg_mcg_64_step_r(struct pcg_state_64 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64;
+}
+
+inline void pcg_mcg_64_advance_r(struct pcg_state_64 *rng, uint64_t delta) {
+ rng->state =
+ pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64, 0u);
+}
+
+inline void pcg_unique_64_step_r(struct pcg_state_64 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_64 + (uint64_t)(((intptr_t)rng) | 1u);
+}
+
+inline void pcg_unique_64_advance_r(struct pcg_state_64 *rng, uint64_t delta) {
+ rng->state = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ (uint64_t)(((intptr_t)rng) | 1u));
+}
+
+inline void pcg_setseq_64_step_r(struct pcg_state_setseq_64 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_64 + rng->inc;
+}
+
+inline void pcg_setseq_64_advance_r(struct pcg_state_setseq_64 *rng,
+ uint64_t delta) {
+ rng->state = pcg_advance_lcg_64(rng->state, delta, PCG_DEFAULT_MULTIPLIER_64,
+ rng->inc);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_step_r(struct pcg_state_128 *rng) {
+ rng->state =
+ rng->state * PCG_DEFAULT_MULTIPLIER_128 + PCG_DEFAULT_INCREMENT_128;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_advance_r(struct pcg_state_128 *rng,
+ pcg128_t delta) {
+ rng->state = pcg_advance_lcg_128(
+ rng->state, delta, PCG_DEFAULT_MULTIPLIER_128, PCG_DEFAULT_INCREMENT_128);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_step_r(struct pcg_state_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_advance_r(struct pcg_state_128 *rng, pcg128_t delta) {
+ rng->state =
+ pcg_advance_lcg_128(rng->state, delta, PCG_DEFAULT_MULTIPLIER_128, 0u);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_step_r(struct pcg_state_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128 +
+ (pcg128_t)(((intptr_t)rng) | 1u);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_advance_r(struct pcg_state_128 *rng,
+ pcg128_t delta) {
+ rng->state =
+ pcg_advance_lcg_128(rng->state, delta, PCG_DEFAULT_MULTIPLIER_128,
+ (pcg128_t)(((intptr_t)rng) | 1u));
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_step_r(struct pcg_state_setseq_128 *rng) {
+ rng->state = rng->state * PCG_DEFAULT_MULTIPLIER_128 + rng->inc;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_advance_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t delta) {
+ rng->state = pcg_advance_lcg_128(rng->state, delta,
+ PCG_DEFAULT_MULTIPLIER_128, rng->inc);
+}
+#endif
+
+/* Functions to seed the RNG state, one version for each size and each
+ * style. Unlike the step functions, regular users can and should call
+ * these functions.
+ */
+
+inline void pcg_oneseq_8_srandom_r(struct pcg_state_8 *rng, uint8_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_8_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_8_step_r(rng);
+}
+
+inline void pcg_mcg_8_srandom_r(struct pcg_state_8 *rng, uint8_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_8_srandom_r(struct pcg_state_8 *rng, uint8_t initstate) {
+ rng->state = 0U;
+ pcg_unique_8_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_8_step_r(rng);
+}
+
+inline void pcg_setseq_8_srandom_r(struct pcg_state_setseq_8 *rng,
+ uint8_t initstate, uint8_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_8_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_8_step_r(rng);
+}
+
+inline void pcg_oneseq_16_srandom_r(struct pcg_state_16 *rng,
+ uint16_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_16_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_16_step_r(rng);
+}
+
+inline void pcg_mcg_16_srandom_r(struct pcg_state_16 *rng, uint16_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_16_srandom_r(struct pcg_state_16 *rng,
+ uint16_t initstate) {
+ rng->state = 0U;
+ pcg_unique_16_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_16_step_r(rng);
+}
+
+inline void pcg_setseq_16_srandom_r(struct pcg_state_setseq_16 *rng,
+ uint16_t initstate, uint16_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_16_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_16_step_r(rng);
+}
+
+inline void pcg_oneseq_32_srandom_r(struct pcg_state_32 *rng,
+ uint32_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_32_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_32_step_r(rng);
+}
+
+inline void pcg_mcg_32_srandom_r(struct pcg_state_32 *rng, uint32_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_32_srandom_r(struct pcg_state_32 *rng,
+ uint32_t initstate) {
+ rng->state = 0U;
+ pcg_unique_32_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_32_step_r(rng);
+}
+
+inline void pcg_setseq_32_srandom_r(struct pcg_state_setseq_32 *rng,
+ uint32_t initstate, uint32_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_32_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_32_step_r(rng);
+}
+
+inline void pcg_oneseq_64_srandom_r(struct pcg_state_64 *rng,
+ uint64_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_64_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_64_step_r(rng);
+}
+
+inline void pcg_mcg_64_srandom_r(struct pcg_state_64 *rng, uint64_t initstate) {
+ rng->state = initstate | 1u;
+}
+
+inline void pcg_unique_64_srandom_r(struct pcg_state_64 *rng,
+ uint64_t initstate) {
+ rng->state = 0U;
+ pcg_unique_64_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_64_step_r(rng);
+}
+
+inline void pcg_setseq_64_srandom_r(struct pcg_state_setseq_64 *rng,
+ uint64_t initstate, uint64_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_64_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_64_step_r(rng);
+}
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_oneseq_128_srandom_r(struct pcg_state_128 *rng,
+ pcg128_t initstate) {
+ rng->state = 0U;
+ pcg_oneseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_oneseq_128_step_r(rng);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_mcg_128_srandom_r(struct pcg_state_128 *rng,
+ pcg128_t initstate) {
+ rng->state = initstate | 1u;
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_unique_128_srandom_r(struct pcg_state_128 *rng,
+ pcg128_t initstate) {
+ rng->state = 0U;
+ pcg_unique_128_step_r(rng);
+ rng->state += initstate;
+ pcg_unique_128_step_r(rng);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline void pcg_setseq_128_srandom_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t initstate, pcg128_t initseq) {
+ rng->state = 0U;
+ rng->inc = (initseq << 1u) | 1u;
+ pcg_setseq_128_step_r(rng);
+ rng->state += initstate;
+ pcg_setseq_128_step_r(rng);
+}
+#endif
+
+/* Now, finally we create each of the individual generators. We provide
+ * a random_r function that provides a random number of the appropriate
+ * type (using the full range of the type) and a boundedrand_r version
+ * that provides a random number in a bounded range (i.e., [0, bound)).
+ *
+ * Implementation notes for boundedrand_r:
+ *
+ * To avoid bias, we need to make the range of the RNG a multiple of
+ * bound, which we do by dropping output less than a threshold.
+ * Let's consider a 32-bit case... A naive scheme to calculate the
+ * threshold would be to do
+ *
+ * uint32_t threshold = 0x100000000ull % bound;
+ *
+ * but 64-bit div/mod is slower than 32-bit div/mod (especially on
+ * 32-bit platforms). In essence, we do
+ *
+ * uint32_t threshold = (0x100000000ull-bound) % bound;
+ *
+ * because this version will calculate the same modulus, but the LHS
+ * value is less than 2^32.
+ *
+ * (Note that using modulo is only wise for good RNGs; poorer RNGs
+ * such as raw LCGs do better using a technique based on division.)
+ * Empirical tests show that division is preferable to modulus for
+ * reducing the range of an RNG. It's faster, and sometimes it can
+ * even be statistically preferable.
+ */
+
+/* Generation functions for XSH RS (xorshift high, random shift output
+ * permutation). Every generator in this and the following sections is
+ * the same two-function pattern instantiated for each state width and
+ * stream type: a random_r that steps the state and permutes the
+ * pre-step value, and a boundedrand_r using the threshold-rejection
+ * scheme described in the implementation notes above. */
+
+/* Step the oneseq 16-bit generator and return 8 output bits computed
+ * from the *pre-step* state via the XSH-RS permutation. */
+inline uint8_t pcg_oneseq_16_xsh_rs_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+/* Unbiased draw in [0, bound) by rejection. threshold is
+ * (2^8 - bound) % bound (the (uint8_t)(-bound) cast wraps mod 2^8 --
+ * see the implementation notes above); outputs below the threshold
+ * are discarded so the accepted range is an exact multiple of bound.
+ * Precondition: bound != 0, otherwise the % divides by zero. */
+inline uint8_t pcg_oneseq_16_xsh_rs_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rs_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rs_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rs_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rs_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsh_rs_64_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsh_rs_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_unique_16_xsh_rs_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_unique_16_xsh_rs_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_unique_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_unique_32_xsh_rs_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_unique_32_xsh_rs_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_64_xsh_rs_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsh_rs_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsh_rs_64_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsh_rs_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t
+pcg_setseq_16_xsh_rs_8_random_r(struct pcg_state_setseq_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_16_xsh_rs_8_boundedrand_r(struct pcg_state_setseq_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rs_16_random_r(struct pcg_state_setseq_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rs_16_boundedrand_r(struct pcg_state_setseq_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rs_32_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rs_32_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rs_64_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rs_64_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_mcg_16_xsh_rs_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_mcg_16_step_r(rng);
+ return pcg_output_xsh_rs_16_8(oldstate);
+}
+
+inline uint8_t pcg_mcg_16_xsh_rs_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_mcg_16_xsh_rs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_mcg_32_xsh_rs_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_mcg_32_step_r(rng);
+ return pcg_output_xsh_rs_32_16(oldstate);
+}
+
+inline uint16_t pcg_mcg_32_xsh_rs_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_mcg_32_xsh_rs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_mcg_64_xsh_rs_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsh_rs_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsh_rs_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsh_rs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rs_64_random_r(struct pcg_state_128 *rng) {
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsh_rs_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rs_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsh_rs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSH RR (xorshift high, random rotate) --
+ * same random_r/boundedrand_r pattern as the XSH RS family above, but
+ * using the pcg_output_xsh_rr_* output permutations. */
+
+inline uint8_t pcg_oneseq_16_xsh_rr_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_16_xsh_rr_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rr_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_oneseq_32_xsh_rr_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsh_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsh_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsh_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_unique_16_xsh_rr_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_unique_16_xsh_rr_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_unique_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_unique_32_xsh_rr_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_unique_32_xsh_rr_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_64_xsh_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsh_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsh_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsh_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t
+pcg_setseq_16_xsh_rr_8_random_r(struct pcg_state_setseq_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_16_xsh_rr_8_boundedrand_r(struct pcg_state_setseq_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rr_16_random_r(struct pcg_state_setseq_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_32_xsh_rr_16_boundedrand_r(struct pcg_state_setseq_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rr_32_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsh_rr_32_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rr_64_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsh_rr_64_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t pcg_mcg_16_xsh_rr_8_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_mcg_16_step_r(rng);
+ return pcg_output_xsh_rr_16_8(oldstate);
+}
+
+inline uint8_t pcg_mcg_16_xsh_rr_8_boundedrand_r(struct pcg_state_16 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_mcg_16_xsh_rr_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_mcg_32_xsh_rr_16_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_mcg_32_step_r(rng);
+ return pcg_output_xsh_rr_32_16(oldstate);
+}
+
+inline uint16_t pcg_mcg_32_xsh_rr_16_boundedrand_r(struct pcg_state_32 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_mcg_32_xsh_rr_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_mcg_64_xsh_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsh_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsh_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsh_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsh_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsh_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsh_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for RXS M XS (random xorshift, multiply,
+ * xorshift). No MCG versions are provided because they don't make
+ * sense when you want to use the entire state: here the output width
+ * equals the state width (note the *_8_8, *_16_16, ... names). */
+
+inline uint8_t pcg_oneseq_8_rxs_m_xs_8_random_r(struct pcg_state_8 *rng) {
+ uint8_t oldstate = rng->state;
+ pcg_oneseq_8_step_r(rng);
+ return pcg_output_rxs_m_xs_8_8(oldstate);
+}
+
+inline uint8_t pcg_oneseq_8_rxs_m_xs_8_boundedrand_r(struct pcg_state_8 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_oneseq_8_rxs_m_xs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t pcg_oneseq_16_rxs_m_xs_16_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_oneseq_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_oneseq_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_16 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_oneseq_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_oneseq_32_rxs_m_xs_32_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_oneseq_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_oneseq_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_32 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t pcg_oneseq_64_rxs_m_xs_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_oneseq_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_rxs_m_xs_128_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_oneseq_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint16_t pcg_unique_16_rxs_m_xs_16_random_r(struct pcg_state_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_unique_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_unique_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_16 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_unique_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t pcg_unique_32_rxs_m_xs_32_random_r(struct pcg_state_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_unique_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_unique_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_32 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t pcg_unique_64_rxs_m_xs_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_unique_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_rxs_m_xs_128_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_unique_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint8_t
+pcg_setseq_8_rxs_m_xs_8_random_r(struct pcg_state_setseq_8 *rng) {
+ uint8_t oldstate = rng->state;
+ pcg_setseq_8_step_r(rng);
+ return pcg_output_rxs_m_xs_8_8(oldstate);
+}
+
+inline uint8_t
+pcg_setseq_8_rxs_m_xs_8_boundedrand_r(struct pcg_state_setseq_8 *rng,
+ uint8_t bound) {
+ uint8_t threshold = ((uint8_t)(-bound)) % bound;
+ for (;;) {
+ uint8_t r = pcg_setseq_8_rxs_m_xs_8_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint16_t
+pcg_setseq_16_rxs_m_xs_16_random_r(struct pcg_state_setseq_16 *rng) {
+ uint16_t oldstate = rng->state;
+ pcg_setseq_16_step_r(rng);
+ return pcg_output_rxs_m_xs_16_16(oldstate);
+}
+
+inline uint16_t
+pcg_setseq_16_rxs_m_xs_16_boundedrand_r(struct pcg_state_setseq_16 *rng,
+ uint16_t bound) {
+ uint16_t threshold = ((uint16_t)(-bound)) % bound;
+ for (;;) {
+ uint16_t r = pcg_setseq_16_rxs_m_xs_16_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint32_t
+pcg_setseq_32_rxs_m_xs_32_random_r(struct pcg_state_setseq_32 *rng) {
+ uint32_t oldstate = rng->state;
+ pcg_setseq_32_step_r(rng);
+ return pcg_output_rxs_m_xs_32_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_32_rxs_m_xs_32_boundedrand_r(struct pcg_state_setseq_32 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_32_rxs_m_xs_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+inline uint64_t
+pcg_setseq_64_rxs_m_xs_64_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_rxs_m_xs_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_setseq_64_rxs_m_xs_64_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_64_rxs_m_xs_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_rxs_m_xs_128_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_rxs_m_xs_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_rxs_m_xs_128_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_setseq_128_rxs_m_xs_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSL RR (xorshift low, random rotate).
+ * Only defined for "large" types: the output is half the state width,
+ * as the *_64_32 and *_128_64 output-function names indicate. */
+
+inline uint32_t pcg_oneseq_64_xsl_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_oneseq_64_xsl_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_oneseq_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_oneseq_128_xsl_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_oneseq_128_xsl_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t pcg_unique_64_xsl_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_unique_64_xsl_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_unique_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_unique_128_xsl_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_unique_128_xsl_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t
+pcg_setseq_64_xsl_rr_32_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t
+pcg_setseq_64_xsl_rr_32_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_setseq_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsl_rr_64_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t
+pcg_setseq_128_xsl_rr_64_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint32_t pcg_mcg_64_xsl_rr_32_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_mcg_64_step_r(rng);
+ return pcg_output_xsl_rr_64_32(oldstate);
+}
+
+inline uint32_t pcg_mcg_64_xsl_rr_32_boundedrand_r(struct pcg_state_64 *rng,
+ uint32_t bound) {
+ uint32_t threshold = -bound % bound;
+ for (;;) {
+ uint32_t r = pcg_mcg_64_xsl_rr_32_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsl_rr_64_random_r(struct pcg_state_128 *rng) {
+ pcg_mcg_128_step_r(rng);
+ return pcg_output_xsl_rr_128_64(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline uint64_t pcg_mcg_128_xsl_rr_64_boundedrand_r(struct pcg_state_128 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_mcg_128_xsl_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Generation functions for XSL RR RR (only defined for "large" types).
+ * Unlike XSL RR, output width equals state width here (64 -> 64,
+ * 128 -> 128), as the *_64_64 / *_128_128 output names indicate. */
+
+inline uint64_t pcg_oneseq_64_xsl_rr_rr_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_oneseq_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_oneseq_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_oneseq_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_xsl_rr_rr_128_random_r(struct pcg_state_128 *rng) {
+ pcg_oneseq_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_oneseq_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_oneseq_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint64_t pcg_unique_64_xsl_rr_rr_64_random_r(struct pcg_state_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_unique_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_unique_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_unique_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_xsl_rr_rr_128_random_r(struct pcg_state_128 *rng) {
+ pcg_unique_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_unique_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_unique_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+inline uint64_t
+pcg_setseq_64_xsl_rr_rr_64_random_r(struct pcg_state_setseq_64 *rng) {
+ uint64_t oldstate = rng->state;
+ pcg_setseq_64_step_r(rng);
+ return pcg_output_xsl_rr_rr_64_64(oldstate);
+}
+
+inline uint64_t
+pcg_setseq_64_xsl_rr_rr_64_boundedrand_r(struct pcg_state_setseq_64 *rng,
+ uint64_t bound) {
+ uint64_t threshold = -bound % bound;
+ for (;;) {
+ uint64_t r = pcg_setseq_64_xsl_rr_rr_64_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_xsl_rr_rr_128_random_r(struct pcg_state_setseq_128 *rng) {
+ pcg_setseq_128_step_r(rng);
+ return pcg_output_xsl_rr_rr_128_128(rng->state);
+}
+#endif
+
+#if PCG_HAS_128BIT_OPS
+inline pcg128_t
+pcg_setseq_128_xsl_rr_rr_128_boundedrand_r(struct pcg_state_setseq_128 *rng,
+ pcg128_t bound) {
+ pcg128_t threshold = -bound % bound;
+ for (;;) {
+ pcg128_t r = pcg_setseq_128_xsl_rr_rr_128_random_r(rng);
+ if (r >= threshold)
+ return r % bound;
+ }
+}
+#endif
+
+/* Convenience aliases for the standard 32-bit-output generators.
+ * Suffixes: (none) = setseq (per-instance stream), s = oneseq (single
+ * stream), u = unique (stream chosen from the state's address),
+ * f = mcg-based with XSH-RS output (presumably the "fast" variant --
+ * confirm against the PCG documentation). */
+//// Typedefs
+typedef struct pcg_state_setseq_64 pcg32_random_t;
+typedef struct pcg_state_64 pcg32s_random_t;
+typedef struct pcg_state_64 pcg32u_random_t;
+typedef struct pcg_state_64 pcg32f_random_t;
+//// random_r
+#define pcg32_random_r pcg_setseq_64_xsh_rr_32_random_r
+#define pcg32s_random_r pcg_oneseq_64_xsh_rr_32_random_r
+#define pcg32u_random_r pcg_unique_64_xsh_rr_32_random_r
+#define pcg32f_random_r pcg_mcg_64_xsh_rs_32_random_r
+//// boundedrand_r
+#define pcg32_boundedrand_r pcg_setseq_64_xsh_rr_32_boundedrand_r
+#define pcg32s_boundedrand_r pcg_oneseq_64_xsh_rr_32_boundedrand_r
+#define pcg32u_boundedrand_r pcg_unique_64_xsh_rr_32_boundedrand_r
+#define pcg32f_boundedrand_r pcg_mcg_64_xsh_rs_32_boundedrand_r
+//// srandom_r
+#define pcg32_srandom_r pcg_setseq_64_srandom_r
+#define pcg32s_srandom_r pcg_oneseq_64_srandom_r
+#define pcg32u_srandom_r pcg_unique_64_srandom_r
+#define pcg32f_srandom_r pcg_mcg_64_srandom_r
+//// advance_r
+// advance_r targets (pcg_*_advance_r) are declared elsewhere in this file.
+#define pcg32_advance_r pcg_setseq_64_advance_r
+#define pcg32s_advance_r pcg_oneseq_64_advance_r
+#define pcg32u_advance_r pcg_unique_64_advance_r
+#define pcg32f_advance_r pcg_mcg_64_advance_r
+
+/* 64-bit-output generator aliases: 128-bit state with XSL-RR output.
+ * Same suffix scheme as the pcg32 family above; available only when
+ * 128-bit integer operations exist. */
+#if PCG_HAS_128BIT_OPS
+//// Typedefs
+typedef struct pcg_state_setseq_128 pcg64_random_t;
+typedef struct pcg_state_128 pcg64s_random_t;
+typedef struct pcg_state_128 pcg64u_random_t;
+typedef struct pcg_state_128 pcg64f_random_t;
+//// random_r
+#define pcg64_random_r pcg_setseq_128_xsl_rr_64_random_r
+#define pcg64s_random_r pcg_oneseq_128_xsl_rr_64_random_r
+#define pcg64u_random_r pcg_unique_128_xsl_rr_64_random_r
+#define pcg64f_random_r pcg_mcg_128_xsl_rr_64_random_r
+//// boundedrand_r
+#define pcg64_boundedrand_r pcg_setseq_128_xsl_rr_64_boundedrand_r
+#define pcg64s_boundedrand_r pcg_oneseq_128_xsl_rr_64_boundedrand_r
+#define pcg64u_boundedrand_r pcg_unique_128_xsl_rr_64_boundedrand_r
+#define pcg64f_boundedrand_r pcg_mcg_128_xsl_rr_64_boundedrand_r
+//// srandom_r
+#define pcg64_srandom_r pcg_setseq_128_srandom_r
+#define pcg64s_srandom_r pcg_oneseq_128_srandom_r
+#define pcg64u_srandom_r pcg_unique_128_srandom_r
+#define pcg64f_srandom_r pcg_mcg_128_srandom_r
+//// advance_r
+#define pcg64_advance_r pcg_setseq_128_advance_r
+#define pcg64s_advance_r pcg_oneseq_128_advance_r
+#define pcg64u_advance_r pcg_unique_128_advance_r
+#define pcg64f_advance_r pcg_mcg_128_advance_r
+#endif
+
+/* "si" family: oneseq state with RXS-M-XS output of the full state
+ * width (8 -> 8, 16 -> 16, ...), so output width equals state width. */
+//// Typedefs
+typedef struct pcg_state_8 pcg8si_random_t;
+typedef struct pcg_state_16 pcg16si_random_t;
+typedef struct pcg_state_32 pcg32si_random_t;
+typedef struct pcg_state_64 pcg64si_random_t;
+//// random_r
+#define pcg8si_random_r pcg_oneseq_8_rxs_m_xs_8_random_r
+#define pcg16si_random_r pcg_oneseq_16_rxs_m_xs_16_random_r
+#define pcg32si_random_r pcg_oneseq_32_rxs_m_xs_32_random_r
+#define pcg64si_random_r pcg_oneseq_64_rxs_m_xs_64_random_r
+//// boundedrand_r
+#define pcg8si_boundedrand_r pcg_oneseq_8_rxs_m_xs_8_boundedrand_r
+#define pcg16si_boundedrand_r pcg_oneseq_16_rxs_m_xs_16_boundedrand_r
+#define pcg32si_boundedrand_r pcg_oneseq_32_rxs_m_xs_32_boundedrand_r
+#define pcg64si_boundedrand_r pcg_oneseq_64_rxs_m_xs_64_boundedrand_r
+//// srandom_r
+#define pcg8si_srandom_r pcg_oneseq_8_srandom_r
+#define pcg16si_srandom_r pcg_oneseq_16_srandom_r
+#define pcg32si_srandom_r pcg_oneseq_32_srandom_r
+#define pcg64si_srandom_r pcg_oneseq_64_srandom_r
+//// advance_r
+#define pcg8si_advance_r pcg_oneseq_8_advance_r
+#define pcg16si_advance_r pcg_oneseq_16_advance_r
+#define pcg32si_advance_r pcg_oneseq_32_advance_r
+#define pcg64si_advance_r pcg_oneseq_64_advance_r
+
+/* 128-bit member of the "si" family, guarded like the other 128-bit
+ * variants. */
+#if PCG_HAS_128BIT_OPS
+typedef struct pcg_state_128 pcg128si_random_t;
+#define pcg128si_random_r pcg_oneseq_128_rxs_m_xs_128_random_r
+#define pcg128si_boundedrand_r pcg_oneseq_128_rxs_m_xs_128_boundedrand_r
+#define pcg128si_srandom_r pcg_oneseq_128_srandom_r
+#define pcg128si_advance_r pcg_oneseq_128_advance_r
+#endif
+
+//// Typedefs
+typedef struct pcg_state_setseq_8 pcg8i_random_t;
+typedef struct pcg_state_setseq_16 pcg16i_random_t;
+typedef struct pcg_state_setseq_32 pcg32i_random_t;
+typedef struct pcg_state_setseq_64 pcg64i_random_t;
+//// random_r
+#define pcg8i_random_r pcg_setseq_8_rxs_m_xs_8_random_r
+#define pcg16i_random_r pcg_setseq_16_rxs_m_xs_16_random_r
+#define pcg32i_random_r pcg_setseq_32_rxs_m_xs_32_random_r
+#define pcg64i_random_r pcg_setseq_64_rxs_m_xs_64_random_r
+//// boundedrand_r
+#define pcg8i_boundedrand_r pcg_setseq_8_rxs_m_xs_8_boundedrand_r
+#define pcg16i_boundedrand_r pcg_setseq_16_rxs_m_xs_16_boundedrand_r
+#define pcg32i_boundedrand_r pcg_setseq_32_rxs_m_xs_32_boundedrand_r
+#define pcg64i_boundedrand_r pcg_setseq_64_rxs_m_xs_64_boundedrand_r
+//// srandom_r
+#define pcg8i_srandom_r pcg_setseq_8_srandom_r
+#define pcg16i_srandom_r pcg_setseq_16_srandom_r
+#define pcg32i_srandom_r pcg_setseq_32_srandom_r
+#define pcg64i_srandom_r pcg_setseq_64_srandom_r
+//// advance_r
+#define pcg8i_advance_r pcg_setseq_8_advance_r
+#define pcg16i_advance_r pcg_setseq_16_advance_r
+#define pcg32i_advance_r pcg_setseq_32_advance_r
+#define pcg64i_advance_r pcg_setseq_64_advance_r
+
+#if PCG_HAS_128BIT_OPS
+typedef struct pcg_state_setseq_128 pcg128i_random_t;
+#define pcg128i_random_r pcg_setseq_128_rxs_m_xs_128_random_r
+#define pcg128i_boundedrand_r pcg_setseq_128_rxs_m_xs_128_boundedrand_r
+#define pcg128i_srandom_r pcg_setseq_128_srandom_r
+#define pcg128i_advance_r pcg_setseq_128_advance_r
+#endif
+
+extern uint32_t pcg32_random();
+extern uint32_t pcg32_boundedrand(uint32_t bound);
+extern void pcg32_srandom(uint64_t seed, uint64_t seq);
+extern void pcg32_advance(uint64_t delta);
+
+#if PCG_HAS_128BIT_OPS
+extern uint64_t pcg64_random();
+extern uint64_t pcg64_boundedrand(uint64_t bound);
+extern void pcg64_srandom(pcg128_t seed, pcg128_t seq);
+extern void pcg64_advance(pcg128_t delta);
+#endif
+
+/*
+ * Static initialization constants (if you can't call srandom for some
+ * bizarre reason).
+ */
+
+#define PCG32_INITIALIZER PCG_STATE_SETSEQ_64_INITIALIZER
+#define PCG32U_INITIALIZER PCG_STATE_UNIQUE_64_INITIALIZER
+#define PCG32S_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#define PCG32F_INITIALIZER PCG_STATE_MCG_64_INITIALIZER
+
+#if PCG_HAS_128BIT_OPS
+#define PCG64_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+#define PCG64U_INITIALIZER PCG_STATE_UNIQUE_128_INITIALIZER
+#define PCG64S_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#define PCG64F_INITIALIZER PCG_STATE_MCG_128_INITIALIZER
+#endif
+
+#define PCG8SI_INITIALIZER PCG_STATE_ONESEQ_8_INITIALIZER
+#define PCG16SI_INITIALIZER PCG_STATE_ONESEQ_16_INITIALIZER
+#define PCG32SI_INITIALIZER PCG_STATE_ONESEQ_32_INITIALIZER
+#define PCG64SI_INITIALIZER PCG_STATE_ONESEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG128SI_INITIALIZER PCG_STATE_ONESEQ_128_INITIALIZER
+#endif
+
+#define PCG8I_INITIALIZER PCG_STATE_SETSEQ_8_INITIALIZER
+#define PCG16I_INITIALIZER PCG_STATE_SETSEQ_16_INITIALIZER
+#define PCG32I_INITIALIZER PCG_STATE_SETSEQ_32_INITIALIZER
+#define PCG64I_INITIALIZER PCG_STATE_SETSEQ_64_INITIALIZER
+#if PCG_HAS_128BIT_OPS
+#define PCG128I_INITIALIZER PCG_STATE_SETSEQ_128_INITIALIZER
+#endif
+
+#if __cplusplus
+}
+#endif
+
+#endif // PCG_VARIANTS_H_INCLUDED
diff --git a/numpy/random/src/philox/LICENSE.md b/numpy/random/src/philox/LICENSE.md
new file mode 100644
index 000000000..9738e44de
--- /dev/null
+++ b/numpy/random/src/philox/LICENSE.md
@@ -0,0 +1,31 @@
+# PHILOX
+
+Copyright 2010-2012, D. E. Shaw Research.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+ notice, this list of conditions, and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions, and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+* Neither the name of D. E. Shaw Research nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/numpy/random/src/philox/philox-benchmark.c b/numpy/random/src/philox/philox-benchmark.c
new file mode 100644
index 000000000..df5814d5f
--- /dev/null
+++ b/numpy/random/src/philox/philox-benchmark.c
@@ -0,0 +1,38 @@
+/*
+ * Simple benchamrk command
+ *
+ * cl philox-benchmark.c /Ox
+ *
+ * gcc philox-benchmark.c -O3 -o philox-benchmark
+ *
+ * Requires the Random123 directory containing header files to be located in the
+ * same directory (not included).
+ */
+#include "Random123/philox.h"
+#include <inttypes.h>
+#include <stdio.h>
+#include <time.h>
+
+#define N 1000000000
+
+int main() {
+ philox4x64_ctr_t ctr = {{0, 0, 0, 0}};
+ philox4x64_key_t key = {{0, 0xDEADBEAF}};
+ philox4x64_ctr_t out;
+ uint64_t count = 0, sum = 0;
+ int i, j;
+ clock_t begin = clock();
+ for (i = 0; i < N / 4UL; i++) {
+ ctr.v[0]++;
+ out = philox4x64_R(philox4x64_rounds, ctr, key);
+ for (j = 0; j < 4; j++) {
+ sum += out.v[j];
+ count++;
+ }
+ }
+ clock_t end = clock();
+ double time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
+ printf("0x%" PRIx64 "\ncount: %" PRIu64 "\n", sum, count);
+ printf("%" PRIu64 " randoms per second\n",
+ (uint64_t)(N / time_spent) / 1000000 * 1000000);
+}
diff --git a/numpy/random/src/philox/philox-test-data-gen.c b/numpy/random/src/philox/philox-test-data-gen.c
new file mode 100644
index 000000000..a5fcaa690
--- /dev/null
+++ b/numpy/random/src/philox/philox-test-data-gen.c
@@ -0,0 +1,82 @@
+/*
+ * Generate testing csv files
+ *
+ * cl philox-test-data-gen.c /Ox
+ * philox-test-data-gen.exe
+ *
+ * gcc philox-test-data-gen.c -o philox-test-data-gen
+ * ./philox-test-data-gen
+ *
+ * Requires the Random123 directory containing header files to be located in the
+ * same directory (not included).
+ *
+ */
+
+#include "../splitmix64/splitmix64.h"
+#include "Random123/philox.h"
+#include <inttypes.h>
+#include <stdio.h>
+
+#define N 1000
+
+int main() {
+ philox4x64_ctr_t ctr = {{0, 0, 0, 0}};
+ philox4x64_key_t key = {{0, 0}};
+ uint64_t state, seed = 0xDEADBEAF;
+ philox4x64_ctr_t out;
+ uint64_t store[N];
+ state = seed;
+ int i, j;
+ for (i = 0; i < 2; i++) {
+ key.v[i] = splitmix64_next(&state);
+ }
+ for (i = 0; i < N / 4UL; i++) {
+ ctr.v[0]++;
+ out = philox4x64_R(philox4x64_rounds, ctr, key);
+ for (j = 0; j < 4; j++) {
+ store[i * 4 + j] = out.v[j];
+ }
+ }
+
+ FILE *fp;
+ fp = fopen("philox-testset-1.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+
+ ctr.v[0] = 0;
+ state = seed = 0;
+ for (i = 0; i < 2; i++) {
+ key.v[i] = splitmix64_next(&state);
+ }
+ for (i = 0; i < N / 4UL; i++) {
+ ctr.v[0]++;
+ out = philox4x64_R(philox4x64_rounds, ctr, key);
+ for (j = 0; j < 4; j++) {
+ store[i * 4 + j] = out.v[j];
+ }
+ }
+
+ fp = fopen("philox-testset-2.csv", "w");
+ if (fp == NULL) {
+ printf("Couldn't open file\n");
+ return -1;
+ }
+ fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
+ for (i = 0; i < N; i++) {
+ fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
+ if (i == 999) {
+ printf("%d, 0x%" PRIx64 "\n", i, store[i]);
+ }
+ }
+ fclose(fp);
+}
diff --git a/numpy/random/src/philox/philox.c b/numpy/random/src/philox/philox.c
new file mode 100644
index 000000000..6f2fad5a4
--- /dev/null
+++ b/numpy/random/src/philox/philox.c
@@ -0,0 +1,29 @@
+#include "philox.h"
+
+extern NPY_INLINE uint64_t philox_next64(philox_state *state);
+
+extern NPY_INLINE uint32_t philox_next32(philox_state *state);
+
+extern void philox_jump(philox_state *state) {
+ /* Advances state as-if 2^128 draws were made */
+ state->ctr->v[2]++;
+ if (state->ctr->v[2] == 0) {
+ state->ctr->v[3]++;
+ }
+}
+
+extern void philox_advance(uint64_t *step, philox_state *state) {
+ int i, carry = 0;
+ uint64_t v_orig;
+ for (i = 0; i < 4; i++) {
+ if (carry == 1) {
+ state->ctr->v[i]++;
+ carry = state->ctr->v[i] == 0 ? 1 : 0;
+ }
+ v_orig = state->ctr->v[i];
+ state->ctr->v[i] += step[i];
+ if (state->ctr->v[i] < v_orig && carry == 0) {
+ carry = 1;
+ }
+ }
+}
diff --git a/numpy/random/src/philox/philox.h b/numpy/random/src/philox/philox.h
new file mode 100644
index 000000000..c72424a97
--- /dev/null
+++ b/numpy/random/src/philox/philox.h
@@ -0,0 +1,248 @@
+#ifndef _RANDOMDGEN__PHILOX_H_
+#define _RANDOMDGEN__PHILOX_H_
+
+#include "numpy/npy_common.h"
+#include <inttypes.h>
+
+#define PHILOX_BUFFER_SIZE 4L
+
+struct r123array2x64 {
+ uint64_t v[2];
+};
+struct r123array4x64 {
+ uint64_t v[4];
+};
+
+enum r123_enum_philox4x64 { philox4x64_rounds = 10 };
+typedef struct r123array4x64 philox4x64_ctr_t;
+typedef struct r123array2x64 philox4x64_key_t;
+typedef struct r123array2x64 philox4x64_ukey_t;
+
+static NPY_INLINE struct r123array2x64
+_philox4x64bumpkey(struct r123array2x64 key) {
+ key.v[0] += (0x9E3779B97F4A7C15ULL);
+ key.v[1] += (0xBB67AE8584CAA73BULL);
+ return key;
+}
+
+/* Prefer uint128 if available: GCC, clang, ICC */
+#ifdef __SIZEOF_INT128__
+static NPY_INLINE uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
+ __uint128_t product = ((__uint128_t)a) * ((__uint128_t)b);
+ *hip = product >> 64;
+ return (uint64_t)product;
+}
+#else
+#ifdef _WIN32
+#include <intrin.h>
+#if defined(_WIN64) && defined(_M_AMD64)
+#pragma intrinsic(_umul128)
+#else
+#pragma intrinsic(__emulu)
+static NPY_INLINE uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
+
+ uint64_t a_lo, a_hi, b_lo, b_hi, a_x_b_hi, a_x_b_mid, a_x_b_lo, b_x_a_mid,
+ carry_bit;
+ a_lo = (uint32_t)a;
+ a_hi = a >> 32;
+ b_lo = (uint32_t)b;
+ b_hi = b >> 32;
+
+ a_x_b_hi = __emulu(a_hi, b_hi);
+ a_x_b_mid = __emulu(a_hi, b_lo);
+ b_x_a_mid = __emulu(b_hi, a_lo);
+ a_x_b_lo = __emulu(a_lo, b_lo);
+
+ carry_bit = ((uint64_t)(uint32_t)a_x_b_mid + (uint64_t)(uint32_t)b_x_a_mid +
+ (a_x_b_lo >> 32)) >>
+ 32;
+
+ *high = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit;
+
+ return a_x_b_lo + ((a_x_b_mid + b_x_a_mid) << 32);
+}
+#endif
+static NPY_INLINE uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
+ return _umul128(a, b, hip);
+}
+#else
+static NPY_INLINE uint64_t _umul128(uint64_t a, uint64_t b, uint64_t *high) {
+
+ uint64_t a_lo, a_hi, b_lo, b_hi, a_x_b_hi, a_x_b_mid, a_x_b_lo, b_x_a_mid,
+ carry_bit;
+ a_lo = (uint32_t)a;
+ a_hi = a >> 32;
+ b_lo = (uint32_t)b;
+ b_hi = b >> 32;
+
+ a_x_b_hi = a_hi * b_hi;
+ a_x_b_mid = a_hi * b_lo;
+ b_x_a_mid = b_hi * a_lo;
+ a_x_b_lo = a_lo * b_lo;
+
+ carry_bit = ((uint64_t)(uint32_t)a_x_b_mid + (uint64_t)(uint32_t)b_x_a_mid +
+ (a_x_b_lo >> 32)) >>
+ 32;
+
+ *high = a_x_b_hi + (a_x_b_mid >> 32) + (b_x_a_mid >> 32) + carry_bit;
+
+ return a_x_b_lo + ((a_x_b_mid + b_x_a_mid) << 32);
+}
+static NPY_INLINE uint64_t mulhilo64(uint64_t a, uint64_t b, uint64_t *hip) {
+ return _umul128(a, b, hip);
+}
+#endif
+#endif
+
+static NPY_INLINE struct r123array4x64 _philox4x64round(struct r123array4x64 ctr,
+ struct r123array2x64 key);
+
+static NPY_INLINE struct r123array4x64 _philox4x64round(struct r123array4x64 ctr,
+ struct r123array2x64 key) {
+ uint64_t hi0;
+ uint64_t hi1;
+ uint64_t lo0 = mulhilo64((0xD2E7470EE14C6C93ULL), ctr.v[0], &hi0);
+ uint64_t lo1 = mulhilo64((0xCA5A826395121157ULL), ctr.v[2], &hi1);
+ struct r123array4x64 out = {
+ {hi1 ^ ctr.v[1] ^ key.v[0], lo1, hi0 ^ ctr.v[3] ^ key.v[1], lo0}};
+ return out;
+}
+
+static NPY_INLINE philox4x64_key_t philox4x64keyinit(philox4x64_ukey_t uk) {
+ return uk;
+}
+static NPY_INLINE philox4x64_ctr_t philox4x64_R(unsigned int R,
+ philox4x64_ctr_t ctr,
+ philox4x64_key_t key);
+
+static NPY_INLINE philox4x64_ctr_t philox4x64_R(unsigned int R,
+ philox4x64_ctr_t ctr,
+ philox4x64_key_t key) {
+ if (R > 0) {
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 1) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 2) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 3) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 4) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 5) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 6) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 7) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 8) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 9) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 10) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 11) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 12) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 13) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 14) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ if (R > 15) {
+ key = _philox4x64bumpkey(key);
+ ctr = _philox4x64round(ctr, key);
+ }
+ return ctr;
+}
+
+typedef struct s_philox_state {
+ philox4x64_ctr_t *ctr;
+ philox4x64_key_t *key;
+ int buffer_pos;
+ uint64_t buffer[PHILOX_BUFFER_SIZE];
+ int has_uint32;
+ uint32_t uinteger;
+} philox_state;
+
+static NPY_INLINE uint64_t philox_next(philox_state *state) {
+ uint64_t out;
+ int i;
+ philox4x64_ctr_t ct;
+
+ if (state->buffer_pos < PHILOX_BUFFER_SIZE) {
+ out = state->buffer[state->buffer_pos];
+ state->buffer_pos++;
+ return out;
+ }
+ /* generate 4 new uint64_t */
+ state->ctr->v[0]++;
+ /* Handle carry */
+ if (state->ctr->v[0] == 0) {
+ state->ctr->v[1]++;
+ if (state->ctr->v[1] == 0) {
+ state->ctr->v[2]++;
+ if (state->ctr->v[2] == 0) {
+ state->ctr->v[3]++;
+ }
+ }
+ }
+ ct = philox4x64_R(philox4x64_rounds, *state->ctr, *state->key);
+ for (i = 0; i < 4; i++) {
+ state->buffer[i] = ct.v[i];
+ }
+ state->buffer_pos = 1;
+ return state->buffer[0];
+}
+
+static NPY_INLINE uint64_t philox_next64(philox_state *state) {
+ return philox_next(state);
+}
+
+static NPY_INLINE uint32_t philox_next32(philox_state *state) {
+ uint64_t next;
+
+ if (state->has_uint32) {
+ state->has_uint32 = 0;
+ return state->uinteger;
+ }
+ next = philox_next(state);
+
+ state->has_uint32 = 1;
+ state->uinteger = (uint32_t)(next >> 32);
+ return (uint32_t)(next & 0xffffffff);
+}
+
+extern void philox_jump(philox_state *state);
+
+extern void philox_advance(uint64_t *step, philox_state *state);
+
+#endif
diff --git a/numpy/random/src/sfc64/LICENSE.md b/numpy/random/src/sfc64/LICENSE.md
new file mode 100644
index 000000000..21dd604af
--- /dev/null
+++ b/numpy/random/src/sfc64/LICENSE.md
@@ -0,0 +1,27 @@
+# SFC64
+
+## The MIT License
+
+Adapted from a C++ implementation of Chris Doty-Humphrey's SFC PRNG.
+
+https://gist.github.com/imneme/f1f7821f07cf76504a97f6537c818083
+
+Copyright (c) 2018 Melissa E. O'Neill
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/numpy/random/src/sfc64/sfc64.c b/numpy/random/src/sfc64/sfc64.c
new file mode 100644
index 000000000..5546fff08
--- /dev/null
+++ b/numpy/random/src/sfc64/sfc64.c
@@ -0,0 +1,39 @@
+#include "sfc64.h"
+
+extern void sfc64_set_seed(sfc64_state *state, uint64_t *seed) {
+ /* Conservatively stick with the original formula. With SeedSequence, it
+ * might be fine to just set the state with 4 uint64s and be done.
+ */
+ int i;
+
+ state->s[0] = seed[0];
+ state->s[1] = seed[1];
+ state->s[2] = seed[2];
+ state->s[3] = 1;
+
+ for (i=0; i<12; i++) {
+ (void)sfc64_next(state->s);
+ }
+}
+
+extern void sfc64_get_state(sfc64_state *state, uint64_t *state_arr, int *has_uint32,
+ uint32_t *uinteger) {
+ int i;
+
+ for (i=0; i<4; i++) {
+ state_arr[i] = state->s[i];
+ }
+ has_uint32[0] = state->has_uint32;
+ uinteger[0] = state->uinteger;
+}
+
+extern void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32,
+ uint32_t uinteger) {
+ int i;
+
+ for (i=0; i<4; i++) {
+ state->s[i] = state_arr[i];
+ }
+ state->has_uint32 = has_uint32;
+ state->uinteger = uinteger;
+}
diff --git a/numpy/random/src/sfc64/sfc64.h b/numpy/random/src/sfc64/sfc64.h
new file mode 100644
index 000000000..75c4118d3
--- /dev/null
+++ b/numpy/random/src/sfc64/sfc64.h
@@ -0,0 +1,60 @@
+#ifndef _RANDOMDGEN__SFC64_H_
+#define _RANDOMDGEN__SFC64_H_
+
+#include "numpy/npy_common.h"
+#include <inttypes.h>
+#ifdef _WIN32
+#include <stdlib.h>
+#endif
+
+typedef struct s_sfc64_state {
+ uint64_t s[4];
+ int has_uint32;
+ uint32_t uinteger;
+} sfc64_state;
+
+
+static NPY_INLINE uint64_t rotl(const uint64_t value, unsigned int rot) {
+#ifdef _WIN32
+ return _rotl64(value, rot);
+#else
+ return (value << rot) | (value >> ((-rot) & 63));
+#endif
+}
+
+static NPY_INLINE uint64_t sfc64_next(uint64_t *s) {
+ const uint64_t tmp = s[0] + s[1] + s[3]++;
+
+ s[0] = s[1] ^ (s[1] >> 11);
+ s[1] = s[2] + (s[2] << 3);
+ s[2] = rotl(s[2], 24) + tmp;
+
+ return tmp;
+}
+
+
+static NPY_INLINE uint64_t sfc64_next64(sfc64_state *state) {
+ return sfc64_next(&state->s[0]);
+}
+
+static NPY_INLINE uint32_t sfc64_next32(sfc64_state *state) {
+ uint64_t next;
+ if (state->has_uint32) {
+ state->has_uint32 = 0;
+ return state->uinteger;
+ }
+ next = sfc64_next(&state->s[0]);
+ state->has_uint32 = 1;
+ state->uinteger = (uint32_t)(next >> 32);
+ return (uint32_t)(next & 0xffffffff);
+}
+
+void sfc64_set_seed(sfc64_state *state, uint64_t *seed);
+
+void sfc64_get_state(sfc64_state *state, uint64_t *state_arr, int *has_uint32,
+ uint32_t *uinteger);
+
+void sfc64_set_state(sfc64_state *state, uint64_t *state_arr, int has_uint32,
+ uint32_t uinteger);
+
+#endif
diff --git a/numpy/random/src/splitmix64/LICENSE.md b/numpy/random/src/splitmix64/LICENSE.md
new file mode 100644
index 000000000..3c4d73b92
--- /dev/null
+++ b/numpy/random/src/splitmix64/LICENSE.md
@@ -0,0 +1,9 @@
+# SPLITMIX64
+
+Written in 2015 by Sebastiano Vigna (vigna@acm.org)
+
+To the extent possible under law, the author has dedicated all copyright
+and related and neighboring rights to this software to the public domain
+worldwide. This software is distributed without any warranty.
+
+See <http://creativecommons.org/publicdomain/zero/1.0/>. \ No newline at end of file
diff --git a/numpy/random/src/splitmix64/splitmix64.c b/numpy/random/src/splitmix64/splitmix64.c
new file mode 100644
index 000000000..79a845982
--- /dev/null
+++ b/numpy/random/src/splitmix64/splitmix64.c
@@ -0,0 +1,29 @@
+/* Written in 2015 by Sebastiano Vigna (vigna@acm.org)
+
+To the extent possible under law, the author has dedicated all copyright
+and related and neighboring rights to this software to the public domain
+worldwide. This software is distributed without any warranty.
+
+See <http://creativecommons.org/publicdomain/zero/1.0/>.
+
+Modified 2018 by Kevin Sheppard. Modifications licensed under the NCSA
+license.
+*/
+
+/* This is a fixed-increment version of Java 8's SplittableRandom generator
+ See http://dx.doi.org/10.1145/2714064.2660195 and
+ http://docs.oracle.com/javase/8/docs/api/java/util/SplittableRandom.html
+
+ It is a very fast generator passing BigCrush, and it can be useful if
+ for some reason you absolutely want 64 bits of state; otherwise, we
+ rather suggest to use a xoroshiro128+ (for moderately parallel
+ computations) or xorshift1024* (for massively parallel computations)
+ generator. */
+
+#include "splitmix64.h"
+
+extern inline uint64_t splitmix64_next(uint64_t *state);
+
+extern inline uint64_t splitmix64_next64(splitmix64_state *state);
+
+extern inline uint32_t splitmix64_next32(splitmix64_state *state);
diff --git a/numpy/random/src/splitmix64/splitmix64.h b/numpy/random/src/splitmix64/splitmix64.h
new file mode 100644
index 000000000..d5877905e
--- /dev/null
+++ b/numpy/random/src/splitmix64/splitmix64.h
@@ -0,0 +1,30 @@
+#include <inttypes.h>
+
+typedef struct s_splitmix64_state {
+ uint64_t state;
+ int has_uint32;
+ uint32_t uinteger;
+} splitmix64_state;
+
+static inline uint64_t splitmix64_next(uint64_t *state) {
+ uint64_t z = (state[0] += 0x9e3779b97f4a7c15);
+ z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
+ z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
+ return z ^ (z >> 31);
+}
+
+static inline uint64_t splitmix64_next64(splitmix64_state *state) {
+ return splitmix64_next(&state->state);
+}
+
+static inline uint32_t splitmix64_next32(splitmix64_state *state) {
+ uint64_t next;
+ if (state->has_uint32) {
+ state->has_uint32 = 0;
+ return state->uinteger;
+ }
+ next = splitmix64_next64(state);
+ state->has_uint32 = 1;
+ state->uinteger = (uint32_t)(next >> 32);
+ return (uint32_t)(next & 0xffffffff);
+}
diff --git a/numpy/random/src/splitmix64/splitmix64.orig.c b/numpy/random/src/splitmix64/splitmix64.orig.c
new file mode 100644
index 000000000..df6133aab
--- /dev/null
+++ b/numpy/random/src/splitmix64/splitmix64.orig.c
@@ -0,0 +1,28 @@
+/* Written in 2015 by Sebastiano Vigna (vigna@acm.org)
+
+To the extent possible under law, the author has dedicated all copyright
+and related and neighboring rights to this software to the public domain
+worldwide. This software is distributed without any warranty.
+
+See <http://creativecommons.org/publicdomain/zero/1.0/>. */
+
+#include <stdint.h>
+
+/* This is a fixed-increment version of Java 8's SplittableRandom generator
+ See http://dx.doi.org/10.1145/2714064.2660195 and
+ http://docs.oracle.com/javase/8/docs/api/java/util/SplittableRandom.html
+
+ It is a very fast generator passing BigCrush, and it can be useful if
+ for some reason you absolutely want 64 bits of state; otherwise, we
+ rather suggest to use a xoroshiro128+ (for moderately parallel
+ computations) or xorshift1024* (for massively parallel computations)
+ generator. */
+
+uint64_t x; /* The state can be seeded with any value. */
+
+uint64_t next() {
+ uint64_t z = (x += 0x9e3779b97f4a7c15);
+ z = (z ^ (z >> 30)) * 0xbf58476d1ce4e5b9;
+ z = (z ^ (z >> 27)) * 0x94d049bb133111eb;
+ return z ^ (z >> 31);
+}
diff --git a/numpy/core/_aliased_types.py b/numpy/random/tests/data/__init__.py
index e69de29bb..e69de29bb 100644
--- a/numpy/core/_aliased_types.py
+++ b/numpy/random/tests/data/__init__.py
diff --git a/numpy/random/tests/data/mt19937-testset-1.csv b/numpy/random/tests/data/mt19937-testset-1.csv
new file mode 100644
index 000000000..b97bfa66f
--- /dev/null
+++ b/numpy/random/tests/data/mt19937-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xc816921f
+1, 0xb3623c6d
+2, 0x5fa391bb
+3, 0x40178d9
+4, 0x7dcc9811
+5, 0x548eb8e6
+6, 0x92ba3125
+7, 0x65fde68d
+8, 0x2f81ec95
+9, 0xbd94f7a2
+10, 0xdc4d9bcc
+11, 0xa672bf13
+12, 0xb41113e
+13, 0xec7e0066
+14, 0x50239372
+15, 0xd9d66b1d
+16, 0xab72a161
+17, 0xddc2e29f
+18, 0x7ea29ab4
+19, 0x80d141ba
+20, 0xb1c7edf1
+21, 0x44d29203
+22, 0xe224d98
+23, 0x5b3e9d26
+24, 0x14fd567c
+25, 0x27d98c96
+26, 0x838779fc
+27, 0x92a138a
+28, 0x5d08965b
+29, 0x531e0ad6
+30, 0x984ee8f4
+31, 0x1ed78539
+32, 0x32bd6d8d
+33, 0xc37c8516
+34, 0x9aef5c6b
+35, 0x3aacd139
+36, 0xd96ed154
+37, 0x489cd1ed
+38, 0x2cba4b3b
+39, 0x76c6ae72
+40, 0x2dae02b9
+41, 0x52ac5fd6
+42, 0xc2b5e265
+43, 0x630e6a28
+44, 0x3f560d5d
+45, 0x9315bdf3
+46, 0xf1055aba
+47, 0x840e42c6
+48, 0xf2099c6b
+49, 0x15ff7696
+50, 0x7948d146
+51, 0x97342961
+52, 0x7a7a21c
+53, 0xc66f4fb1
+54, 0x23c4103e
+55, 0xd7321f98
+56, 0xeb7efb75
+57, 0xe02490b5
+58, 0x2aa02de
+59, 0x8bee0bf7
+60, 0xfc2da059
+61, 0xae835034
+62, 0x678f2075
+63, 0x6d03094b
+64, 0x56455e05
+65, 0x18b32373
+66, 0x8ff0356b
+67, 0x1fe442fb
+68, 0x3f1ab6c3
+69, 0xb6fd21b
+70, 0xfc310eb2
+71, 0xb19e9a4d
+72, 0x17ddee72
+73, 0xfd534251
+74, 0x9e500564
+75, 0x9013a036
+76, 0xcf08f118
+77, 0x6b6d5969
+78, 0x3ccf1977
+79, 0x7cc11497
+80, 0x651c6ac9
+81, 0x4d6b104b
+82, 0x9a28314e
+83, 0x14c237be
+84, 0x9cfc8d52
+85, 0x2947fad5
+86, 0xd71eff49
+87, 0x5188730e
+88, 0x4b894614
+89, 0xf4fa2a34
+90, 0x42f7cc69
+91, 0x4089c9e8
+92, 0xbf0bbfe4
+93, 0x3cea65c
+94, 0xc6221207
+95, 0x1bb71a8f
+96, 0x54843fe7
+97, 0xbc59de4c
+98, 0x79c6ee64
+99, 0x14e57a26
+100, 0x68d88fe
+101, 0x2b86ef64
+102, 0x8ffff3c1
+103, 0x5bdd573f
+104, 0x85671813
+105, 0xefe32ca2
+106, 0x105ded1e
+107, 0x90ca2769
+108, 0xb33963ac
+109, 0x363fbbc3
+110, 0x3b3763ae
+111, 0x1d50ab88
+112, 0xc9ec01eb
+113, 0xc8bbeada
+114, 0x5d704692
+115, 0x5fd9e40
+116, 0xe61c125
+117, 0x2fe05792
+118, 0xda8afb72
+119, 0x4cbaa653
+120, 0xdd2243df
+121, 0x896fd3f5
+122, 0x5bc23db
+123, 0xa1c4e807
+124, 0x57d1a24d
+125, 0x66503ddc
+126, 0xcf7c0838
+127, 0x19e034fc
+128, 0x66807450
+129, 0xfc219b3b
+130, 0xe8a843e7
+131, 0x9ce61f08
+132, 0x92b950d6
+133, 0xce955ec4
+134, 0xda0d1f0d
+135, 0x960c6250
+136, 0x39552432
+137, 0xde845e84
+138, 0xff3b4b11
+139, 0x5d918e6f
+140, 0xbb930df2
+141, 0x7cfb0993
+142, 0x5400e1e9
+143, 0x3bfa0954
+144, 0x7e2605fb
+145, 0x11941591
+146, 0x887e6994
+147, 0xdc8bed45
+148, 0x45b3fb50
+149, 0xfbdf8358
+150, 0x41507468
+151, 0x34c87166
+152, 0x17f64d77
+153, 0x3bbaf4f8
+154, 0x4f26f37e
+155, 0x4a56ebf2
+156, 0x81100f1
+157, 0x96d94eae
+158, 0xca88fda5
+159, 0x2eef3a60
+160, 0x952afbd3
+161, 0x2bec88c7
+162, 0x52335c4b
+163, 0x8296db8e
+164, 0x4da7d00a
+165, 0xc00ac899
+166, 0xadff8c72
+167, 0xbecf26cf
+168, 0x8835c83c
+169, 0x1d13c804
+170, 0xaa940ddc
+171, 0x68222cfe
+172, 0x4569c0e1
+173, 0x29077976
+174, 0x32d4a5af
+175, 0xd31fcdef
+176, 0xdc60682b
+177, 0x7c95c368
+178, 0x75a70213
+179, 0x43021751
+180, 0x5e52e0a6
+181, 0xf7e190b5
+182, 0xee3e4bb
+183, 0x2fe3b150
+184, 0xcf419c07
+185, 0x478a4570
+186, 0xe5c3ea50
+187, 0x417f30a8
+188, 0xf0cfdaa0
+189, 0xd1f7f738
+190, 0x2c70fc23
+191, 0x54fc89f9
+192, 0x444dcf01
+193, 0xec2a002d
+194, 0xef0c3a88
+195, 0xde21be9
+196, 0x88ab3296
+197, 0x3028897c
+198, 0x264b200b
+199, 0xd8ae0706
+200, 0x9eef901a
+201, 0xbd1b96e0
+202, 0xea71366c
+203, 0x1465b694
+204, 0x5a794650
+205, 0x83df52d4
+206, 0x8262413d
+207, 0x5bc148c0
+208, 0xe0ecd80c
+209, 0x40649571
+210, 0xb4d2ee5f
+211, 0xedfd7d09
+212, 0xa082e25f
+213, 0xc62992d1
+214, 0xbc7e65ee
+215, 0x5499cf8a
+216, 0xac28f775
+217, 0x649840fb
+218, 0xd4c54805
+219, 0x1d166ba6
+220, 0xbeb1171f
+221, 0x45b66703
+222, 0x78c03349
+223, 0x38d2a6ff
+224, 0x935cae8b
+225, 0x1d07dc3f
+226, 0x6c1ed365
+227, 0x579fc585
+228, 0x1320c0ec
+229, 0x632757eb
+230, 0xd265a397
+231, 0x70e9b6c2
+232, 0xc81e322c
+233, 0xa27153cf
+234, 0x2118ba19
+235, 0x514ec400
+236, 0x2bd0ecd6
+237, 0xc3e7dae3
+238, 0xfa39355e
+239, 0x48f23cc1
+240, 0xbcf75948
+241, 0x53ccc70c
+242, 0x75346423
+243, 0x951181e0
+244, 0x348e90df
+245, 0x14365d7f
+246, 0xfbc95d7a
+247, 0xdc98a9e6
+248, 0xed202df7
+249, 0xa59ec913
+250, 0x6b6e9ae2
+251, 0x1697f265
+252, 0x15d322d0
+253, 0xa2e7ee0a
+254, 0x88860b7e
+255, 0x455d8b9d
+256, 0x2f5c59cb
+257, 0xac49c9f1
+258, 0xa6a6a039
+259, 0xc057f56b
+260, 0xf1ff1208
+261, 0x5eb8dc9d
+262, 0xe6702509
+263, 0xe238b0ed
+264, 0x5ae32e3d
+265, 0xa88ebbdf
+266, 0xef885ae7
+267, 0xafa6d49b
+268, 0xc94499e0
+269, 0x1a196325
+270, 0x88938da3
+271, 0x14f4345
+272, 0xd8e33637
+273, 0xa3551bd5
+274, 0x73fe35c7
+275, 0x9561e94b
+276, 0xd673bf68
+277, 0x16134872
+278, 0x68c42f9f
+279, 0xdf7574c8
+280, 0x8809bab9
+281, 0x1432cf69
+282, 0xafb66bf1
+283, 0xc184aa7b
+284, 0xedbf2007
+285, 0xbd420ce1
+286, 0x761033a0
+287, 0xff7e351f
+288, 0xd6c3780e
+289, 0x5844416f
+290, 0xc6c0ee1c
+291, 0xd2e147db
+292, 0x92ac601a
+293, 0x393e846b
+294, 0x18196cca
+295, 0x54a22be
+296, 0x32bab1c4
+297, 0x60365183
+298, 0x64fa342
+299, 0xca24a493
+300, 0xd8cc8b83
+301, 0x3faf102b
+302, 0x6e09bb58
+303, 0x812f0ea
+304, 0x592c95d8
+305, 0xe45ea4c5
+306, 0x23aebf83
+307, 0xbd9691d4
+308, 0xf47b4baa
+309, 0x4ac7b487
+310, 0xcce18803
+311, 0x3377556e
+312, 0x3ff8e6b6
+313, 0x99d22063
+314, 0x23250bec
+315, 0x4e1f9861
+316, 0x8554249b
+317, 0x8635c2fc
+318, 0xe8426e8a
+319, 0x966c29d8
+320, 0x270b6082
+321, 0x3180a8a1
+322, 0xe7e1668b
+323, 0x7f868dc
+324, 0xcf4c17cf
+325, 0xe31de4d1
+326, 0xc8c8aff4
+327, 0xae8db704
+328, 0x3c928cc2
+329, 0xe12cd48
+330, 0xb33ecd04
+331, 0xb93d7cbe
+332, 0x49c69d6a
+333, 0x7d3bce64
+334, 0x86bc219
+335, 0x8408233b
+336, 0x44dc7479
+337, 0xdf80d538
+338, 0xf3db02c3
+339, 0xbbbd31d7
+340, 0x121281f
+341, 0x7521e9a3
+342, 0x8859675a
+343, 0x75aa6502
+344, 0x430ed15b
+345, 0xecf0a28d
+346, 0x659774fd
+347, 0xd58a2311
+348, 0x512389a9
+349, 0xff65e1ff
+350, 0xb6ddf222
+351, 0xe3458895
+352, 0x8b13cd6e
+353, 0xd4a22870
+354, 0xe604c50c
+355, 0x27f54f26
+356, 0x8f7f422f
+357, 0x9735b4cf
+358, 0x414072b0
+359, 0x76a1c6d5
+360, 0xa2208c06
+361, 0x83cd0f61
+362, 0x6c4f7ead
+363, 0x6553cf76
+364, 0xeffcf44
+365, 0x7f434a3f
+366, 0x9dc364bd
+367, 0x3cdf52ed
+368, 0xad597594
+369, 0x9c3e211b
+370, 0x6c04a33f
+371, 0x885dafa6
+372, 0xbbdaca71
+373, 0x7ae5dd5c
+374, 0x37675644
+375, 0x251853c6
+376, 0x130b086b
+377, 0x143fa54b
+378, 0x54cdc282
+379, 0x9faff5b3
+380, 0x502a5c8b
+381, 0xd9524550
+382, 0xae221aa6
+383, 0x55cf759b
+384, 0x24782da4
+385, 0xd715d815
+386, 0x250ea09a
+387, 0x4e0744ac
+388, 0x11e15814
+389, 0xabe5f9df
+390, 0xc8146350
+391, 0xfba67d9b
+392, 0x2b82e42f
+393, 0xd4ea96fc
+394, 0x5ffc179e
+395, 0x1598bafe
+396, 0x7fb6d662
+397, 0x1a12a0db
+398, 0x450cee4a
+399, 0x85f8e12
+400, 0xce71b594
+401, 0xd4bb1d19
+402, 0x968f379d
+403, 0x54cc1d52
+404, 0x467e6066
+405, 0x7da5f9a9
+406, 0x70977034
+407, 0x49e65c4b
+408, 0xd08570d1
+409, 0x7acdf60b
+410, 0xdffa038b
+411, 0x9ce14e4c
+412, 0x107cbbf8
+413, 0xdd746ca0
+414, 0xc6370a46
+415, 0xe7f83312
+416, 0x373fa9ce
+417, 0xd822a2c6
+418, 0x1d4efea6
+419, 0xc53dcadb
+420, 0x9b4e898f
+421, 0x71daa6bf
+422, 0x7a0bc78b
+423, 0xd7b86f50
+424, 0x1b8b3286
+425, 0xcf9425dd
+426, 0xd5263220
+427, 0x4ea0b647
+428, 0xc767fe64
+429, 0xcfc5e67
+430, 0xcc6a2942
+431, 0xa51eff00
+432, 0x76092e1b
+433, 0xf606e80f
+434, 0x824b5e20
+435, 0xebb55e14
+436, 0x783d96a6
+437, 0x10696512
+438, 0x17ee510a
+439, 0x3ab70a1f
+440, 0xcce6b210
+441, 0x8f72f0fb
+442, 0xf0610b41
+443, 0x83d01fb5
+444, 0x6b3de36
+445, 0xe4c2e84f
+446, 0x9c43bb15
+447, 0xddf2905
+448, 0x7dd63556
+449, 0x3662ca09
+450, 0xfb81f35b
+451, 0xc2c8a72a
+452, 0x8e93c37
+453, 0xa93da2d4
+454, 0xa03af8f1
+455, 0x8d75159a
+456, 0x15f010b0
+457, 0xa296ab06
+458, 0xe55962ba
+459, 0xeae700a9
+460, 0xe388964a
+461, 0x917f2bec
+462, 0x1c203fea
+463, 0x792a01ba
+464, 0xa93a80ac
+465, 0x9eb8a197
+466, 0x56c0bc73
+467, 0xb8f05799
+468, 0xf429a8c8
+469, 0xb92cee42
+470, 0xf8864ec
+471, 0x62f2518a
+472, 0x3a7bfa3e
+473, 0x12e56e6d
+474, 0xd7a18313
+475, 0x41fa3899
+476, 0xa09c4956
+477, 0xebcfd94a
+478, 0xc485f90b
+479, 0x4391ce40
+480, 0x742a3333
+481, 0xc932f9e5
+482, 0x75c6c263
+483, 0x80937f0
+484, 0xcf21833c
+485, 0x16027520
+486, 0xd42e669f
+487, 0xb0f01fb7
+488, 0xb35896f1
+489, 0x763737a9
+490, 0x1bb20209
+491, 0x3551f189
+492, 0x56bc2602
+493, 0xb6eacf4
+494, 0x42ec4d11
+495, 0x245cc68
+496, 0xc27ac43b
+497, 0x9d903466
+498, 0xce3f0c05
+499, 0xb708c31c
+500, 0xc0fd37eb
+501, 0x95938b2c
+502, 0xf20175a7
+503, 0x4a86ee9b
+504, 0xbe039a58
+505, 0xd41cabe7
+506, 0x83bc99ba
+507, 0x761d60e1
+508, 0x7737cc2e
+509, 0x2b82fc4b
+510, 0x375aa401
+511, 0xfe9597a0
+512, 0x5543806a
+513, 0x44f31238
+514, 0x7df31538
+515, 0x74cfa770
+516, 0x8755d881
+517, 0x1fde665a
+518, 0xda8bf315
+519, 0x973d8e95
+520, 0x72205228
+521, 0x8fe59717
+522, 0x7bb90b34
+523, 0xef6ed945
+524, 0x16fd4a38
+525, 0x5db44de1
+526, 0xf09f93b3
+527, 0xe84824cc
+528, 0x945bb50e
+529, 0xd0be4aa5
+530, 0x47c277c2
+531, 0xd3800c28
+532, 0xac1c33ec
+533, 0xd3dacce
+534, 0x811c8387
+535, 0x6761b36
+536, 0x70d3882f
+537, 0xd6e62e3a
+538, 0xea25daa2
+539, 0xb07f39d1
+540, 0x391d89d7
+541, 0x84b6fb5e
+542, 0x3dda3fca
+543, 0x229e80a4
+544, 0x3d94a4b7
+545, 0x5d3d576a
+546, 0xad7818a0
+547, 0xce23b03a
+548, 0x7aa2079c
+549, 0x9a6be555
+550, 0x83f3b34a
+551, 0x1848f9d9
+552, 0xd8fefc1c
+553, 0x48e6ce48
+554, 0x52e55750
+555, 0xf41a71cf
+556, 0xba08e259
+557, 0xfaf06a15
+558, 0xeaaac0fb
+559, 0x34f90098
+560, 0xb1dfffbb
+561, 0x718daec2
+562, 0xab4dda21
+563, 0xd27cc1ee
+564, 0x4aafbc4c
+565, 0x356dfb4f
+566, 0x83fcdfd6
+567, 0x8f0bcde0
+568, 0x4363f844
+569, 0xadc0f4d5
+570, 0x3bde994e
+571, 0x3884d452
+572, 0x21876b4a
+573, 0x9c985398
+574, 0xca55a226
+575, 0x3a88c583
+576, 0x916dc33c
+577, 0x8f67d1d7
+578, 0x3b26a667
+579, 0xe4ddeb4b
+580, 0x1a9d8c33
+581, 0x81c9b74f
+582, 0x9ed1e9df
+583, 0x6e61aecf
+584, 0x95e95a5d
+585, 0x68864ff5
+586, 0xb8fa5b9
+587, 0x72b1b3de
+588, 0x5e18a86b
+589, 0xd7f2337d
+590, 0xd70e0925
+591, 0xb573a4c1
+592, 0xc77b3f8a
+593, 0x389b20de
+594, 0x16cf6afb
+595, 0xa39bd275
+596, 0xf491cf01
+597, 0x6f88a802
+598, 0x8510af05
+599, 0xe7cd549a
+600, 0x8603179a
+601, 0xef43f191
+602, 0xf9b64c60
+603, 0xb00254a7
+604, 0xd7c06a2d
+605, 0x17e9380b
+606, 0x529e727b
+607, 0xaaa8fe0a
+608, 0xfb64ff4c
+609, 0xcd75af26
+610, 0xfb717c87
+611, 0xa0789899
+612, 0x10391ec9
+613, 0x7e9b40b3
+614, 0x18536554
+615, 0x728c05f7
+616, 0x787dca98
+617, 0xad948d1
+618, 0x44c18def
+619, 0x3303f2ec
+620, 0xa15acb5
+621, 0xb58d38f4
+622, 0xfe041ef8
+623, 0xd151a956
+624, 0x7b9168e8
+625, 0x5ebeca06
+626, 0x90fe95df
+627, 0xf76875aa
+628, 0xb2e0d664
+629, 0x2e3253b7
+630, 0x68e34469
+631, 0x1f0c2d89
+632, 0x13a34ac2
+633, 0x5ffeb841
+634, 0xe381e91c
+635, 0xb8549a92
+636, 0x3f35cf1
+637, 0xda0f9dcb
+638, 0xdd9828a6
+639, 0xe1428f29
+640, 0xf4db80b5
+641, 0xdac30af5
+642, 0x1af1dd17
+643, 0x9a540254
+644, 0xcab68a38
+645, 0x33560361
+646, 0x2fbf3886
+647, 0xbc785923
+648, 0xe081cd10
+649, 0x8e473356
+650, 0xd102c357
+651, 0xeea4fe48
+652, 0x248d3453
+653, 0x1da79ac
+654, 0x815a65ff
+655, 0x27693e76
+656, 0xb7d5af40
+657, 0x6d245d30
+658, 0x9e06fa8f
+659, 0xb0570dcb
+660, 0x469f0005
+661, 0x3e0ca132
+662, 0xd89bbf3
+663, 0xd61ccd47
+664, 0x6383878
+665, 0x62b5956
+666, 0x4dc83675
+667, 0x93fd8492
+668, 0x5a0091f5
+669, 0xc9f9bc3
+670, 0xa26e7778
+671, 0xeabf2d01
+672, 0xe612dc06
+673, 0x85d89ff9
+674, 0xd1763179
+675, 0xcb88947b
+676, 0x9e8757a5
+677, 0xe100e85c
+678, 0x904166eb
+679, 0x4996243d
+680, 0x4038e1cb
+681, 0x2be2c63d
+682, 0x77017e81
+683, 0x3b1f556b
+684, 0x1c785c77
+685, 0x6869b8bd
+686, 0xe1217ed4
+687, 0x4012ab2f
+688, 0xc06c0d8e
+689, 0x2122eb68
+690, 0xad1783fd
+691, 0x5f0c80e3
+692, 0x828f7efa
+693, 0x29328399
+694, 0xeadf1087
+695, 0x85dc0037
+696, 0x9691ef26
+697, 0xc0947a53
+698, 0x2a178d2a
+699, 0x2a2c7e8f
+700, 0x90378380
+701, 0xaad8d326
+702, 0x9cf1c3c8
+703, 0x84eccd44
+704, 0x79e61808
+705, 0x8b3f454e
+706, 0x209e6e1
+707, 0x51f88378
+708, 0xc210226f
+709, 0xd982adb5
+710, 0x55d44a31
+711, 0x9817d443
+712, 0xa328c626
+713, 0x13455966
+714, 0xb8f681d3
+715, 0x2a3c713b
+716, 0xc186959b
+717, 0x814a74b0
+718, 0xed7bc90
+719, 0xa88d3d6d
+720, 0x88a9f561
+721, 0x73aa1c0a
+722, 0xdfeff404
+723, 0xec037e4b
+724, 0xa5c209f0
+725, 0xb3a223b4
+726, 0x24ce3709
+727, 0x3184c790
+728, 0xa1398c62
+729, 0x2f92034e
+730, 0xbb37a79a
+731, 0x605287b4
+732, 0x8faa772c
+733, 0x6ce56c1d
+734, 0xc035fb4c
+735, 0x7cf5b316
+736, 0x6502645
+737, 0xa283d810
+738, 0x778bc2f1
+739, 0xfdf99313
+740, 0x1f513265
+741, 0xbd3837e2
+742, 0x9b84a9a
+743, 0x2139ce91
+744, 0x61a8e890
+745, 0xf9ff12db
+746, 0xb43d2ea7
+747, 0x88532e61
+748, 0x175a6655
+749, 0x7a6c4f72
+750, 0x6dafc1b7
+751, 0x449b1459
+752, 0x514f654f
+753, 0x9a6731e2
+754, 0x8632da43
+755, 0xc81b0422
+756, 0x81fe9005
+757, 0x15b79618
+758, 0xb5fa629f
+759, 0x987a474f
+760, 0x1c74f54e
+761, 0xf9743232
+762, 0xec4b55f
+763, 0x87d761e5
+764, 0xd1ad78b7
+765, 0x453d9350
+766, 0xc7a7d85
+767, 0xb2576ff5
+768, 0xcdde49b7
+769, 0x8e1f763e
+770, 0x1338583e
+771, 0xfd65b9dc
+772, 0x4f19c4f4
+773, 0x3a52d73d
+774, 0xd3509c4c
+775, 0xda24fe31
+776, 0xe2de56ba
+777, 0x2db5e540
+778, 0x23172734
+779, 0x4db572f
+780, 0xeb941718
+781, 0x84c2649a
+782, 0x3b1e5b6a
+783, 0x4c9c61b9
+784, 0x3bccd11
+785, 0xb4d7b78e
+786, 0x48580ae5
+787, 0xd273ab68
+788, 0x25c11615
+789, 0x470b53f6
+790, 0x329c2068
+791, 0x1693721b
+792, 0xf8c9aacf
+793, 0x4c3d5693
+794, 0xd778284e
+795, 0xae1cb24f
+796, 0x3c11d1b3
+797, 0xddd2b0c0
+798, 0x90269fa7
+799, 0x5666e0a2
+800, 0xf9f195a4
+801, 0x61d78eb2
+802, 0xada5a7c0
+803, 0xaa272fbe
+804, 0xba3bae2f
+805, 0xd0b70fc2
+806, 0x529f32b
+807, 0xda7a3e21
+808, 0x9a776a20
+809, 0xb21f9635
+810, 0xb3acc14e
+811, 0xac55f56
+812, 0x29dccf41
+813, 0x32dabdb3
+814, 0xaa032f58
+815, 0xfa406af4
+816, 0xce3c415d
+817, 0xb44fb4d9
+818, 0x32248d1c
+819, 0x680c6440
+820, 0xae2337b
+821, 0x294cb597
+822, 0x5bca48fe
+823, 0xaef19f40
+824, 0xad60406
+825, 0x4781f090
+826, 0xfd691ffc
+827, 0xb6568268
+828, 0xa56c72cb
+829, 0xf8a9e0fc
+830, 0x9af4fd02
+831, 0x2cd30932
+832, 0x776cefd7
+833, 0xe31f476e
+834, 0x6d94a437
+835, 0xb3cab598
+836, 0xf582d13f
+837, 0x3bf8759d
+838, 0xc3777dc
+839, 0x5e425ea8
+840, 0x1c7ff4ed
+841, 0x1c2e97d1
+842, 0xc062d2b4
+843, 0x46dc80e0
+844, 0xbcdb47e6
+845, 0x32282fe0
+846, 0xaba89063
+847, 0x5e94e9bb
+848, 0x3e667f78
+849, 0xea6eb21a
+850, 0xe56e54e8
+851, 0xa0383510
+852, 0x6768fe2b
+853, 0xb53ac3e0
+854, 0x779569a0
+855, 0xeca83c6a
+856, 0x24db4d2d
+857, 0x4585f696
+858, 0xf84748b2
+859, 0xf6a4dd5b
+860, 0x31fb524d
+861, 0x67ab39fe
+862, 0x5882a899
+863, 0x9a05fcf6
+864, 0x712b5674
+865, 0xe8c6958f
+866, 0x4b448bb3
+867, 0x530b9abf
+868, 0xb491f491
+869, 0x98352c62
+870, 0x2d0a50e3
+871, 0xeb4384da
+872, 0x36246f07
+873, 0xcbc5c1a
+874, 0xae24031d
+875, 0x44d11ed6
+876, 0xf07f1608
+877, 0xf296aadd
+878, 0x3bcfe3be
+879, 0x8fa1e7df
+880, 0xfd317a6e
+881, 0xe4975c44
+882, 0x15205892
+883, 0xa762d4df
+884, 0xf1167365
+885, 0x6811cc00
+886, 0x8315f23
+887, 0xe045b4b1
+888, 0xa8496414
+889, 0xbed313ae
+890, 0xcdae3ddb
+891, 0xa9c22c9
+892, 0x275fab1a
+893, 0xedd65fa
+894, 0x4c188229
+895, 0x63a83e58
+896, 0x18aa9207
+897, 0xa41f2e78
+898, 0xd9f63653
+899, 0xbe2be73b
+900, 0xa3364d39
+901, 0x896d5428
+902, 0xc737539e
+903, 0x745a78c6
+904, 0xf0b2b042
+905, 0x510773b4
+906, 0x92ad8e37
+907, 0x27f2f8c4
+908, 0x23704cc8
+909, 0x3d95a77f
+910, 0xf08587a4
+911, 0xbd696a25
+912, 0x948924f3
+913, 0x8cddb634
+914, 0xcd2a4910
+915, 0x8e0e300e
+916, 0x83815a9b
+917, 0x67383510
+918, 0x3c18f0d0
+919, 0xc7a7bccc
+920, 0x7cc2d3a2
+921, 0x52eb2eeb
+922, 0xe4a257e5
+923, 0xec76160e
+924, 0x63f9ad68
+925, 0x36d0bbbf
+926, 0x957bc4e4
+927, 0xc9ed90ff
+928, 0x4cb6059d
+929, 0x2f86eca1
+930, 0x3e3665a3
+931, 0x9b7eb6f4
+932, 0x492e7e18
+933, 0xa098aa51
+934, 0x7eb568b2
+935, 0x3fd639ba
+936, 0x7bebcf1
+937, 0x99c844ad
+938, 0x43cb5ec7
+939, 0x8dfbbef5
+940, 0x5be413ff
+941, 0xd93b976d
+942, 0xc1c7a86d
+943, 0x1f0e93d0
+944, 0x498204a2
+945, 0xe8fe832a
+946, 0x2236bd7
+947, 0x89953769
+948, 0x2acc3491
+949, 0x2c4f22c6
+950, 0xd7996277
+951, 0x3bcdc349
+952, 0xfc286630
+953, 0x5f8909fd
+954, 0x242677c0
+955, 0x4cb34104
+956, 0xa6ff8100
+957, 0x39ea47ec
+958, 0x9bd54140
+959, 0x7502ffe8
+960, 0x7ebef8ae
+961, 0x1ed8abe4
+962, 0xfaba8450
+963, 0xc197b65f
+964, 0x19431455
+965, 0xe229c176
+966, 0xeb2967da
+967, 0xe0c5dc05
+968, 0xa84e3227
+969, 0x10dd9e0f
+970, 0xbdb70b02
+971, 0xce24808a
+972, 0x423edab8
+973, 0x194caf71
+974, 0x144f150d
+975, 0xf811c2d2
+976, 0xc224ee85
+977, 0x2b217a5b
+978, 0xf78a5a79
+979, 0x6554a4b1
+980, 0x769582df
+981, 0xf4b2cf93
+982, 0x89648483
+983, 0xb3283a3e
+984, 0x82b895db
+985, 0x79388ef0
+986, 0x54bc42a6
+987, 0xc4dd39d9
+988, 0x45b33b7d
+989, 0x8703b2c1
+990, 0x1cc94806
+991, 0xe0f43e49
+992, 0xcaa7b6bc
+993, 0x4f88e9af
+994, 0x1477cce5
+995, 0x347dd115
+996, 0x36e335fa
+997, 0xb93c9a31
+998, 0xaac3a175
+999, 0x68a19647
diff --git a/numpy/random/tests/data/mt19937-testset-2.csv b/numpy/random/tests/data/mt19937-testset-2.csv
new file mode 100644
index 000000000..cdb8e4794
--- /dev/null
+++ b/numpy/random/tests/data/mt19937-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0x7ab4ea94
+1, 0x9b561119
+2, 0x4957d02e
+3, 0x7dd3fdc2
+4, 0x5affe54
+5, 0x5a01741c
+6, 0x8b9e8c1f
+7, 0xda5bf11a
+8, 0x509226
+9, 0x64e2ea17
+10, 0x82c6dab5
+11, 0xe4302515
+12, 0x8198b873
+13, 0xc3ec9a82
+14, 0x829dff28
+15, 0x5278e44f
+16, 0x994a7d2c
+17, 0xf1c89398
+18, 0xaf2fddec
+19, 0x22abc6ee
+20, 0x963dbd43
+21, 0xc29edffb
+22, 0x41c1ce07
+23, 0x9c90034d
+24, 0x1f17a796
+25, 0x3833caa8
+26, 0xb8795528
+27, 0xebc595a2
+28, 0xf8f5b5dd
+29, 0xc2881f72
+30, 0x18e5d3f0
+31, 0x9b19ac7a
+32, 0xb9992436
+33, 0xc00052b3
+34, 0xb63f4475
+35, 0x962642d9
+36, 0x63506c10
+37, 0x2be6b127
+38, 0x569bdbc6
+39, 0x7f185e01
+40, 0xebb55f53
+41, 0x1c30198c
+42, 0x7c8d75c6
+43, 0xd3f2186b
+44, 0xaca5b9b1
+45, 0xbc49ff45
+46, 0xc4a802af
+47, 0x2cecd86f
+48, 0x8e0da529
+49, 0x1f22b00e
+50, 0x4559ea80
+51, 0x60f587d8
+52, 0x7c7460e9
+53, 0x67be0a4a
+54, 0x987a0183
+55, 0x7bd30f1
+56, 0xab18c4ac
+57, 0xffdbfb64
+58, 0x9ea917f9
+59, 0x1239dab7
+60, 0x38efabeb
+61, 0x5da91888
+62, 0x8f49ed62
+63, 0x83f60b1e
+64, 0x5950a3fc
+65, 0xd8911104
+66, 0x19e8859e
+67, 0x1a4d89ec
+68, 0x968ca180
+69, 0x9e1b6da3
+70, 0x3d99c2c
+71, 0x55f76289
+72, 0x8fa28b9e
+73, 0x9fe01d33
+74, 0xdade4e38
+75, 0x1ea04290
+76, 0xa7263313
+77, 0xaafc762e
+78, 0x460476d6
+79, 0x31226e12
+80, 0x451d3f05
+81, 0xd0d2764b
+82, 0xd06e1ab3
+83, 0x1394e3f4
+84, 0x2fc04ea3
+85, 0x5b8401c
+86, 0xebd6c929
+87, 0xe881687c
+88, 0x94bdd66a
+89, 0xabf85983
+90, 0x223ad12d
+91, 0x2aaeeaa3
+92, 0x1f704934
+93, 0x2db2efb6
+94, 0xf49b8dfb
+95, 0x5bdbbb9d
+96, 0xba0cd0db
+97, 0x4ec4674e
+98, 0xad0129e
+99, 0x7a66129b
+100, 0x50d12c5e
+101, 0x85b1d335
+102, 0x3efda58a
+103, 0xecd886fb
+104, 0x8ecadd3d
+105, 0x60ebac0f
+106, 0x5e10fe79
+107, 0xa84f7e5d
+108, 0x43931288
+109, 0xfacf448
+110, 0x4ee01997
+111, 0xcdc0a651
+112, 0x33c87037
+113, 0x8b50fc03
+114, 0xf52aad34
+115, 0xda6cd856
+116, 0x7585bea0
+117, 0xe947c762
+118, 0x4ddff5d8
+119, 0xe0e79b3b
+120, 0xb804cf09
+121, 0x84765c44
+122, 0x3ff666b4
+123, 0xe31621ad
+124, 0x816f2236
+125, 0x228176bc
+126, 0xfdc14904
+127, 0x635f5077
+128, 0x6981a817
+129, 0xfd9a0300
+130, 0xd3fa8a24
+131, 0xd67c1a77
+132, 0x903fe97a
+133, 0xf7c4a4d5
+134, 0x109f2058
+135, 0x48ab87fe
+136, 0xfd6f1928
+137, 0x707e9452
+138, 0xf327db9e
+139, 0x7b80d76d
+140, 0xfb6ba193
+141, 0x454a1ad0
+142, 0xe20b51e
+143, 0xb774d085
+144, 0x6b1ed574
+145, 0xb1e77de4
+146, 0xe2a83b37
+147, 0x33d3176f
+148, 0x2f0ca0fc
+149, 0x17f51e2
+150, 0x7c1fbf55
+151, 0xf09e9cd0
+152, 0xe3d9bacd
+153, 0x4244db0a
+154, 0x876c09fc
+155, 0x9db4fc2f
+156, 0xd3771d60
+157, 0x25fc6a75
+158, 0xb309915c
+159, 0xc50ee027
+160, 0xaa5b7b38
+161, 0x4c650ded
+162, 0x1acb2879
+163, 0x50db5887
+164, 0x90054847
+165, 0xfef23e5b
+166, 0x2dd7b7d5
+167, 0x990b8c2e
+168, 0x6001a601
+169, 0xb5d314c4
+170, 0xfbfb7bf9
+171, 0x1aba997d
+172, 0x814e7304
+173, 0x989d956a
+174, 0x86d5a29c
+175, 0x70a9fa08
+176, 0xc4ccba87
+177, 0x7e9cb366
+178, 0xee18eb0a
+179, 0x44f5be58
+180, 0x91d4af2d
+181, 0x5ab6e593
+182, 0x9fd6bb4d
+183, 0x85894ce
+184, 0x728a2401
+185, 0xf006f6d4
+186, 0xd782741e
+187, 0x842cd5bd
+188, 0xfb5883aa
+189, 0x7e5a471
+190, 0x83ff6965
+191, 0xc9675c6b
+192, 0xb6ced3c7
+193, 0x3de6425b
+194, 0x25e14db4
+195, 0x69ca3dec
+196, 0x81342d13
+197, 0xd7cd8417
+198, 0x88d15e69
+199, 0xefba17c9
+200, 0x43d595e6
+201, 0x89d4cf25
+202, 0x7cae9b9b
+203, 0x2242c621
+204, 0x27fc3598
+205, 0x467b1d84
+206, 0xe84d4622
+207, 0xa26bf980
+208, 0x80411010
+209, 0xe2c2bfea
+210, 0xbc6ca25a
+211, 0x3ddb592a
+212, 0xdd46eb9e
+213, 0xdfe8f657
+214, 0x2cedc974
+215, 0xf0dc546b
+216, 0xd46be68f
+217, 0x26d8a5aa
+218, 0x76e96ba3
+219, 0x7d5b5353
+220, 0xf532237c
+221, 0x6478b79
+222, 0x9b81a5e5
+223, 0x5fc68e5c
+224, 0x68436e70
+225, 0x2a0043f9
+226, 0x108d523c
+227, 0x7a4c32a3
+228, 0x9c84c742
+229, 0x6f813dae
+230, 0xfcc5bbcc
+231, 0x215b6f3a
+232, 0x84cb321d
+233, 0x7913a248
+234, 0xb1e6b585
+235, 0x49376b31
+236, 0x1dc896b0
+237, 0x347051ad
+238, 0x5524c042
+239, 0xda0eef9d
+240, 0xf2e73342
+241, 0xbeee2f9d
+242, 0x7c702874
+243, 0x9eb3bd34
+244, 0x97b09700
+245, 0xcdbab1d4
+246, 0x4a2f6ed1
+247, 0x2047bda5
+248, 0x3ecc7005
+249, 0x8d0d5e67
+250, 0x40876fb5
+251, 0xb5fd2187
+252, 0xe915d8af
+253, 0x9a2351c7
+254, 0xccc658ae
+255, 0xebb1eddc
+256, 0xc4a83671
+257, 0xffb2548f
+258, 0xe4fe387a
+259, 0x477aaab4
+260, 0x8475a4e4
+261, 0xf8823e46
+262, 0xe4130f71
+263, 0xbdb54482
+264, 0x98fe0462
+265, 0xf36b27b8
+266, 0xed7733da
+267, 0x5f428afc
+268, 0x43a3a21a
+269, 0xf8370b55
+270, 0xfade1de1
+271, 0xd9a038ea
+272, 0x3c69af23
+273, 0x24df7dd0
+274, 0xf66d9353
+275, 0x71d811be
+276, 0xcc4d024b
+277, 0xb8c30bf0
+278, 0x4198509d
+279, 0x8b37ba36
+280, 0xa41ae29a
+281, 0x8cf7799e
+282, 0x5cd0136a
+283, 0xa11324ef
+284, 0x2f8b6d4b
+285, 0x3657cf17
+286, 0x35b6873f
+287, 0xee6e5bd7
+288, 0xbeeaa98
+289, 0x9ad3c581
+290, 0xe2376c3f
+291, 0x738027cc
+292, 0x536ac839
+293, 0xf066227
+294, 0x6c9cb0f9
+295, 0x84082ae6
+296, 0xab38ae9d
+297, 0x493eade9
+298, 0xcb630b3a
+299, 0x64d44250
+300, 0xe5efb557
+301, 0xea2424d9
+302, 0x11a690ba
+303, 0x30a48ae4
+304, 0x58987e53
+305, 0x94ec6076
+306, 0x5d3308fa
+307, 0xf1635ebb
+308, 0x56a5ab90
+309, 0x2b2f2ee4
+310, 0x6f9e6483
+311, 0x8b93e327
+312, 0xa7ce140b
+313, 0x4c8aa42
+314, 0x7657bb3f
+315, 0xf250fd75
+316, 0x1edfcb0f
+317, 0xdb42ace3
+318, 0xf8147e16
+319, 0xd1992bd
+320, 0x64bb14d1
+321, 0x423e724d
+322, 0x7b172f7c
+323, 0x17171696
+324, 0x4acaf83b
+325, 0x7a83527e
+326, 0xfc980c60
+327, 0xc8b56bb
+328, 0x2453f77f
+329, 0x85ad1bf9
+330, 0x62a85dfe
+331, 0x48238c4d
+332, 0xbb3ec1eb
+333, 0x4c1c039c
+334, 0x1f37f571
+335, 0x98aecb63
+336, 0xc3b3ddd6
+337, 0xd22dad4
+338, 0xe49671a3
+339, 0xe3baf945
+340, 0xb9e21680
+341, 0xda562856
+342, 0xe8b88ce4
+343, 0x86f88de2
+344, 0x986faf76
+345, 0x6f0025c3
+346, 0x3fe21234
+347, 0xd8d3f729
+348, 0xc2d11c6f
+349, 0xd4f9e8f
+350, 0xf61a0aa
+351, 0xc48bb313
+352, 0xe944e940
+353, 0xf1801b2e
+354, 0x253590be
+355, 0x981f069d
+356, 0x891454d8
+357, 0xa4f824ad
+358, 0x6dd2cc48
+359, 0x3018827e
+360, 0x3fb329e6
+361, 0x65276517
+362, 0x8d2c0dd2
+363, 0xc965b48e
+364, 0x85d14d90
+365, 0x5a51623c
+366, 0xa9573d6a
+367, 0x82d00edf
+368, 0x5ed7ce07
+369, 0x1d946abc
+370, 0x24fa567b
+371, 0x83ef5ecc
+372, 0x9001724a
+373, 0xc4fe48f3
+374, 0x1e07c25c
+375, 0xf4d5e65e
+376, 0xb734f6e9
+377, 0x327a2df8
+378, 0x766d59b7
+379, 0x625e6b61
+380, 0xe82f32d7
+381, 0x1566c638
+382, 0x2e815871
+383, 0x606514aa
+384, 0x36b7386e
+385, 0xcaa8ce08
+386, 0xb453fe9c
+387, 0x48574e23
+388, 0x71f0da06
+389, 0xa8a79463
+390, 0x6b590210
+391, 0x86e989db
+392, 0x42899f4f
+393, 0x7a654ef9
+394, 0x4c4fe932
+395, 0x77b2fd10
+396, 0xb6b4565c
+397, 0xa2e537a3
+398, 0xef5a3dca
+399, 0x41235ea8
+400, 0x95c90541
+401, 0x50ad32c4
+402, 0xc1b8e0a4
+403, 0x498e9aab
+404, 0xffc965f1
+405, 0x72633485
+406, 0x3a731aef
+407, 0x7cfddd0b
+408, 0xb04d4129
+409, 0x184fc28e
+410, 0x424369b0
+411, 0xf9ae13a1
+412, 0xaf357c8d
+413, 0x7a19228e
+414, 0xb46de2a8
+415, 0xeff2ac76
+416, 0xa6c9357b
+417, 0x614f19c1
+418, 0x8ee1a53f
+419, 0xbe1257b1
+420, 0xf72651fe
+421, 0xd347c298
+422, 0x96dd2f23
+423, 0x5bb1d63e
+424, 0x32e10887
+425, 0x36a144da
+426, 0x9d70e791
+427, 0x5e535a25
+428, 0x214253da
+429, 0x2e43dd40
+430, 0xfc0413f4
+431, 0x1f5ea409
+432, 0x1754c126
+433, 0xcdbeebbe
+434, 0x1fb44a14
+435, 0xaec7926
+436, 0xb9d9a1e
+437, 0x9e4a6577
+438, 0x8b1f04c5
+439, 0x19854e8a
+440, 0x531080cd
+441, 0xc0cbd73
+442, 0x20399d77
+443, 0x7d8e9ed5
+444, 0x66177598
+445, 0x4d18a5c2
+446, 0xe08ebf58
+447, 0xb1f9c87b
+448, 0x66bedb10
+449, 0x26670d21
+450, 0x7a7892da
+451, 0x69b69d86
+452, 0xd04f1d1c
+453, 0xaf469625
+454, 0x7946b813
+455, 0x1ee596bd
+456, 0x7f365d85
+457, 0x795b662b
+458, 0x194ad02d
+459, 0x5a9649b5
+460, 0x6085e278
+461, 0x2cf54550
+462, 0x9c77ea0b
+463, 0x3c6ff8b
+464, 0x2141cd34
+465, 0xb90bc671
+466, 0x35037c4b
+467, 0xd04c0d76
+468, 0xc75bff8
+469, 0x8f52003b
+470, 0xfad3d031
+471, 0x667024bc
+472, 0xcb04ea36
+473, 0x3e03d587
+474, 0x2644d3a0
+475, 0xa8fe99ba
+476, 0x2b9a55fc
+477, 0x45c4d44a
+478, 0xd059881
+479, 0xe07fcd20
+480, 0x4e22046c
+481, 0x7c2cbf81
+482, 0xbf7f23de
+483, 0x69d924c3
+484, 0xe53cd01
+485, 0x3879017c
+486, 0xa590e558
+487, 0x263bc076
+488, 0x245465b1
+489, 0x449212c6
+490, 0x249dcb29
+491, 0x703d42d7
+492, 0x140eb9ec
+493, 0xc86c5741
+494, 0x7992aa5b
+495, 0xb8b76a91
+496, 0x771dac3d
+497, 0x4ecd81e3
+498, 0xe5ac30b3
+499, 0xf4d7a5a6
+500, 0xac24b97
+501, 0x63494d78
+502, 0x627ffa89
+503, 0xfa4f330
+504, 0x8098a1aa
+505, 0xcc0c61dc
+506, 0x34749fa0
+507, 0x7f217822
+508, 0x418d6f15
+509, 0xa4b6e51e
+510, 0x1036de68
+511, 0x1436986e
+512, 0x44df961d
+513, 0x368e4651
+514, 0x6a9e5d8c
+515, 0x27d1597e
+516, 0xa1926c62
+517, 0x8d1f2b55
+518, 0x5797eb42
+519, 0xa90f9e81
+520, 0x57547b10
+521, 0xdbbcca8e
+522, 0x9edd2d86
+523, 0xbb0a7527
+524, 0x7662380c
+525, 0xe7c98590
+526, 0x950fbf3f
+527, 0xdc2b76b3
+528, 0x8a945102
+529, 0x3f0a1a85
+530, 0xeb215834
+531, 0xc59f2802
+532, 0xe2a4610
+533, 0x8b5a8665
+534, 0x8b2d9933
+535, 0x40a4f0bc
+536, 0xaab5bc67
+537, 0x1442a69e
+538, 0xdf531193
+539, 0x698d3db4
+540, 0x2d40324e
+541, 0x1a25feb2
+542, 0xe8cc898f
+543, 0xf12e98f5
+544, 0xc03ad34c
+545, 0xf62fceff
+546, 0xdd827e1e
+547, 0x7d8ccb3b
+548, 0xab2d6bc1
+549, 0xc323a124
+550, 0x8184a19a
+551, 0xc3c4e934
+552, 0x5487424d
+553, 0xd6a81a44
+554, 0x90a8689d
+555, 0xe69c4c67
+556, 0xbdae02dd
+557, 0x72a18a79
+558, 0x2a88e907
+559, 0x31cf4b5d
+560, 0xb157772f
+561, 0x206ba601
+562, 0x18529232
+563, 0x7dac90d8
+564, 0x3a5f8a09
+565, 0x9f4b64a3
+566, 0xae373af9
+567, 0x1d79447c
+568, 0x2a23684b
+569, 0x41fb7ba4
+570, 0x55e4bb9e
+571, 0xd7619d3e
+572, 0xc04e4dd8
+573, 0x8418d516
+574, 0x2b2ca585
+575, 0xfa8eedf
+576, 0x5bafd977
+577, 0x31974fb0
+578, 0x9eb6697b
+579, 0xc8be22f5
+580, 0x173b126a
+581, 0x8809becf
+582, 0x3e41efe1
+583, 0x3d6cbbb8
+584, 0x278c81d8
+585, 0xa6f08434
+586, 0xa0e6601d
+587, 0x2fccd88d
+588, 0x3cbc8beb
+589, 0x5f65d864
+590, 0xa1ff8ddf
+591, 0x609dcb7c
+592, 0x4a4e1663
+593, 0xeae5531
+594, 0x962a7c85
+595, 0x1e110607
+596, 0x8c5db5d0
+597, 0xc7f2337e
+598, 0xc94fcc9c
+599, 0xe7f62629
+600, 0x6c9aa9f8
+601, 0x2e27fe0e
+602, 0x4d0dae12
+603, 0x9eecf588
+604, 0x977ba3f2
+605, 0xed0a51af
+606, 0x3f3ec633
+607, 0xc174b2ec
+608, 0x590be8a9
+609, 0x4f630d18
+610, 0xf579e989
+611, 0xe2a55584
+612, 0xee11edcd
+613, 0x150a4833
+614, 0xc0a0535c
+615, 0xb5e00993
+616, 0xb6435700
+617, 0xa98dbff
+618, 0x315716af
+619, 0x94395776
+620, 0x6cbd48d9
+621, 0xab17f8fc
+622, 0xa794ffb7
+623, 0x6b55e231
+624, 0x89ff5783
+625, 0x431dcb26
+626, 0x270f9bf8
+627, 0x2af1b8d0
+628, 0x881745ed
+629, 0x17e1be4e
+630, 0x132a0ec4
+631, 0x5712df17
+632, 0x2dfb3334
+633, 0xf5a35519
+634, 0xcafbdac6
+635, 0x73b6189d
+636, 0x10107cac
+637, 0x18c1045e
+638, 0xbc19bbad
+639, 0x8b4f05ac
+640, 0x5830d038
+641, 0x468cd98a
+642, 0x5b83a201
+643, 0xf0ccdd9c
+644, 0xcb20c4bd
+645, 0x1ff186c9
+646, 0xcdddb47f
+647, 0x5c65ce6
+648, 0xb748c580
+649, 0x23b6f262
+650, 0xe2ba8e5c
+651, 0x9a164a03
+652, 0x62d3322e
+653, 0x918d8b43
+654, 0x45c8b49d
+655, 0xce172c6e
+656, 0x23febc6
+657, 0x84fdc5b7
+658, 0xe7d1fd82
+659, 0xf0ddf3a6
+660, 0x87050436
+661, 0x13d46375
+662, 0x5b191c78
+663, 0x2cbd99c0
+664, 0x7686c7f
+665, 0xcff56c84
+666, 0x7f9b4486
+667, 0xefc997fe
+668, 0x984d4588
+669, 0xfa44f36a
+670, 0x7a5276c1
+671, 0xcfde6176
+672, 0xcacf7b1d
+673, 0xcffae9a7
+674, 0xe98848d5
+675, 0xd4346001
+676, 0xa2196cac
+677, 0x217f07dc
+678, 0x42d5bef
+679, 0x6f2e8838
+680, 0x4677a24
+681, 0x4ad9cd54
+682, 0x43df42af
+683, 0x2dde417
+684, 0xaef5acb1
+685, 0xf377f4b3
+686, 0x7d870d40
+687, 0xe53df1c2
+688, 0xaeb5be50
+689, 0x7c92eac0
+690, 0x4f00838c
+691, 0x91e05e84
+692, 0x23856c80
+693, 0xc4266fa6
+694, 0x912fddb
+695, 0x34d42d22
+696, 0x6c02ffa
+697, 0xe47d093
+698, 0x183c55b3
+699, 0xc161d142
+700, 0x3d43ff5f
+701, 0xc944a36
+702, 0x27bb9fc6
+703, 0x75c91080
+704, 0x2460d0dc
+705, 0xd2174558
+706, 0x68062dbf
+707, 0x778e5c6e
+708, 0xa4dc9a
+709, 0x7a191e69
+710, 0xc084b2ba
+711, 0xbb391d2
+712, 0x88849be
+713, 0x69c02714
+714, 0x69d4a389
+715, 0x8f51854d
+716, 0xaf10bb82
+717, 0x4d5d1c77
+718, 0x53b53109
+719, 0xa0a92aa0
+720, 0x83ecb757
+721, 0x5325752a
+722, 0x114e466e
+723, 0x4b3f2780
+724, 0xa7a6a39c
+725, 0x5e723357
+726, 0xa6b8be9b
+727, 0x157c32ff
+728, 0x8b898012
+729, 0xd7ff2b1e
+730, 0x69cd8444
+731, 0x6ad8030c
+732, 0xa08a49ec
+733, 0xfbc055d3
+734, 0xedf17e46
+735, 0xc9526200
+736, 0x3849b88a
+737, 0x2746860b
+738, 0xae13d0c1
+739, 0x4f15154f
+740, 0xd65c3975
+741, 0x6a377278
+742, 0x54d501f7
+743, 0x81a054ea
+744, 0x143592ba
+745, 0x97714ad6
+746, 0x4f9926d9
+747, 0x4f7ac56d
+748, 0xe87ca939
+749, 0x58b76f6f
+750, 0x60901ad8
+751, 0x3e401bb6
+752, 0xa058468e
+753, 0xc0bb14f6
+754, 0x2cb8f02a
+755, 0x7c2cf756
+756, 0x34c31de5
+757, 0x9b243e83
+758, 0xa5c85ab4
+759, 0x2741e3b3
+760, 0x1249000e
+761, 0x3fc4e72b
+762, 0xa3e038a2
+763, 0x952dd92c
+764, 0x2b821966
+765, 0xfa81b365
+766, 0x530919b9
+767, 0x4486d66f
+768, 0xccf4f3c1
+769, 0xa8bddd1d
+770, 0xcc295eb9
+771, 0xfccbe42f
+772, 0x38bacd8d
+773, 0x2261854f
+774, 0x56068c62
+775, 0x9bdaeb8
+776, 0x555fa5b6
+777, 0x20fe615e
+778, 0x49fb23d3
+779, 0xd093bad6
+780, 0x54919e86
+781, 0x7373eb24
+782, 0xfbaa7a98
+783, 0x5f62fb39
+784, 0xe03bc9ec
+785, 0xa5074d41
+786, 0xa1cefb1
+787, 0x13912d74
+788, 0xf6421b8
+789, 0xfcb48812
+790, 0x8f1db50b
+791, 0xc1654b87
+792, 0x948b43c2
+793, 0xf503ef77
+794, 0x117d891d
+795, 0x5493ffa
+796, 0x171313b1
+797, 0xa4b62e1e
+798, 0x77454ea6
+799, 0xbea0aff0
+800, 0x13c36389
+801, 0xe3b60bac
+802, 0xa176bed3
+803, 0x2863d428
+804, 0xe2314f46
+805, 0xa85cd3d4
+806, 0x7866e57
+807, 0x8f03f5bc
+808, 0x239ae
+809, 0x46f279fb
+810, 0xcca00559
+811, 0xaa07a104
+812, 0x89123d08
+813, 0x2e6856ba
+814, 0x43a9780d
+815, 0x676cff25
+816, 0x6744b87d
+817, 0xee260d4f
+818, 0xb98d8b77
+819, 0x9b0ca455
+820, 0x659f6fe
+821, 0x28d20d1c
+822, 0x601f2657
+823, 0xdec3073e
+824, 0x61263863
+825, 0x1a13435a
+826, 0x27497d1e
+827, 0x17a8458e
+828, 0xdddc407d
+829, 0x4bb2e8ac
+830, 0x16b2aedb
+831, 0x77ccd696
+832, 0x9d108fcd
+833, 0x25ad233e
+834, 0xaa9bc370
+835, 0xa873ab50
+836, 0xaf19c9d9
+837, 0x696e1e6b
+838, 0x1fdc4bf4
+839, 0x4c2ebc81
+840, 0xde4929ed
+841, 0xf4d0c10c
+842, 0xb6595b76
+843, 0x75cbb1b3
+844, 0xbcb6de49
+845, 0xe23157fd
+846, 0x5e596078
+847, 0xa69b0d29
+848, 0x2118a41
+849, 0x7088c16
+850, 0xc75e1e1
+851, 0x6a4af2d6
+852, 0xf19c6521
+853, 0xaff7b3b1
+854, 0x615295c7
+855, 0xbda3a8d7
+856, 0x5b5ca72e
+857, 0xdad9d80f
+858, 0xfa81c084
+859, 0xf4703fa
+860, 0x3ca54540
+861, 0xa8961d51
+862, 0x53d1ecc2
+863, 0x808d83b6
+864, 0x68e8c48e
+865, 0x89be2039
+866, 0x9088ea11
+867, 0xb8665d12
+868, 0x91272f9
+869, 0x53dddff2
+870, 0xb7a54ab
+871, 0xd2b645ca
+872, 0x99fb8590
+873, 0x5315c8e
+874, 0x2a913806
+875, 0x7f15eb2b
+876, 0xa7f1cc5d
+877, 0xbb2ee836
+878, 0xd9fafd60
+879, 0x17448d6f
+880, 0x999ec436
+881, 0x482ec606
+882, 0x9b403c0e
+883, 0x569eb51b
+884, 0xb275d1a6
+885, 0xadd29c31
+886, 0xb7ebdb15
+887, 0xdfef3662
+888, 0x51aba6db
+889, 0x6d41946d
+890, 0x77bf8896
+891, 0xcafa6fab
+892, 0x976ab40f
+893, 0x49a6d86b
+894, 0x56639e55
+895, 0x9945b996
+896, 0x81459b50
+897, 0xbce97542
+898, 0xe397c9c9
+899, 0x247a5955
+900, 0xb72b1573
+901, 0x86306f86
+902, 0x34f65dc5
+903, 0x909360c0
+904, 0xf3f696ef
+905, 0xcb9faae5
+906, 0x93daecd9
+907, 0xde1af7af
+908, 0x43a1f2d
+909, 0x6d75cde5
+910, 0x9e412b6
+911, 0x5673fed
+912, 0x16bb511a
+913, 0x35ef4cca
+914, 0x4e615aca
+915, 0x5cdaf47a
+916, 0x26676047
+917, 0x8c199325
+918, 0x2adf0cb9
+919, 0x84f2e6fd
+920, 0x5e627f64
+921, 0xb7cee354
+922, 0x542ab4a6
+923, 0xe59cd83b
+924, 0x89cc3f10
+925, 0x92b0f5f
+926, 0xc1328370
+927, 0x8208d9f7
+928, 0x68eb00cf
+929, 0xfadd4ac4
+930, 0x2517784f
+931, 0x4042b99
+932, 0x75ce0230
+933, 0x97c5a1b4
+934, 0x1a97f709
+935, 0x4c62781e
+936, 0xf530a83
+937, 0x75776413
+938, 0x321c7240
+939, 0x6afe4e36
+940, 0xad00a2b4
+941, 0xbc05477d
+942, 0xb0911e80
+943, 0x9935b87d
+944, 0xd535eec5
+945, 0x149af45e
+946, 0x786934b0
+947, 0xbc13cdac
+948, 0x208bfa2e
+949, 0xcf4b39cc
+950, 0x6ac6c172
+951, 0xbfa9a37
+952, 0x42d28db6
+953, 0x2bf1ea63
+954, 0xbed6e677
+955, 0x50325d27
+956, 0xa79d3b8b
+957, 0x52448bb1
+958, 0xefaad1bd
+959, 0x833a2e54
+960, 0xd9de549a
+961, 0x9f59672f
+962, 0x9d5f5f16
+963, 0x1c914489
+964, 0xc08fa058
+965, 0xb188698b
+966, 0xdc4672b5
+967, 0x594f720e
+968, 0x56ed428f
+969, 0x9b0898af
+970, 0x8a64d3d5
+971, 0x773308d6
+972, 0x84d62098
+973, 0x46da7cf9
+974, 0x1114eae7
+975, 0xf9f2a092
+976, 0x5363a28
+977, 0xf2db7b3a
+978, 0x102c71a9
+979, 0xe8e76aaf
+980, 0x77a97b3b
+981, 0x77b090d
+982, 0x1099620e
+983, 0xa6daaae6
+984, 0x86ff4713
+985, 0xc0ef85b8
+986, 0xf621d409
+987, 0xfd1561e2
+988, 0x4bcc687d
+989, 0x596f760
+990, 0x7c8819f9
+991, 0x8cb865b8
+992, 0xadea115a
+993, 0x56609348
+994, 0xb321ac14
+995, 0x1bac7db2
+996, 0x5fe6ee2
+997, 0xe9bfe072
+998, 0x15549e74
+999, 0xad8c191b
diff --git a/numpy/random/tests/data/pcg64-testset-1.csv b/numpy/random/tests/data/pcg64-testset-1.csv
new file mode 100644
index 000000000..0c8271fab
--- /dev/null
+++ b/numpy/random/tests/data/pcg64-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0x60d24054e17a0698
+1, 0xd5e79d89856e4f12
+2, 0xd254972fe64bd782
+3, 0xf1e3072a53c72571
+4, 0xd7c1d7393d4115c9
+5, 0x77b75928b763e1e2
+6, 0xee6dee05190f7909
+7, 0x15f7b1c51d7fa319
+8, 0x27e44105f26ac2d7
+9, 0xcc0d88b29e5b415
+10, 0xe07b1a90c685e361
+11, 0xd2e430240de95e38
+12, 0x3260bca9a24ca9da
+13, 0x9b3cf2e92385adb7
+14, 0x30b5514548271976
+15, 0xa3a1fa16c124faf9
+16, 0xf53e17e918e45bb6
+17, 0x26f19faaeb833bfc
+18, 0x95e1d605730cce1b
+19, 0xa7b520c5c093c1aa
+20, 0x4b68c010c9b106a3
+21, 0x25e19fe91df703f0
+22, 0x898364bb0bf593cb
+23, 0x5bd6ab7dbaa125db
+24, 0xd1fe47f25152045c
+25, 0x3bb11919addf2409
+26, 0x26a8cb7b3f54af8
+27, 0xe6a27ee11200aa24
+28, 0x7cb585ab01e22000
+29, 0x78e60028676d2ef3
+30, 0x5c32535e5a899528
+31, 0x83e8b6f8c4a46fb3
+32, 0xe56ef7668a161246
+33, 0x36dcbc15aeb73055
+34, 0x5ea247f0bd188acb
+35, 0x438b547b84601a80
+36, 0x8acda2a1273e9e3d
+37, 0x2b05e30a4b40c24c
+38, 0xfd87236bd13af032
+39, 0x471df211d8d985ef
+40, 0x18e8a5609a793292
+41, 0x46f0951fab6dc4e3
+42, 0x6c199c4e700f6795
+43, 0xf04aa16bfb7d22cb
+44, 0xd763d269fbaffc89
+45, 0x9991930cefbe5c2b
+46, 0xb2a11b953f824c96
+47, 0x63fd9f52172c44b0
+48, 0x183bdad907b1d848
+49, 0xe17953cddb931c52
+50, 0x515cf16726ec205a
+51, 0x88c327605150711a
+52, 0xc7090dd79cbc8dc3
+53, 0xcb487cedeb00a350
+54, 0xc8abf254d87b657
+55, 0xd43cc4cbfb493d1a
+56, 0x8705452e5d9ed1e
+57, 0xcecd11446769cf43
+58, 0xde72156c8d65bc69
+59, 0x796a8f0f47d52ee8
+60, 0xb4c0da443917d6c3
+61, 0xe07ad7568a8e3dc3
+62, 0xc24a8da39ce6dc21
+63, 0x92b21ea80a8556eb
+64, 0x572f21e531edf3af
+65, 0x9b917ed56bbed198
+66, 0xe65fd8ddc5ab3d7d
+67, 0xf55a80a8ec84fa18
+68, 0x18fc22e1a5227b61
+69, 0x72305dc7eeaa79d3
+70, 0x47ce58a36e7592cf
+71, 0x14c6374340c0f7cc
+72, 0x6f98273d4eb5a2c
+73, 0x59a8702c46fe8f8a
+74, 0xb67cbd8113cfe57f
+75, 0xaa03c5db5f5b7690
+76, 0x3fb0f77ea4568013
+77, 0x756530990398b26e
+78, 0x4c1952b2a3a6a343
+79, 0x1da15c5383074582
+80, 0xb405b21c81c274f7
+81, 0xbe664677a16788b
+82, 0x9d2e37550bcee656
+83, 0x8b4589f0d9defe02
+84, 0x2935f018ee06a59
+85, 0x3834bf88be97ed11
+86, 0xa610d049cea79b6d
+87, 0xd49ffc0d09a59ea9
+88, 0x4073365b76567adf
+89, 0x499eefb9bb7513e2
+90, 0x74a743ee6b0138a9
+91, 0x3bf0880f2d947594
+92, 0x555d1c0498600a99
+93, 0x923b32a88ef2ffa4
+94, 0x7325411065fbedea
+95, 0x9f4129ff8b79d300
+96, 0xab2b0a9b8a3785dc
+97, 0x11734bdfba3a1713
+98, 0xc8333398841ba585
+99, 0xee2409cc234e6742
+100, 0xf6638e700872ecd2
+101, 0x10875300c13cd284
+102, 0x27a9bbed7c15b2d3
+103, 0x3c87f8fef31ce9bd
+104, 0x92be263cd0914a95
+105, 0xa7b0f11bc742307e
+106, 0x4a56f788cc1c1a3c
+107, 0x4a130fa32257a48b
+108, 0x5d4d9eda16e90286
+109, 0x7cc2af564844bedc
+110, 0x2532867bfe7cda1a
+111, 0xb1c504676611fd17
+112, 0xce8e86cfb4189aee
+113, 0x99685898980d1970
+114, 0x8c3b67db23bcf1e
+115, 0x73e14c93905b135f
+116, 0xf0271b64ac2bd4d3
+117, 0xf4beba82f3ec1b2d
+118, 0x1cdbf3ee9f210af
+119, 0x2e938557c09c3ea6
+120, 0x2d314ccfa6ffd81d
+121, 0x31ad47079950ade4
+122, 0x342b27547b900872
+123, 0x171b0e20b9ef1a76
+124, 0xdf10ce6318b03654
+125, 0x1d625df4aa718897
+126, 0x8712715a9f6e02ec
+127, 0xb4a072da725bca3b
+128, 0x19d346cb7734bd42
+129, 0xfd4281d311cb2958
+130, 0x58274c9519fc8789
+131, 0x4cacf29d885fd544
+132, 0x784b14d1c2523b80
+133, 0x2d25242131bb2373
+134, 0xcd2a5e43a7d9abf9
+135, 0x15eda3806e650ecb
+136, 0xdaac5e277d764d96
+137, 0xdc5a5dd59aaa94e0
+138, 0x40d00237a46d5999
+139, 0x6205dd35a692743f
+140, 0xbbd8236740361f09
+141, 0x1625c9f4e7288bf9
+142, 0xb74f12df1479e3ce
+143, 0xb2d72a51b43d7131
+144, 0xf006a324b3707c83
+145, 0x28e8ab4abe7655b8
+146, 0xfb480093ad7ab55
+147, 0x3f8abd0d6ff8d272
+148, 0xc81a94177ac26bb7
+149, 0x3cdc178307751b14
+150, 0x9de84cc2b10ba025
+151, 0x3f8ab5aefcd046e2
+152, 0x43bdb894e1ee83b2
+153, 0xe288a40f3f06ac9d
+154, 0xdab62a7d04b4f30f
+155, 0x49f4e20295e1a805
+156, 0x3643764805e0edef
+157, 0x9449954618b6b
+158, 0x6c87e0d4508e0ce0
+159, 0x3a334be688a9dd7b
+160, 0xb35c39228776e499
+161, 0xc4118bfff938490e
+162, 0x88cbde3dcbb034b2
+163, 0xf91b287793c417c3
+164, 0x42b15f731a59f5b3
+165, 0xffa27104bbe4814d
+166, 0x1b6789d138beccde
+167, 0x542c2c1440d0ceb9
+168, 0x367294504d18fa0d
+169, 0xf918b60e804a1b58
+170, 0xd390964e33a9d0e3
+171, 0x23bb1be7c4030fe8
+172, 0x9731054d039a8afb
+173, 0x1a6205026b9d139b
+174, 0x2fa13b318254a07e
+175, 0x69571de7d8520626
+176, 0x641a13d7c03332b7
+177, 0x76a6237818f7a441
+178, 0x4e77860d0c660d81
+179, 0x4441448a1c1cbdb2
+180, 0xccd7783a042046e5
+181, 0xf620d8e0805e3200
+182, 0x7de02971367fdd0c
+183, 0x539c263c5914cab1
+184, 0x9c3b9ba1a87bbf08
+185, 0x6d95baa34cda215f
+186, 0x2db3f83ace0bac5f
+187, 0x7f5af1da2dc670a4
+188, 0xfcc098d16c891bfb
+189, 0x81a33df1d7a5ab12
+190, 0x767b0f863c8e9882
+191, 0x7a92983830de483d
+192, 0xfa7598c37a79ac25
+193, 0xb89b3ca42ce03053
+194, 0x457a542b8efed4f7
+195, 0x571b7737fd0eeda7
+196, 0xa0f59e524485c0a
+197, 0x82dca766b7901efd
+198, 0xa68243caf6a3bd5d
+199, 0x1bac981c6c740e5e
+200, 0xbcd51bedf9103e44
+201, 0x4e197efd3ae5a7bf
+202, 0x523568efd782268b
+203, 0x5ec4ef1191fef09
+204, 0xed751ed5e31c9ab
+205, 0x44eac24de03e1b29
+206, 0x9237d57c011d3fb3
+207, 0xa8c6da0f7692f235
+208, 0x9f9eb6bc15d6cac7
+209, 0x34bb8e0c93427aad
+210, 0x115febd738eaac4a
+211, 0xa439991ed139d27a
+212, 0x45c7c2633d8710a2
+213, 0x48b7475f3405a3ce
+214, 0x80158497c77bd00b
+215, 0x935c316a5b1657cb
+216, 0x59c5d54440e9695e
+217, 0x337c78c5b3d0ede2
+218, 0x8c46bb956b93790d
+219, 0xbf1dd03e471d71c5
+220, 0x2d375e90a4bef583
+221, 0xd0365428331b3790
+222, 0xfcd3969ac827ecd4
+223, 0x392fb6c580498410
+224, 0x6d6db4ceab5ea6c0
+225, 0x9bf84f1972e24786
+226, 0x798dfd820959dcc5
+227, 0x2e425095e65e8bfb
+228, 0x8c1aa11536b1c9c3
+229, 0xd28e2ef9b12f6f74
+230, 0x86583bc98c8f78d2
+231, 0x489877530e3f93e7
+232, 0xb1d9430631104a15
+233, 0x1814f6098e6263bd
+234, 0x8e2658a4e0d4cd53
+235, 0x5afe20e2531cdb2a
+236, 0x30d02f7c4755c9bf
+237, 0xe1e217cda16ed2d2
+238, 0xccb4913a42e3b791
+239, 0xfff21363ac183226
+240, 0xe788690bbda147a7
+241, 0x76905cf5917bfc6a
+242, 0x2a8fa58f7916f52c
+243, 0xf903c0cc0357815a
+244, 0x15d20f243a4998d2
+245, 0x5b7decee5a86ea44
+246, 0x114f7fc421211185
+247, 0x328eb21715764c50
+248, 0xaffaa3f45c0678fd
+249, 0x2579e6ef50378393
+250, 0x7610ab7743c19795
+251, 0xf9923d2bd101b197
+252, 0x57e42e7a62ba7e53
+253, 0x9f1dc217b4f02901
+254, 0x88a9ebd86509b234
+255, 0x867fc926aecc8591
+256, 0xaf22c1bfef04c718
+257, 0x39f701f0313f4288
+258, 0x6171ad397e6faab2
+259, 0x239bb5b9abdec4fc
+260, 0xd9a591e25dd01c6e
+261, 0x826dc4a75b628e49
+262, 0xf112b152c408f47
+263, 0x6843a06110f86c0
+264, 0x965e56a7185c1332
+265, 0x8d84492edbc71710
+266, 0xeee8ec111cfd1319
+267, 0xf2858e94ad98e458
+268, 0xbc9589fdf5f3a97e
+269, 0xaf0ceef3bc375130
+270, 0x48f4aaf13fa75c1e
+271, 0x111e9db47bee758f
+272, 0xea3171df130164ba
+273, 0x2a7bbe30bf827ab6
+274, 0xc516c3fdbf758c35
+275, 0xec55097754b04be5
+276, 0x374a997d52b6d3e6
+277, 0x487df5456085ffbc
+278, 0x528883b84df8eafe
+279, 0x805f77ab5ba26f86
+280, 0x8eb81477dc04f213
+281, 0x471ea08ec6794d72
+282, 0x69d3667ecc4d2176
+283, 0x98b7b6e295548a66
+284, 0x3877713c173f8f2
+285, 0xa00542570d0e8de3
+286, 0xf534b1bfa4033e50
+287, 0x7e1fedeac8bf6b26
+288, 0x8043f37c89628af4
+289, 0x1dd7039ec295e86d
+290, 0xce9c05b763a40cc4
+291, 0x246926481e61028f
+292, 0xb7cb0f1babf5893b
+293, 0xefe6b777f37fc63e
+294, 0xebbcabb4cb35cdcb
+295, 0x39fa63cd711eeea9
+296, 0xad5d3ba7aaf30c8d
+297, 0x8e9e78fe46021990
+298, 0xc7eaef6e7d5a3c62
+299, 0xefccdd5495d3f386
+300, 0x2179557ee8cfc76a
+301, 0x88a77f621f0885ce
+302, 0xafda62674543d90c
+303, 0xb8e6fbe2e13e56c0
+304, 0x8bfbbe26a14f9b1a
+305, 0x1404f59f5851f8c3
+306, 0x1140c53a0489566d
+307, 0x3edf2d138b5c3f1d
+308, 0x75d6bb275d817dc
+309, 0x8e660ae27107664e
+310, 0x7a8021038ee303e1
+311, 0x2042ef5eefa9079f
+312, 0xe3e7b90bbf6d457a
+313, 0xf3f819d2bb9405b
+314, 0x522e42155cae0c10
+315, 0xf5bfbb975b40e233
+316, 0x2cf82b614dd95cfa
+317, 0x183ef4a96bc40e55
+318, 0x9f6e351c5ba4e752
+319, 0x37c1110683c90846
+320, 0x1d89b7a996d8a977
+321, 0x18a444f77c7cb4d9
+322, 0xd0a8a971b78dc893
+323, 0x860232fb9e6543f1
+324, 0x60b6097f51002555
+325, 0xca1e5214123e3894
+326, 0xe03fe695c95f99bb
+327, 0x2c7c6779d5f03622
+328, 0xafeeee42f63055d1
+329, 0x670dde905515936a
+330, 0x9a922f42b59fb094
+331, 0xddb5ff49af5a651a
+332, 0xe61b04c9e58ebbf8
+333, 0x4e459dcf272e7fc4
+334, 0xd549e92c16adceeb
+335, 0x7a17dba1299d4a9c
+336, 0x825d756109f2b585
+337, 0xba142e61a9cb203e
+338, 0xc2a19f00e9c04a30
+339, 0x2d0f8140d23d0652
+340, 0x8b866d4d4d6caaf4
+341, 0x4f11d90dd91f8217
+342, 0xf6efc37373b9e0d
+343, 0x248493d6cd6a4736
+344, 0xd12b6ae74a951a3e
+345, 0x56e34722070b70a7
+346, 0x22d3f201cc9fa0eb
+347, 0xbfdcc320008291b7
+348, 0x1a7a6922e9204fbd
+349, 0x831421e0c4945ae4
+350, 0x66316feddddf0e11
+351, 0xa8c86a1517456554
+352, 0x14a9049ad989e335
+353, 0x837022259f141ecd
+354, 0xcb71793a06c261f7
+355, 0x4aeefc07ebe09a79
+356, 0x8982f15aa3b6594b
+357, 0x67bccfa7ed9b0d5b
+358, 0xb377463b523e9dec
+359, 0x53d3d594870fecb7
+360, 0xa5274b1caec5a60a
+361, 0xd6316d0cb643db39
+362, 0xabc1a9b536de88ce
+363, 0xed2fdb1383d2a077
+364, 0x12319c6feb97221b
+365, 0x7e0f6cd40ef47403
+366, 0x86135c84fe26dbf8
+367, 0xc96622d3fbbee19b
+368, 0xe3989d8d8511573f
+369, 0x42cc365554d1fdc7
+370, 0x4c1a1eb8bbce8b4f
+371, 0xfc4e30e7ef2034c1
+372, 0xc490444317a91e76
+373, 0x7ccdf469ff5dc81c
+374, 0xf5a0da4110cc09d7
+375, 0x505227baf34c0fb5
+376, 0xbe58737e8a35cc88
+377, 0xd449bee91b3e8c41
+378, 0x3e590e23299d0e6
+379, 0x291a7d9e0a64caf7
+380, 0xdc6fafbdfebd2293
+381, 0x8223f1e259fe8a65
+382, 0x6186fbc9efd9e3df
+383, 0xfda39b07e4007ffb
+384, 0xfc19aea98574dc02
+385, 0xd0e10d354fcacd8c
+386, 0xc9619916544a55a5
+387, 0xd454d50a8c8558cd
+388, 0xcd94a246712d91e
+389, 0x76a771f5d1231cce
+390, 0xdd20cb2b7b370ee5
+391, 0xa6f4f50feca57c49
+392, 0x78c8fb431f17ab9c
+393, 0x1b692b79a59b43cc
+394, 0x4c45045d287da7e6
+395, 0x522132e18bf43928
+396, 0x25c458983138b41c
+397, 0x2a1fb426ef229796
+398, 0x74dc324c74e5dd3d
+399, 0x6df75e3eb6eb5374
+400, 0xb63f2f4f9ca25b61
+401, 0xac72286112ee54d6
+402, 0x5a966f3d0a6863c4
+403, 0x8d7046bc64a46fc2
+404, 0xa7b740fd6e3087eb
+405, 0xcdbcbe0340cfcdf5
+406, 0xcb632613bf312b65
+407, 0xa91b3f2c2aac238b
+408, 0xa06deb3f5ae555a3
+409, 0x29d72e1f8db69
+410, 0x2d004bae09728ea6
+411, 0xc6eee5dce0736cc1
+412, 0xa7493145500ff60f
+413, 0xc4d68c4aa18ab93c
+414, 0x8210c29e79d48d7f
+415, 0xd0999d7889ecbef6
+416, 0x6e3bd61e66e93566
+417, 0xe6cc13d47d7d7b1f
+418, 0x3d6f181f42e03979
+419, 0xbed4e14fd867604a
+420, 0xbe511c84067bd86d
+421, 0x49a876d89e697d38
+422, 0xc04c3dde8f889c98
+423, 0xaf293eeab0f53e3f
+424, 0x9f6291dd65732cd6
+425, 0xd7811ac01de78c01
+426, 0xe385cf0261d50ec2
+427, 0x5a64134b3542bbf
+428, 0xf9d1302bc6f13a68
+429, 0x5d2aabbea37d8c31
+430, 0xd9842e99a5192970
+431, 0x713eadc4cd30e837
+432, 0xb7b002fc72abb413
+433, 0x276cfeea526af1cf
+434, 0x8519fe79b633a0ce
+435, 0x2f0e87363705a3e2
+436, 0x9adbac0be3c371e7
+437, 0xf3f44ba899a6173c
+438, 0x782d6c29618fde2b
+439, 0x7f61062acec408f
+440, 0x6e79cd836359258f
+441, 0x5c8e9b138df5785a
+442, 0xa54359c9f39a9a84
+443, 0xeec3f033135084b0
+444, 0x883ee717787a535c
+445, 0x9a2422b513a73b00
+446, 0x2dd4beddcdd64a58
+447, 0x90c8a13202239c7b
+448, 0x85b352ab759646d9
+449, 0x139f5cb2e46c53aa
+450, 0xe1d3ba6c721c66d1
+451, 0xaa66e0edc4b60a98
+452, 0x3521275c75be29b6
+453, 0x490a5190b3edfa5d
+454, 0xd2abcdd2ccb2f14e
+455, 0x9d9be8bef4a5857d
+456, 0xde19676f13ef7755
+457, 0xdac2fee2e42615f3
+458, 0xf4239801cb02f2ab
+459, 0xaa8bf923ed91875c
+460, 0x61d18a1940e4c7c0
+461, 0x1eb6aa3d5f077a6d
+462, 0xee7374c063bf29d8
+463, 0x2f0a59e34d76268d
+464, 0xc92e80e17d1eb3e9
+465, 0xafd05b3ec3d2ca72
+466, 0x28a61ad8d6c497b8
+467, 0xa7094d6834ad7d47
+468, 0x57d80ea9eccbb4f
+469, 0xb047e0fee6cdaf16
+470, 0x44f41b5eb48c00bb
+471, 0xd6dc8e1eb9c8c9ba
+472, 0x47adfd2c638c7849
+473, 0x365d63db7d526c68
+474, 0xc21cda439016135d
+475, 0x14d10c3f0f98863c
+476, 0xa93e56f74e037602
+477, 0x3b4e9c8915bdc9
+478, 0xb46f5ae155e54aa2
+479, 0x8e470d21ce1943e1
+480, 0x60b96301b5ba2e8d
+481, 0x1b473a41d381f9ff
+482, 0xabcf5a8e3269e73f
+483, 0xd410f6e94fb21fa1
+484, 0x65d1a47eebf87e5e
+485, 0x48eaa201c61cb843
+486, 0x212c1abc2499bfc5
+487, 0x4255ad8377d2d8d
+488, 0x44caeef472010612
+489, 0xffae764524f572f2
+490, 0x78d374d20c9ee550
+491, 0x6e003206c0511cee
+492, 0x7998a159145bfb82
+493, 0x921239650bda1d4d
+494, 0xae05025509bcfdc5
+495, 0xc6430c980be407b4
+496, 0x78524f1744b153f1
+497, 0x84089e6f468181fe
+498, 0x8d0d21d7dfb6c254
+499, 0x90bad90502a33603
+500, 0x3072a403cbd16315
+501, 0xdfadddf3f1c040c2
+502, 0x22f0b0639d9ff975
+503, 0xb49e48a4cad0765b
+504, 0x95a0a04f8239709d
+505, 0x56e147a24a4c481f
+506, 0xacf16ef61dea4c7e
+507, 0x424040afd2700de6
+508, 0xc67e8096a3c717a9
+509, 0x39f164181dd0a399
+510, 0x2449cedc1d62198c
+511, 0x7a53df11a1f1a61c
+512, 0x5596f1d4a3badae3
+513, 0x38ed4c822072b3d0
+514, 0xf07ef346b3fd730a
+515, 0xfd349c35c3ed51fd
+516, 0x2f15c9c7890f8f32
+517, 0x3b470df52b173c29
+518, 0xd31bfc8981281af7
+519, 0xbbcc9bdf561215bb
+520, 0x5782fffea326574f
+521, 0xb0ebdcfcc5e03290
+522, 0x7fd89d93d2b3fbef
+523, 0x280ea1865d9ba2
+524, 0xe726959845b2c100
+525, 0xd0361f032cd7dbb1
+526, 0x3c65ec2028b81a22
+527, 0x5221e9b2188920bf
+528, 0xeb5ab27c4125ec20
+529, 0x80a32dd48b54f0a4
+530, 0x369b5ced1012bebb
+531, 0x582d35d76530bc6f
+532, 0x7b50dc9b48e1e37d
+533, 0x37fdfe8bbacf8dad
+534, 0x7a0cb7e6e93840ea
+535, 0xa1132c870be0b2ce
+536, 0x9d8ac2c68267cd1a
+537, 0x470969b647fa7df4
+538, 0xabcb7d8adf7e2d24
+539, 0xacdebec9bdf9eb1c
+540, 0xe30f4cbf7eb6a59
+541, 0x746673836c4df41d
+542, 0x75120a6b647bb326
+543, 0x2f4eab556c3f6878
+544, 0xd84651ab05405b7a
+545, 0x9e695808b9622284
+546, 0xc93b71e56aa6e1a5
+547, 0x2be7f3be4a7b7050
+548, 0x6497e910b6733241
+549, 0xcf7050dfd08076fc
+550, 0x4e3cc156eca183f7
+551, 0xf801a33d9326c265
+552, 0x6aa293c8a47d40e6
+553, 0x28c429755faa6230
+554, 0x82b818651f54e7bb
+555, 0xa84d726d7acdbead
+556, 0x5cfa535d5774965d
+557, 0x4a34b7b1cb48d53
+558, 0x86a7b5bce426de84
+559, 0xfcd2307cecdb7318
+560, 0x16dbaaa71181a038
+561, 0x88e7e8cd261c2547
+562, 0x3c09ba6d1d5ea913
+563, 0x5dd3d643734ee5b6
+564, 0x326d725fe8cbb33
+565, 0x7bcca9ca2da8e784
+566, 0x482dcf6b11d7f9a4
+567, 0x1291b605b4cd3e04
+568, 0x6988181b50e2f4a8
+569, 0x649e3c37131fc292
+570, 0x4eeb67b9e21eba54
+571, 0xc051d39073dec45f
+572, 0xc99c52e110270d67
+573, 0xcb813d5d77868add
+574, 0x423a5f13573e7ac0
+575, 0x231ac4cc4fe73616
+576, 0x4c22b888a6e600ea
+577, 0x8059a6dc7c9e25c6
+578, 0x49f498a5b8ad22de
+579, 0xf1e812cc6d1826c8
+580, 0xbbaf60abe8b11e00
+581, 0x1d31d7f4d8be9a6a
+582, 0xfeadce70a9a10c14
+583, 0xb47c635bc136996a
+584, 0xd88e694c8da030cb
+585, 0xc41bbe132aff1364
+586, 0x34249ab18a4b0800
+587, 0xf14b5c825aa736cc
+588, 0x2710be6b08df78e
+589, 0x2ab56bcc9bf9e740
+590, 0x9b7f6e591b5f648
+591, 0xfb665c3772f34135
+592, 0x628a0a5d2db5d8d5
+593, 0xb3e3f251e61b5259
+594, 0x82310ae33faf1b23
+595, 0x24af8723a65cbd0b
+596, 0x671c93282fc4ad97
+597, 0x6cabeaac77270cad
+598, 0xef4643fe38b02b7f
+599, 0x7b011549d1ac6653
+600, 0xe2af87b9fccfe89
+601, 0x36b71ad67197ac8a
+602, 0xdbba55d06f2fd93b
+603, 0xf571dbd764b7f7e5
+604, 0x38ea402501cdbd45
+605, 0xb8ab5b5b1bab2913
+606, 0xfab973c4d45f32bd
+607, 0x9364f1717c2636b9
+608, 0xfad00f4d983e00fe
+609, 0xc90c532a11aef75a
+610, 0x64a6eda96e44783c
+611, 0x35891f2eb84520be
+612, 0x28d216080caed43
+613, 0x129629cc5bd206f6
+614, 0x22c3d39822cbb4b3
+615, 0xf1efbf4cce1eaa2b
+616, 0x7070cba12524ed08
+617, 0xa7ed0be9deabf20d
+618, 0x8ddb4cd6b454f76b
+619, 0xb82814b1db37b63
+620, 0x418e83b36de01876
+621, 0x9a538c7f39c6413
+622, 0xee0cd7abf8a2ecb9
+623, 0xa9222b07e95590f3
+624, 0x6296a415d68341e6
+625, 0x981e0a5a8f811929
+626, 0x4bb372d3b0de283d
+627, 0xa9805b5971866e16
+628, 0xaf3b5f5183497657
+629, 0x2152b0fd23c3d9f
+630, 0xb730c325b7173180
+631, 0x1e3439d231608c19
+632, 0x1c5ba6031379823c
+633, 0x87f5d12d6d365cbc
+634, 0xd3bc7f29614bc594
+635, 0x63102214bb391268
+636, 0x482bbd5bba648a44
+637, 0x6a23604690759dc4
+638, 0x4091d41408d3a39e
+639, 0x7cd017f922101b15
+640, 0x7ce9004ac5f9231
+641, 0x978bc3d8ec7f7fdf
+642, 0x5bd0c4d780580c11
+643, 0x4313c068bb040153
+644, 0x3ab7dab7bc38bf80
+645, 0x3aaf9c187728deea
+646, 0x6633a4ce8efb88d9
+647, 0x7263b089878f00fc
+648, 0xd0d767e96fe00eb8
+649, 0x184a7c0c01908028
+650, 0x1ebdf41e6f76e186
+651, 0xeb740ee1d0402083
+652, 0xfccf4974edb1c339
+653, 0x16e2707aa28306d
+654, 0x1684f0bdb018c3a5
+655, 0x887b6b67b88aa862
+656, 0x923d7810a2bea33a
+657, 0x56b3560babef5d6b
+658, 0xb39a14614c54b8c6
+659, 0x33e4dc545a509fc8
+660, 0x26e21f84142da9b
+661, 0xdd07598125756855
+662, 0x572d49a071d7ae0a
+663, 0xba3c7e3baea28760
+664, 0x7ecdb2d714db4b61
+665, 0x1c62b4920e1b2fe2
+666, 0x71bfafb70092834a
+667, 0xd710a4228f60d56a
+668, 0xeb16277d4ce4e95b
+669, 0x968168c90b16d3a1
+670, 0xac3439dfe8ad0062
+671, 0x5a8226f9dd5876ad
+672, 0xb843affe917291b0
+673, 0xd76d1e67051f8259
+674, 0xb73a6638cce8ccde
+675, 0xa0e6afd3c7295f9
+676, 0xff8857b4bbb5f4c6
+677, 0x99becf78938f0426
+678, 0xfcd17edc1e70f004
+679, 0x6223b8b23f2f50
+680, 0xca875f3e84587b4c
+681, 0x7d1e81e589f87fb9
+682, 0x9eb621586aa826fc
+683, 0xf46fb9ef5b9c2086
+684, 0x2882c9b7092725f3
+685, 0x5493f099bbedcd02
+686, 0x90c1ec979ffa811d
+687, 0x963f765025bcc53
+688, 0x56194e3ec3d9d4e9
+689, 0x7ec4720954cac1f0
+690, 0xfab3145171af7f90
+691, 0x52a0b4e41a13b593
+692, 0x740e2d4d5909d126
+693, 0x98f5339c09c94a28
+694, 0x1700e462fe8dec76
+695, 0x3dbffc2aa4695ac3
+696, 0x5763edacabdfe2a1
+697, 0x7b5b623ce49ef21d
+698, 0x30addc66f49860df
+699, 0xcc7511a6c31bceda
+700, 0x1b25b61ca75db43b
+701, 0x416bc4c298e59046
+702, 0x4cd11fe2d74e4649
+703, 0xb54458a9229fc978
+704, 0x8c21a27882b6ca35
+705, 0x57887c8b5e01639b
+706, 0xf4e893da996680bb
+707, 0x8d601297702c9c0d
+708, 0x2a27904a30aa53af
+709, 0x497800f6917ea8d0
+710, 0xe96db3340ada9c00
+711, 0xcc23166f14c010ee
+712, 0x782690d78fa65ec9
+713, 0xf3e00d74a0878eda
+714, 0xa7cbb683decca0a3
+715, 0xdd2e038e683a94aa
+716, 0xe2096ff8da896ca5
+717, 0xf7c83400afdabe11
+718, 0x395b8c6f6a4086a4
+719, 0x4a164ec05bee71d4
+720, 0xe87aa5d1ca0462fe
+721, 0x8dbc5aed6dff9ceb
+722, 0x12120d1e9552707b
+723, 0x877dca6889b3e6cd
+724, 0xbd65605c01e900fb
+725, 0xbd6b82c4157c3115
+726, 0x8b60282732caf78a
+727, 0x279fcf5e5de9e57f
+728, 0x34b34ebfb6a37eae
+729, 0xd258cc1a14e03b7b
+730, 0x9a528ba3db4a13fb
+731, 0xffa0aea59d057746
+732, 0x27fa7f456cd37c4e
+733, 0xe1117a57a6fdce63
+734, 0xdc8fc903970a1551
+735, 0x492dd104f30faf29
+736, 0x110def0959e5652b
+737, 0x7f8d1997636fdd15
+738, 0xfb77b05e538a9b59
+739, 0x2e41fa35b4b01fc6
+740, 0xbc35ae69a3374085
+741, 0x192c2a681c2d9b4b
+742, 0x12566b8866c189d6
+743, 0x9d88ea785c5185c8
+744, 0x30a621ad5f983c4
+745, 0x8b875efe1206f587
+746, 0x224d25c3af6e3423
+747, 0x7503e976a1ac7bcc
+748, 0x3c98aa869e823859
+749, 0x3d8835304b646892
+750, 0xf6353330ff970bc2
+751, 0x8a673f5e2edb8acb
+752, 0xf2fdcc53493838b9
+753, 0x85ddcd526236af16
+754, 0x60afb99814c676c5
+755, 0x32a1c2749e281ca8
+756, 0x2367a92ae3bee9ca
+757, 0x219fe082703743cc
+758, 0x34d8b74dc85182a9
+759, 0xdd04164c72db23f
+760, 0xe293ac28fe2671a9
+761, 0x9ca7d169cbda6f45
+762, 0x705c47972b4240ed
+763, 0xc10eda9eeb536209
+764, 0xc36ddacd0c94e85d
+765, 0x8eb592c27e8cd0d2
+766, 0x3e815991c76e7cc4
+767, 0xac9cfce31acf7580
+768, 0xbf7a4cb31c7aee94
+769, 0x663077444aceecf6
+770, 0xe7f614ff386eb568
+771, 0x79d7a229c66912c0
+772, 0x161ed4311f63e1f3
+773, 0x308a5faeb9982ede
+774, 0x7b38ddb9b7efd10
+775, 0x1e103a2589b27ecf
+776, 0x67b02baf4259f27e
+777, 0x868921c115ea2eee
+778, 0x959791912200f71e
+779, 0x4dd55f36dec10557
+780, 0xe3464d90080cb99d
+781, 0xfb2d4f6accce652f
+782, 0x109900a9257d77ba
+783, 0x3c4bda8e2c83684c
+784, 0xc9ae040fb7f868c6
+785, 0x78098ffe994f4905
+786, 0x7a94c33eca77f0b4
+787, 0xbe6a2a95e9b5c0e8
+788, 0x797d39cf963f4837
+789, 0x8d2e249e4425d06d
+790, 0x6ae2c30cd5da06f4
+791, 0x904489de762b179f
+792, 0x84713e2dfb591e3b
+793, 0x6405a40da3f6f51b
+794, 0x976b560d663a2df1
+795, 0xed1c544784ba1e22
+796, 0xca658e995ed9344c
+797, 0x2b1c6b8e4db49025
+798, 0x52b1513da528bad
+799, 0x3c63406d256d9968
+800, 0x63a31ca3d423f85e
+801, 0xb05a81f55789a720
+802, 0xd04412992c476c8e
+803, 0x828ec2f77a150a3d
+804, 0xee50926671bb60c6
+805, 0x5aa70f93e2df61b4
+806, 0x94d60fa2e8655858
+807, 0x3f5e5b770703cc7d
+808, 0xc62dfb2688ca7784
+809, 0xaaf02e1e8ba89fe4
+810, 0x4ab74e0d8c047405
+811, 0x31ee04fbac6fcead
+812, 0x1203b78b8228f5af
+813, 0x412a70836f9aa71a
+814, 0xab51cf98c03f1819
+815, 0x783a3ce9ce137f65
+816, 0x8897085b0a072cf2
+817, 0x685dd9bde8798cb
+818, 0x9a1fac7b1705e2c1
+819, 0xf3e9ff98de48e9cb
+820, 0x5c2d3eb1a1fbe917
+821, 0x3bda718b6b54d82e
+822, 0x29f2dd18f22f0821
+823, 0xb992da1572ac3597
+824, 0xacb69e7aa14b34f7
+825, 0xcd36e3ad14f088d1
+826, 0x6aaacc96a1ec55e8
+827, 0xf8ac593f154fe68f
+828, 0x18fc9cbff012339f
+829, 0x2f3368ccbbb99899
+830, 0x7cec7d17f37031f7
+831, 0x96e86bfaadcb8fc2
+832, 0x74f9e7ee3d42a752
+833, 0xbd52f6c7d9b0733
+834, 0xa48e6d96bb6ce1c9
+835, 0xaefa058254b82133
+836, 0xb7a19edfd0929107
+837, 0x6160ce9125b26e26
+838, 0x6537dbbde1d2aed
+839, 0xc567f9a6bec52dde
+840, 0xca29fd3f22443342
+841, 0x7732aa6db6a1c476
+842, 0x8f5a4d7df6b11b3
+843, 0x76649262aa7e31e1
+844, 0x60a13eb125fbc829
+845, 0xc81e4d123dd21ac1
+846, 0x643cbb09bb72f86b
+847, 0xf971a98fb25555a6
+848, 0xffa2774c66692d56
+849, 0xcb33c16c50b13ea9
+850, 0xfabf388dffda0e9b
+851, 0x55d41ec12ca24b9f
+852, 0x91cf693a3467e807
+853, 0x6be2c00b2c31d6dd
+854, 0xc5cf513b5251ae28
+855, 0xffc4384212403dec
+856, 0x45d4e1865255a69d
+857, 0xfb1dcf956972086a
+858, 0xcae946a55c4c55b8
+859, 0x7351ac7720e385c1
+860, 0x19aa8ffd86240254
+861, 0x8f515ae78f4040da
+862, 0x1e1ed2058de50fce
+863, 0x22d006dcdb374243
+864, 0x6e0f0ede7c95b441
+865, 0x70e8aa81b53b4d25
+866, 0x998f309ea41e3814
+867, 0x89ed6598fb66f390
+868, 0xb5997dc3278060df
+869, 0xb2a021eac4f7e046
+870, 0x3705b60aa2fd0768
+871, 0xfc415079ab9200e
+872, 0xf2871ac4cf45ecc9
+873, 0x24bf758d2246175f
+874, 0xac503dd6f8141b3
+875, 0x4e879d12d9f03b3
+876, 0x82034af8cf93b644
+877, 0x59899dd7e478a6c7
+878, 0xae90addb6eb11507
+879, 0x1524ddf76730cdef
+880, 0x6fd4afd5456b1c9d
+881, 0xcddb9221ea001cbc
+882, 0x64ff400bbf2e8604
+883, 0x6dda10549b06ed9b
+884, 0xed2c85104c261527
+885, 0xc7e09217d29929a8
+886, 0x56284df611a428b1
+887, 0x1a7608289c0a61
+888, 0x7cb63db15166ff66
+889, 0xc6013c76fcdcdc72
+890, 0x8e5dd566c7a5a676
+891, 0x5a8e8565f40d133b
+892, 0xe465973455848c44
+893, 0xf92eecbfe0f3c2c0
+894, 0x7d64155d4dcc5cac
+895, 0xf17595706f988dad
+896, 0xd590a001a6a19c5c
+897, 0x82a164475758db3d
+898, 0x6b144993ea1bbe32
+899, 0x22a81a7a6e453779
+900, 0x8e8c298df1a68a73
+901, 0x78056afd6d936b4c
+902, 0xaaceef0325faaf62
+903, 0xe78bb7699f82266f
+904, 0x523a2d283c5a5166
+905, 0x7076d87088f6c6db
+906, 0x6087dd54cff5aeb2
+907, 0x7ef82e62cb851680
+908, 0x4e8bcc8ed84d03d8
+909, 0xd12fa0361df3cfd3
+910, 0xefb89c79f8127297
+911, 0xa9af4e2fbce0b1f8
+912, 0x462136685b70331e
+913, 0xe9e74c93da699b77
+914, 0x9ec69215fb11d0c3
+915, 0xc10f229939e3e111
+916, 0x3f67fa79e41d2374
+917, 0xd5e7c1a9a7185162
+918, 0xa1dcce9ec91492fe
+919, 0xd4e61f0727b5d21b
+920, 0xdf6cdce46551800a
+921, 0xa3f256ce906982d3
+922, 0x209742a6b9ffc27
+923, 0x4006c96958526a57
+924, 0x9606aebc75a1967e
+925, 0x91b9f42fb64189df
+926, 0xb27119defcb938bc
+927, 0x128cc7a84ba05597
+928, 0x6c3df613c62d0d30
+929, 0x3adf69d48b629ec7
+930, 0xda42ee493837b128
+931, 0xb8e770480e760bb5
+932, 0x9feb55d57c99c626
+933, 0x29812d80afdae3ed
+934, 0xae4222a64276a8c7
+935, 0xe3897212a5b4ed53
+936, 0x98bedfd13886e669
+937, 0xca858675d7fc0d0e
+938, 0x28a359f665354234
+939, 0xfac2ccabe4128b35
+940, 0x61373cc5d11ca180
+941, 0x7007605a4512a87a
+942, 0xe71f8eade7b30b3d
+943, 0x3a9e77f9b99bd04d
+944, 0x70d3e42488098866
+945, 0xd30fc159c7cd4d99
+946, 0xe4d3f6600d2e2d6f
+947, 0x1088324dfa955c25
+948, 0x516437acd4764623
+949, 0x38a31abe50d0aa03
+950, 0x72e1054e9dc02ba
+951, 0xe6971dd664d1a2e2
+952, 0xf6698cb095d3b702
+953, 0xad995a5a8c19bd92
+954, 0x34e53c6936f656e6
+955, 0x10de240bc07c757a
+956, 0x3e3b9a6861c2bd1c
+957, 0x9c0b0b97d3712ec9
+958, 0xabf1505a75043aed
+959, 0xbdf93d3de3274179
+960, 0x28fa5904d3f62c28
+961, 0xc3b97b39ef6c5133
+962, 0xf2b2219225b8679d
+963, 0x8be4ec0f930c0aaa
+964, 0x47de5a56aa590643
+965, 0xb6f871b304129856
+966, 0x80a61c06233ab0f9
+967, 0x3ce6c3af8101b055
+968, 0x85b911708274e7d1
+969, 0x4cab65d093a488b7
+970, 0xaabc4b10661fe28e
+971, 0x35b16dea64474a68
+972, 0x1d6eb5b093361223
+973, 0xc39107b92f0fe1fb
+974, 0x1d09e048073c4841
+975, 0xc6a02f43aca8cb2f
+976, 0xaf6613dbc7da909c
+977, 0x5ac2a40c230aa756
+978, 0x33afb5e7c01c39a5
+979, 0xc7b0b20ea8b7d0ef
+980, 0xdf7306c8ccb1bbea
+981, 0x9710efc0c188b2a0
+982, 0xd6303eadb72c873e
+983, 0xa38ca609b118f35a
+984, 0x8390613065c6e535
+985, 0xdf9a0106757e431f
+986, 0x8bcf77039788e143
+987, 0x6026806a986b378e
+988, 0x482ff3b1394cb1dc
+989, 0x2a27d0ccac9ede9c
+990, 0x53c77f26e271b3ab
+991, 0x1ba004cf276cf3f
+992, 0xc135b0517dc81f7c
+993, 0x5d137838db75e442
+994, 0x3fe505f93d1dbdd7
+995, 0x351654ae7d598294
+996, 0x173f8d182af9d84d
+997, 0xf97dfcd164fe11c5
+998, 0xcda423e5ad43b290
+999, 0xa5cb380b8de10d10
diff --git a/numpy/random/tests/data/pcg64-testset-2.csv b/numpy/random/tests/data/pcg64-testset-2.csv
new file mode 100644
index 000000000..7c13e3172
--- /dev/null
+++ b/numpy/random/tests/data/pcg64-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0xa30febcfd9c2825f
+1, 0x4510bdf882d9d721
+2, 0xa7d3da94ecde8b8
+3, 0x43b27b61342f01d
+4, 0xd0327a782cde513b
+5, 0xe9aa5979a6401c4e
+6, 0x9b4c7b7180edb27f
+7, 0xbac0495ff8829a45
+8, 0x8b2b01e7a1dc7fbf
+9, 0xef60e8078f56bfed
+10, 0xd0dbc74d4700374c
+11, 0xb37868abbe90b0
+12, 0xdb7ed8bf64e6f5f0
+13, 0x89910738de7951f
+14, 0xbacab307c3cfd379
+15, 0x2cf7c449d8b927a6
+16, 0xdcf94b3a16db7f0e
+17, 0x8a9d33d905a8792e
+18, 0x4cb9eb2014951238
+19, 0x6c353acf7b26d6f1
+20, 0x73ff53d673aa30c
+21, 0x1fd10760015eca68
+22, 0xabae0aa9021eeba8
+23, 0xa5ae363a868ee2bb
+24, 0x9d89e0f041de6631
+25, 0x6238b133c3991a65
+26, 0xff49267d75fef51a
+27, 0xfb180656ce13c53f
+28, 0xaf7fadf36128712d
+29, 0xa6847fc6f339c63e
+30, 0xb03e0b80d71ea5bc
+31, 0x63905abcb43969af
+32, 0x2295af3ee00a3bba
+33, 0xb8b375b994330415
+34, 0x867d9ef1d8716a3b
+35, 0x4f6c02f5601b4e18
+36, 0x7c5fb4c16c470d18
+37, 0xe3b57986b804b343
+38, 0xef1d79d212aca692
+39, 0x5b98774c8806209c
+40, 0x924fc76bac38a5d1
+41, 0x5266084c412ddeed
+42, 0x98240bf9b831d6a3
+43, 0x5681599e81219442
+44, 0x6441248fc2ba92bc
+45, 0xe3e9051a540349ea
+46, 0x3a2700034390baa3
+47, 0x9f893155b6d402bc
+48, 0x158207910c6d8aef
+49, 0xd5282ab7608c2cbc
+50, 0xc97f4651669dee4f
+51, 0x3d4750d95103ed60
+52, 0xe0614542caac1f04
+53, 0xefe5092144cfc6c
+54, 0x560bc486abd7e9ae
+55, 0x2678b71392daa4b8
+56, 0x734970d3dc2ba416
+57, 0xcbdbe849e51e4aaf
+58, 0x3b0b5e28b491556c
+59, 0xd51449ac45abd88
+60, 0x6790b59991f1b7ab
+61, 0x32d1c039ff2415bc
+62, 0x173b9772f24f72e0
+63, 0x9490a9ca9f883b1b
+64, 0x4c775989e6214222
+65, 0xac07db37e6ee6114
+66, 0x331371b2e3f10aee
+67, 0xf12e5326c21c28e4
+68, 0x5d77dc280c70d614
+69, 0x1b01bd17a2f281ec
+70, 0xa10d3b5882938487
+71, 0xed5a0033c394ae8f
+72, 0x70bc8ea568ea44b4
+73, 0xf4600ae77965e730
+74, 0x7ff92c0b321ce233
+75, 0x6cdbc87d0cc1d670
+76, 0x9ec64f0cf2000eb1
+77, 0xfebea50259800f68
+78, 0xf2edf9019a8fd343
+79, 0x75c584ac042e5468
+80, 0xc1fa8481d5bf9a1d
+81, 0x7f57180168514ac2
+82, 0x878100716b94f81e
+83, 0xc929406e3af17fd2
+84, 0x6a26e2c013e4bf4d
+85, 0xbc071d8848280955
+86, 0xb60d75abbfd1bdac
+87, 0xee9b76afeca9fa69
+88, 0x1d6c399d2f452810
+89, 0xbaa0bc1621e25c83
+90, 0xed6ba792f8671ba5
+91, 0xf7ca02c2ab11d8d7
+92, 0x3c3cadadf0b21e3
+93, 0xdd1784571e864e9c
+94, 0xfb2f992015157509
+95, 0xf50bb9f0d3ced743
+96, 0x261565f75c3e185f
+97, 0xf8fe33b284513e60
+98, 0xe3d2d10b5e024664
+99, 0xd28717566242cf35
+100, 0x7ae07d133ac5b789
+101, 0x3b7ccaaa53ac338e
+102, 0xcd480bace4871650
+103, 0xec6c78f923c080e9
+104, 0x44211d0ff8919d59
+105, 0x89f79af76d2a45fe
+106, 0x71583fd8a837548b
+107, 0xee57269c261511f5
+108, 0xa5ee8f3b128c5d1
+109, 0xbb64c20ed0765a17
+110, 0x9d4790ab2eeaf7e4
+111, 0x742f3db806d9e98
+112, 0xb81ec97aed6a0d1b
+113, 0x41808b34f6a8a23
+114, 0xc20913af175dfd4d
+115, 0x834427db263b22bb
+116, 0xedd9c632e611828a
+117, 0x10eac8524496f571
+118, 0xd76091b97eb00ab7
+119, 0x111298ae9fe95666
+120, 0x5824b2e2a6719c43
+121, 0x6e280ec539e934ed
+122, 0xf74fd832df90083e
+123, 0x8fee6d0f241c2e97
+124, 0x4244f331c2f19c3c
+125, 0x3dde75a845cce97f
+126, 0xe35bb8e635a9915b
+127, 0x39d2943037f7932e
+128, 0x1fe2d134201d0970
+129, 0x49d00b63c749b804
+130, 0x960c2942cd4e4e04
+131, 0x8dd8e009dbc0435f
+132, 0xcf493495c3a055cd
+133, 0x8f7b5a1c0f9fe9cd
+134, 0x49d5f90374641a25
+135, 0x69b3932073d3524c
+136, 0xd170603e7de84ee2
+137, 0xa062ba3ed3539948
+138, 0xf5861cc5b5d56c82
+139, 0x5e914998a30c7e76
+140, 0x8d77f2ad1503c0f1
+141, 0x980b6a9e3b4181fb
+142, 0xd9299cd50694c084
+143, 0x253dc0f8f1cec4c5
+144, 0x68110fb9d1b3e695
+145, 0xe8f3120d0aabc461
+146, 0xb066e7df0dfb042
+147, 0xd29ce0f797e6b60b
+148, 0x6a569bb7ca33bd42
+149, 0xd46e08b2dc2385f8
+150, 0x28c61d11d055767
+151, 0x5d73aa3d1a2bb725
+152, 0x1421191e1c14829a
+153, 0xa711bfb6423df35e
+154, 0x461af97a86308006
+155, 0xb3e1018ff3519367
+156, 0xf19cf866a268ef2b
+157, 0x207715eac9199d1d
+158, 0xdd621c410975b78c
+159, 0xf390aea68683610
+160, 0x617a2d107a0047d9
+161, 0x6e05ac416e5bebf0
+162, 0x7d253e70506c1bed
+163, 0xf9f96f4a7dd53810
+164, 0xc693b29cb1573f73
+165, 0x4f1146b0020ea544
+166, 0x45140608fbd40579
+167, 0xdcf57219828ce6be
+168, 0xe19d58cca37b5b32
+169, 0x82bda95b2a161235
+170, 0x5823c3d8a2b6c9ba
+171, 0xfeb2e74092fdf89a
+172, 0x50e1ad1abc8f869d
+173, 0x2ec63d0c105eb8da
+174, 0xe14e1c4845a3264a
+175, 0xcff53670455eb6aa
+176, 0xaafaccd24619fa3e
+177, 0xf55a988486e2422a
+178, 0xecfba16a90ff4d04
+179, 0xbf8d36c2f644757a
+180, 0xdc56ed75a0dd6249
+181, 0x3f45023eff17c3bb
+182, 0x2428bbfe90023fab
+183, 0xab892c611adcb70c
+184, 0xb6f13d8c0c2b9d74
+185, 0x2ac3fb11d224f2a8
+186, 0x65433dcfae2d9351
+187, 0xe906859ae4b45f82
+188, 0x8fb7f5f093d76a3b
+189, 0x940dd290b5e88d1a
+190, 0x31b27d21bef116e7
+191, 0x86a964e2c83b5296
+192, 0x85ffd17bc079a9e8
+193, 0x16c47c724e7ab7f1
+194, 0xfb6098a9867e7d7f
+195, 0x9246fb69092c6cb2
+196, 0x1a4033572760f32
+197, 0xc5cc568a8b273b84
+198, 0xfa6f9f2fbdd44abc
+199, 0x9701b8e087718ba3
+200, 0x51d6a7dcf73f8f3a
+201, 0x30008172cc6a972d
+202, 0xac2ab49a5ca6ac81
+203, 0x31f28ef79461e54c
+204, 0x93e35a8da8cc6132
+205, 0x9a2c58beeba3d5b9
+206, 0xf6615c1de266ac39
+207, 0x127ff9f8166b766b
+208, 0x7ffe380e80a69556
+209, 0xbe7d2c228e1542f7
+210, 0x2d5ebb4e50ba1746
+211, 0x63585761ae1bf684
+212, 0x1019eb5cee022fea
+213, 0xb9d3540ab58da30d
+214, 0x1677f4cb45620eb9
+215, 0x6524baee51783822
+216, 0xdf9f2ddcfabb0adc
+217, 0x78e8acc43b287935
+218, 0xe9a1974e999222b5
+219, 0xc41324ec2291e780
+220, 0xea52abc9ecdcbc9f
+221, 0x209d7bcd46ec6b04
+222, 0x12d504c09803db2e
+223, 0x1200e6bf21475d81
+224, 0xde6d3c2b35fd2cfc
+225, 0xa2526900ac33bd3c
+226, 0x7f1f5290fc432bc5
+227, 0x29ddfb380a3d69c8
+228, 0xac79cb6942a2909d
+229, 0x516996685b67a92a
+230, 0xb5fc39041cb828bb
+231, 0x75d9d8ca0644a276
+232, 0x81e98b76be92a3e9
+233, 0xca27888fafe12179
+234, 0x17be2ae039925765
+235, 0x9429846c0e6d0342
+236, 0x327dfd50439815e9
+237, 0xcee20cd7bc254aeb
+238, 0x7d250389f453f29e
+239, 0xfd1b232a85c95569
+240, 0x2ed55fac80f3e9e9
+241, 0xf6886c20417a1be7
+242, 0xcd08e61f0b0fdfde
+243, 0x7b33e34da5c27bff
+244, 0xd043c4b7d5603dd5
+245, 0x9a544e4c70a3b686
+246, 0xa7b60398c381f771
+247, 0xe9e7a3487c4bd4f2
+248, 0x10b58fdfe1ff112c
+249, 0xd5c1c9748c0f4ceb
+250, 0x61be9d09159d54ff
+251, 0x5356f51e8239f510
+252, 0xfe7889d9b202ecef
+253, 0xc7fc19ca5d263d5d
+254, 0x7c4c07e61dfd9f69
+255, 0x6c315fe5015f300a
+256, 0xe0a5bc00039747b4
+257, 0x16397fdcf829ee80
+258, 0xb55aee80d16a5169
+259, 0xca0609944d007eea
+260, 0xcc982249f65a02ce
+261, 0x528161feb149c148
+262, 0xcbf08ba49b41c006
+263, 0x39af1ff0b6f14138
+264, 0x5cc036be69799aec
+265, 0x6adde125b1db21c5
+266, 0x8a99d83d6b613b67
+267, 0x1cd43fca9451f74c
+268, 0x682dbb26ecc96365
+269, 0x13b4be2ceb43e3
+270, 0xbe8fbc3b6f4f581e
+271, 0xda148a2f4bda5719
+272, 0x239106ca3319f393
+273, 0xb42b4dde641f0dd5
+274, 0xd233cfdf4cb0af74
+275, 0xfb5919d905589afc
+276, 0xd802a8860c10b66a
+277, 0x6c923e1d00e7b5bc
+278, 0xfacce1134f383b89
+279, 0xf9570abda7a6d553
+280, 0x80f0f9796a208f18
+281, 0xc0e1df5280951c57
+282, 0xe9f143f08257bbe0
+283, 0x79e4c6463123d588
+284, 0xdd2118583f2b1684
+285, 0xb399ff5f2329fa18
+286, 0x4b3e9ebae96f813c
+287, 0xc484dbf247787384
+288, 0x921865eb97603f2c
+289, 0x18063c68e257d300
+290, 0x643181f345e7fc26
+291, 0x12e0b0e8eadf9fa7
+292, 0x79e613fe73dfa354
+293, 0x6db4c59203b7217a
+294, 0x6c7a0e9ba6139eaf
+295, 0x9617c7ac4e3f6d97
+296, 0x1f68a7b4fb1b4b75
+297, 0xef0b7ab24944f466
+298, 0xaf1dee1f4be1bc89
+299, 0xd2e355c959f5fd8d
+300, 0xe594c3fb95d96efc
+301, 0x9554766ca3342906
+302, 0xa4bbdc77d12842c
+303, 0xb62400211ee489a8
+304, 0x91abadaaa3bbe67c
+305, 0xd371eeb91deb42bb
+306, 0x883bab35cbd2b6e5
+307, 0xd030c3d9411a9041
+308, 0xff3c110a858ff000
+309, 0x59bdf5ca47d0bde7
+310, 0x2bc80fa3cdba1853
+311, 0x6444ccb652662cb8
+312, 0xc0c7e256b9e90339
+313, 0x70714ea9c9d72302
+314, 0x96a0142f9d897d27
+315, 0x209a9097c5a91ef7
+316, 0xb9e33afc5171e009
+317, 0x47b37af433a58d40
+318, 0x30cc4ffbfa831d26
+319, 0xdcea4a85ff815466
+320, 0x907d5bd027f2e5cc
+321, 0x7c081f6852e04a4b
+322, 0xe61950749c1d502b
+323, 0x1604e937ee69834a
+324, 0xb2372d952dd25309
+325, 0x53f6a5b834c72577
+326, 0x2ce7a74395e0b694
+327, 0xacbf9ab4fe91f225
+328, 0x5ce1e63d3a2bb90f
+329, 0x54740da3a5ed139b
+330, 0xf194ddb39f29880b
+331, 0x3305374f5d8ec08b
+332, 0x831dd0164927ff4a
+333, 0x625baa78e4458cf
+334, 0x29d27dc0a4a71152
+335, 0xe227bae9a1401034
+336, 0xca0c209831846b2b
+337, 0x8e8cc54b08b5a411
+338, 0x38f2b4acaac27db6
+339, 0x8ec88baac814e86b
+340, 0x31c08e46b007bde
+341, 0xb686c02722794c09
+342, 0xb77cf8fc682e3907
+343, 0xa56334e7f606f4b2
+344, 0x9c80b127bddd5f4f
+345, 0x12df14834cd858bf
+346, 0x3f14762a9cf5fb9f
+347, 0x930a70941ef5779e
+348, 0x64e96c849c30c080
+349, 0xfdf53bfba1300484
+350, 0xec7a9363c21bc616
+351, 0x26e9fd6a115ecb47
+352, 0x9707a84b5bc77fbb
+353, 0xb23b2737b20d5903
+354, 0x22f4825ae80f6501
+355, 0x500644b12be6a01b
+356, 0xb746645b2af082db
+357, 0xe6af051f697892f8
+358, 0x577c724248a1cfc6
+359, 0x3d2b6a434c84eed3
+360, 0xd260f5efd7328314
+361, 0x95c16cc84bb3f55c
+362, 0x7a01b2e4e0e80ca7
+363, 0x41930c3ce70a0935
+364, 0x1299bccf39d4e110
+365, 0x494883ba1a8a87f
+366, 0x9478ecfe2d918e60
+367, 0x30ec9a5670cda8af
+368, 0xf9bc877e833e2b99
+369, 0x1b83a0acfbb4a8db
+370, 0x73bc1740c0d18880
+371, 0x65086ca9773cb3e1
+372, 0x3b78c3ccd63cff2e
+373, 0xbfae748795acfb31
+374, 0xa4c9d5d56a15ba20
+375, 0xb9cb41721e52b71e
+376, 0x1532f15d4dc47748
+377, 0x5a4d647a4b9ee632
+378, 0x8513c7c5a50898d9
+379, 0x6d3d98ccd5461b2e
+380, 0xa65e99be2fe98d6
+381, 0x31abc8855334a0e5
+382, 0xf1ed22a661dca5b8
+383, 0x299e2b63229e03be
+384, 0xda201a06687bce48
+385, 0xd27794b302142c55
+386, 0x642bd3e1c7898a9d
+387, 0x777f1ff00afa1a87
+388, 0xd2f1c84fb3877baa
+389, 0xae417583289191fd
+390, 0xd641f1d88e0e2d55
+391, 0xc1f1d98fb5d18ebf
+392, 0xb0f72aecdadce97b
+393, 0xe9b8abc764f6018a
+394, 0xd2a37cff8e890594
+395, 0x2dd70d631a528771
+396, 0xbf8ba0478c18e336
+397, 0x1630bf47f372ce0a
+398, 0x6d04ea20dc3f46b8
+399, 0x6591881bf34337f2
+400, 0x33c149c7eb5b4103
+401, 0xf01a8c9857c86748
+402, 0x184348cdfc16d215
+403, 0x141168b253d2ed7
+404, 0x52aaf012ef50a6f1
+405, 0xfda1722387e16f4c
+406, 0x43c30f57d6c038fa
+407, 0xd4a8611f5f96d214
+408, 0x2c512ce17e987f2c
+409, 0x961ce450f0fa2822
+410, 0xf55a506ec6cea9cd
+411, 0xb76d694d9c7f5ef6
+412, 0xfb029216dbd8e988
+413, 0x93162501896a0081
+414, 0xfbbbd2c5ab300f5c
+415, 0xd648b6da7387d491
+416, 0xc73b4697471d9d98
+417, 0xe37412bf1c93ee76
+418, 0xa1a96d96570e6637
+419, 0x5b3ab4f82428f65c
+420, 0x873d849b188aa36f
+421, 0x39fbee0ffc9fa9ff
+422, 0xc70d21b744d677fe
+423, 0x2b8a43c23043d209
+424, 0x93c33eaa37370d16
+425, 0x8930ac1880f2b0ef
+426, 0xac01d27707036af0
+427, 0xc2af3fee504343a0
+428, 0x1c1dae2ad5535d97
+429, 0x9ffc21804b76a480
+430, 0x69f903412cc13563
+431, 0x9d3c4e2759a0c47d
+432, 0xb1a8f894be6302b9
+433, 0x95e1fd7951479506
+434, 0xbb9e6c03cd4ae8e3
+435, 0x85206010c9b737cf
+436, 0x767e813694d6238c
+437, 0x4969af329ccbb30a
+438, 0x3aa9af1075aaea5c
+439, 0xb1ff519e8118a993
+440, 0xb21a23a3c91180fe
+441, 0x320b24582ca3fd88
+442, 0xf8ca56415fb4e453
+443, 0xabd0899c07205e77
+444, 0x87fdc7a44b4ad50f
+445, 0xd75744911641a278
+446, 0x7c8c9a65df6fcb95
+447, 0x79d785e3c7a5b695
+448, 0x421e4565ba1f592f
+449, 0x27f87eb2517835cf
+450, 0xb62cc4297441c83e
+451, 0xd817a80ac815ca6d
+452, 0xad84388130df2aa8
+453, 0x5e6b1640452d6ac8
+454, 0x936285e15edce2a3
+455, 0x903bccc4969768e8
+456, 0xefc2cb7b109d3140
+457, 0x633e9dfdda2d903a
+458, 0x2a2f3225925678a1
+459, 0xe07eac91a27f8547
+460, 0xe50ced40eda78cb3
+461, 0xc5b22500e1c7441
+462, 0x32becf61bca3aa72
+463, 0xa2e37c4b30671344
+464, 0xc9f1c1910f45d544
+465, 0x9b50333b2dcdf730
+466, 0x310bfd53a1684b94
+467, 0x1e1dc21e66ac6455
+468, 0x81876c2bfb1ed5a1
+469, 0xd0c54a3e25eadc7b
+470, 0x3791b6fbbd5c7ba0
+471, 0x133be57356c599fc
+472, 0x8d1148eb8e83fdea
+473, 0x311aedba0d8b42cc
+474, 0x1142ae52745f94bb
+475, 0xc5f4ab2fbde8c4a3
+476, 0xd23be827b5b24f6d
+477, 0x65f95194cd122715
+478, 0x4b48969d73125922
+479, 0x46f165052b8ff988
+480, 0x5c689f94b9275ff4
+481, 0x93b03823ff2d536b
+482, 0x871f3775aa4e3523
+483, 0x5af829f7cc0f66a5
+484, 0xa32e05739cbeac8c
+485, 0xacff1856ddace0fe
+486, 0x8eeb5e7f991a5322
+487, 0x6325c2720e0dbdea
+488, 0x9fb817bc4fdf5200
+489, 0x9786f0d850e43d78
+490, 0x571f76dd7f9fb77a
+491, 0x4d9e94e181cbc63f
+492, 0x8bb632d3376c547a
+493, 0x9cc26d9efd1c88b9
+494, 0x9c5d49579df52b0b
+495, 0x6201abf7e1cda07b
+496, 0x90d68f0c6c884963
+497, 0xfc5b66188ef7f561
+498, 0x6d9303cf2e0e0f95
+499, 0xd7cfcff535f5ed07
+500, 0x14d1a1228daa4ac6
+501, 0xe00ef5762f66ae50
+502, 0xf113a79471582978
+503, 0x430985281785dc7a
+504, 0x31914108c206ed5
+505, 0x7ba6707b6419971c
+506, 0x2ec63b033ce112e5
+507, 0xf8bcd36ced3b41e3
+508, 0xe5cf908c8010414b
+509, 0xf5ee224b7c703e30
+510, 0x9a9733af0b12338b
+511, 0x83e18cc00ace34f8
+512, 0xd52cff39e23008b8
+513, 0xa700578136b9c0c5
+514, 0x3fa179d32ac51f99
+515, 0xef2d5eab6d4ad380
+516, 0x709024a5abd032df
+517, 0xc607c7ee349ede87
+518, 0x803d784e9731eb5f
+519, 0x2ef06f4ba769282d
+520, 0x4bc1dca1e9f07eb9
+521, 0x930c958a7a72f94d
+522, 0x249bc8db2cc7a3bf
+523, 0x3845305798f9a5d
+524, 0x6f137eca9ab6f948
+525, 0xc31f5a963d31bd67
+526, 0x9d39693d5383626f
+527, 0x52fb41c335a8b98e
+528, 0xb79d1a29a06006ec
+529, 0x7c0926a7a3eda2cc
+530, 0xffdf5214406fd53e
+531, 0xc6aa02a7e94282b9
+532, 0xd4a4431b4aa301ee
+533, 0x4271cc0f9420d3ab
+534, 0x26fccd7cc7fc2485
+535, 0x330594bb945b8d5a
+536, 0x6ea8eaad12e5cb8c
+537, 0x831c3467726bede3
+538, 0x31d1eb10017eaa61
+539, 0xc7aa75e41508f5cb
+540, 0xde51810f0cadd0b5
+541, 0x50e5b3e73692f80b
+542, 0x82107ec55636e188
+543, 0x9828ef175d843ab4
+544, 0xb8edc6a860dd421e
+545, 0x25c0c138fd537ac3
+546, 0x47e72a771e8eb563
+547, 0xbb0f8c5333f4a2cc
+548, 0x91750d2fb9b2d479
+549, 0xe662d8f6fe38df36
+550, 0x72a6d879fb5619f0
+551, 0x6817c7878dcbf077
+552, 0x4e7741cb484661e8
+553, 0x3b3b3ba0be5711bf
+554, 0xa6989f5d25868765
+555, 0x43c276398997e4e0
+556, 0xdcbe16a94da28870
+557, 0x454936980a699c99
+558, 0xac614bfa8f0266c6
+559, 0x9174841392e213d5
+560, 0xa0e2acffc5fc9d1f
+561, 0xe53a08a7a0e6521a
+562, 0x2b845cf7c24172e0
+563, 0x265a4fc5f7adec0d
+564, 0x1f34fbe5f1e49420
+565, 0x139181f6fb647f20
+566, 0x88c35d46e2fcd05e
+567, 0x2a6d5b55903c0459
+568, 0xcea28eb621ad7bf1
+569, 0x5c9cdc13e7aaa30
+570, 0x5fe63e14746e7103
+571, 0x7923e53d73835db9
+572, 0x376e661210bf1b06
+573, 0x5b1cab85450efdd5
+574, 0x3908dc096c70b452
+575, 0x4825e303cd1f396f
+576, 0xed476bfd702957c3
+577, 0x6acc013aff5db743
+578, 0x62c80b776343d488
+579, 0x9c75edcd5b012697
+580, 0xaa053362a3b9770a
+581, 0xa907e236c7c07e94
+582, 0x15b2c380451692c0
+583, 0x94f79142697bd61f
+584, 0xbc657d31ea98d44f
+585, 0xcbaa5e52517a1f5e
+586, 0x96aa2e44a7c4a03f
+587, 0x216d3c66db2b515d
+588, 0x157001807e3ca88a
+589, 0x52b3a596bdd3859a
+590, 0xed747e7fc5e3adac
+591, 0x78fd765ddb2c448d
+592, 0xe53dc7299ed8614e
+593, 0x75ad41fb1d7a790a
+594, 0xc14f6b944b0e6cb1
+595, 0x7c314b69fce3df1c
+596, 0xb56d82eb740d7abc
+597, 0x5132a93c41251fdb
+598, 0xe3ce35bd2a82f958
+599, 0x440571a981c722f2
+600, 0x194cdfd9f186bc9
+601, 0xb89e522a5db00939
+602, 0xad35f339f68df3c8
+603, 0xa82ab18420322293
+604, 0xaffa6df9b72b27c4
+605, 0x9615694d23beaa2c
+606, 0x1d82ebe563abad91
+607, 0xab50ef65fbd94385
+608, 0x1b070dbd70a9a14
+609, 0x2ececa796abbadf0
+610, 0x6bbeafe9e81ab2a2
+611, 0x60dcd0d2a9b76914
+612, 0x1e748039ef05c33f
+613, 0x6d4d17f2213ccdff
+614, 0x9fa56132957bc987
+615, 0x60a17185de2428eb
+616, 0xb56038ddf306479c
+617, 0x3b1db5df92d06d8b
+618, 0x24d1bba8bdedf580
+619, 0xbfb7e6740ebaa4d9
+620, 0xab31c4473e46f61d
+621, 0x6deb3cdd8fd5869f
+622, 0x23032e47746d72d6
+623, 0xa9e72d734e10f2e8
+624, 0xbffd199b6157bc23
+625, 0x29f8254df273fb62
+626, 0xb076142130ee55ec
+627, 0x5b0b08374126c309
+628, 0xea4536aae979521f
+629, 0xc064e7abec91a174
+630, 0x46133ef80c59d935
+631, 0xf0227e2da1b14160
+632, 0x675a76641e1af5a
+633, 0x2f50a069b33d198c
+634, 0x3ded5a65e1d657eb
+635, 0xbb6999b020694f6b
+636, 0x86b2f2b33487aed7
+637, 0x76e14e85f8bfb4cf
+638, 0x38f7f1e44bd4e0db
+639, 0xc1a7d41b7e80d4ae
+640, 0x1dfaaf80bbceb42e
+641, 0x3f51c11497720c2b
+642, 0xce6da1415ddb8b80
+643, 0x7377d8bcd359b5f3
+644, 0xe077208f3f810aca
+645, 0x9a06a8a2dacbffce
+646, 0xca1f99156b09b735
+647, 0x2ff9a93064d91451
+648, 0x50f3ea93f351a7ef
+649, 0x606fceccb07054de
+650, 0x7e83d6d2f8f6685d
+651, 0x78f3995291c5d407
+652, 0xd28d2460e22d0228
+653, 0x2c5636f68a0054dd
+654, 0xd9fafb1c56c8f6cb
+655, 0xe39889b5f9d74464
+656, 0x1355372bf5db2cc1
+657, 0x26768426b9ac323
+658, 0x4af1dbdc1111fd89
+659, 0x66973587943b927f
+660, 0xf86f5f50684dfb1d
+661, 0x1247d574ff79b534
+662, 0xc8039f3259210fe2
+663, 0x79b573235c92a9f5
+664, 0x213f642d8450e2f0
+665, 0x5db7706973376566
+666, 0x6182c12e69b373d7
+667, 0x3e5ac47300aec07f
+668, 0x4b5b6c57b1574376
+669, 0x6b7fcceefd56b17c
+670, 0xf656c3455cb9d4b8
+671, 0x7577e2e13329721f
+672, 0xf33c0c53ce956e8d
+673, 0x7d0f328ee356174
+674, 0x10ec9a168088686e
+675, 0x71ef1776d062dfa
+676, 0xaa7b590a488a6bc4
+677, 0x38612b6dd8049a1c
+678, 0x939045e36874f731
+679, 0xcb9d1d74c56d5ac9
+680, 0x54f1c1c8fef1d8ff
+681, 0x3ee4b85c8c7e939e
+682, 0xb9b4608e019f352c
+683, 0x79d4701275d12e6a
+684, 0x2632a2d9835c7f19
+685, 0x1662cd9fba293692
+686, 0xbcb70265115ee944
+687, 0xdc43fb9761468604
+688, 0xe3eec4e7d3871352
+689, 0x829531753226989d
+690, 0x2748cc67f540e074
+691, 0x39c4af25d607837d
+692, 0x741a243f4cb5df99
+693, 0xda1353287e18b49a
+694, 0xa6735689d751ea74
+695, 0x46326d587340ce0b
+696, 0xc18531df4550012b
+697, 0x6f7901e05dd4b818
+698, 0xfb966afc4c001d63
+699, 0x6dc10fca67a9cfdb
+700, 0xd6527ffadf0feaae
+701, 0x3b900172045e25d
+702, 0xb7dd594cdded6a46
+703, 0x6602aee7ec1599fc
+704, 0x7fbf12f23747546a
+705, 0x32e63f662bd2de0d
+706, 0xedf47770b67ed641
+707, 0x331bef83481c5c2a
+708, 0x8fc4256fdf05158c
+709, 0x98eba48dabccf5e0
+710, 0xdbc2f2cdb7b1c154
+711, 0x7777755616517ad3
+712, 0xd473c147d2628ac1
+713, 0x861e15d1d760b5a7
+714, 0xf4d25926405ecb07
+715, 0xb7739c69effff86e
+716, 0xe97fbafa6f96830c
+717, 0xf13e8a334e8bede1
+718, 0xcd60010cba4ee4f9
+719, 0x1f537ac2b82e6008
+720, 0x1fda8d781a89140a
+721, 0x9dc204f3f4a463f0
+722, 0x456dcd18eb56a1ab
+723, 0x629957bc87bd16a1
+724, 0x2c8000ddb8c75253
+725, 0xc31dae9ec8449284
+726, 0xdac05c8baa2b691a
+727, 0x21ff7be9ffa3e7ac
+728, 0x844f4b5ed4ee08d0
+729, 0x651f913fd636c994
+730, 0xca3e71a2110b2d49
+731, 0x7709bc42253ed09d
+732, 0xbb164d45b6569d43
+733, 0x90ec2f040c20a112
+734, 0xfa6e77e9166f5be4
+735, 0x6b6d12c1842d587d
+736, 0xfcd7ff8466e25e2a
+737, 0x6a5a2ed8bd971297
+738, 0x2ec35f6bba5adcbc
+739, 0xc83676e16651249a
+740, 0x458f6064cefe10ba
+741, 0x90d54d527e6cd028
+742, 0xa5613e88db27c388
+743, 0x331e0c7d85aa1abc
+744, 0x8cee4977e210358
+745, 0xfcae379aa6cbff8e
+746, 0xd1407afc97a57e86
+747, 0x1fab25c864f094ae
+748, 0xd914864a63004552
+749, 0x4214d226a20f1384
+750, 0x3f4e0d80c488b715
+751, 0xc5ca2f654024b7c8
+752, 0xc1e27a124e7c821c
+753, 0xd890a915ffc7918c
+754, 0x22fba040ce51a9f8
+755, 0xbf61cebd8891617a
+756, 0x7846609ee228e319
+757, 0x536d1854375509b8
+758, 0xbbfb45fc6e666f50
+759, 0xd85b4c0527f9d7d6
+760, 0x528cc9c7fa2a84c8
+761, 0x27a1baece647f2cb
+762, 0xfddf0cb92fe09dc3
+763, 0xeb5008fe965d8d96
+764, 0x4a3307937eb2e5c8
+765, 0xd07d74c240c6c363
+766, 0x16f62290179d1bbf
+767, 0xe99c9bcc9cb1ece7
+768, 0xc64f9be03c8a93be
+769, 0x32659effaf666c1f
+770, 0x4bb228cfb30b6672
+771, 0x98764870842068a5
+772, 0x5b12ef2d2cd8bdcc
+773, 0xbc79d1c1b41f28b8
+774, 0x97a517cf3279fc9a
+775, 0x34ffd46c1d4d6025
+776, 0x9c302307ee25c8f0
+777, 0x399604eed1f18a8
+778, 0x1c9b813c2043142a
+779, 0x2944ea5e55267fe9
+780, 0x5a8a9f5e728ea667
+781, 0x30c8440adb804a0
+782, 0xee0e6b627099a937
+783, 0x3d50757ada3c52da
+784, 0x4548916b32c813ab
+785, 0x602a186fe5bf109b
+786, 0xf0d440a2227ba304
+787, 0x5a10d4e0ca9ea32b
+788, 0x6e5eb90da13ba64c
+789, 0x4c6af8fd04241ab2
+790, 0xf9eb31d26e093006
+791, 0x5d674878839fe3ea
+792, 0x1562b55b2484e47c
+793, 0xa87188c099c1cb61
+794, 0xb7736b8aa02a3392
+795, 0x5f4b301125abb20f
+796, 0x361d566984637f44
+797, 0x68c4b3feac8bd0c3
+798, 0x7066c634dd2503c1
+799, 0xfecbf7c9441eb6ea
+800, 0xdbc26ae0fc81436b
+801, 0x9ef3e2b48252e7a4
+802, 0x31a49b4c339b37c7
+803, 0xb01b2a83cf346cf4
+804, 0xc24dc2347f82fbe3
+805, 0x134cad272dcd410f
+806, 0x61260742823ba59c
+807, 0x53ac4c193a97c730
+808, 0x9207c9833af34b52
+809, 0xa72e7ee77078d1f5
+810, 0x2e6f6e1b05936885
+811, 0x783b99ce5dbf9464
+812, 0xfdfeb6f0d027bb44
+813, 0x40eeb27096f92b0
+814, 0x5ef96ff5d4a4521f
+815, 0x5595806ae873718a
+816, 0x67d449eecf4ca1c3
+817, 0xde837ab611364f3f
+818, 0x7034c24d2b139be9
+819, 0xe21166603e0a9c86
+820, 0x935694435c1f0d51
+821, 0x6cb3bec90c126088
+822, 0x4096ef662b7a9f89
+823, 0xd2d85b8d238d8c15
+824, 0xa4ea533ce3ec59b2
+825, 0x3654729d80a2db29
+826, 0x214c4cc3906d29d4
+827, 0x201c447e7588e373
+828, 0xe8b8f0ae25f683eb
+829, 0x6744aaf5754e38af
+830, 0xd1ffb10d6f27a061
+831, 0xe536733a7b3a6c30
+832, 0x39f0f66e47cbf2c9
+833, 0x856a9593526fde2
+834, 0x2e2a817a0098ea4b
+835, 0xc5e1eeb551a0e3d3
+836, 0x3f21e2f5e2d50b2
+837, 0x906af56c66dd9f8c
+838, 0x30f6dbd70329fac8
+839, 0xc443dfddf3c01a60
+840, 0x7ab85d9aa9675470
+841, 0x8c9080bd39717bfc
+842, 0x4b1ccdb3c3597f6f
+843, 0x74e2542d70ab5d67
+844, 0xbb3d236aad00f74
+845, 0xcf3cadf9a2804774
+846, 0xe851d9750e42bd07
+847, 0xc0ad82029b1c371f
+848, 0x7ee119eb552d6c07
+849, 0xd8024049bd1d784a
+850, 0xfa67a899760363
+851, 0xaa7c2f438b178197
+852, 0xc473674a47ffe064
+853, 0x539fbe3fc674c270
+854, 0xdb48484748a76f3b
+855, 0xc73b2b092060d
+856, 0xa1d2a15345016f5d
+857, 0x4d0fe8599f9bba47
+858, 0xa0edc275e6f8f1d1
+859, 0x40590a8655bc8d72
+860, 0x35b4223161f05f75
+861, 0xa04c0c0f616752dc
+862, 0x7f371ed2ca45432d
+863, 0x2ff1a08f75ac6438
+864, 0xe2dc5c3682282f48
+865, 0xe1e4179fa98d9013
+866, 0x8cb083d6843a73d5
+867, 0xb4c2b5921b706854
+868, 0x738e14c0e7352445
+869, 0xcd2b646f91afd8c7
+870, 0xd5779a5b57a264fd
+871, 0xc39ff855586c7d07
+872, 0x3e3f0098c631a859
+873, 0x644e02fae032110
+874, 0xa8834613c0a45278
+875, 0x69482f2c08e10657
+876, 0xe4ee475bdb87e69a
+877, 0xdc1ef7b25c0d0019
+878, 0x88a3fa2be18d8744
+879, 0x60a02e0b21c5bec7
+880, 0xb6867b88aa19bc1a
+881, 0xb599409affcf10eb
+882, 0xaeaa1778a5e59daa
+883, 0xd7a91a52c16663e3
+884, 0x93cb269affe07b1c
+885, 0x841b6ced3a4ba815
+886, 0x84541768e1540a5c
+887, 0xe3943c84f83b3020
+888, 0x5de366fbd7b45258
+889, 0xd787cc3bde91a661
+890, 0x814071446edecb57
+891, 0x15d8c602a1141514
+892, 0x72f07bc8002d1d0d
+893, 0x4a8bd8dc9a1f0f3e
+894, 0x8723796ae0f20d35
+895, 0xda7283c2051f73b2
+896, 0x2df0cc247f90bd3b
+897, 0x79a8522b968f990a
+898, 0x951ede190c8b9d02
+899, 0xc512f1a5b14b018a
+900, 0xf0e3ddc03b9a4259
+901, 0x8cf4a35ad312e15f
+902, 0xebef28926b11094b
+903, 0x5628ba687325921c
+904, 0xc3aa75e57edc49c3
+905, 0xc38382fa98e762ba
+906, 0x8d209e896285848e
+907, 0x2c7d6adf592b4a3e
+908, 0x62de48e36f8338f3
+909, 0x4a752741e00de30e
+910, 0xf7855b70f1f6ec2b
+911, 0xa505fa4428199e43
+912, 0xe8b6b423b826bbac
+913, 0x4bd1206cf8786d05
+914, 0x6dcf040391fe3bf4
+915, 0x913f500f87e1bba3
+916, 0x5acf775aa180a5d5
+917, 0x74dd28d9432ce739
+918, 0x996c2ff2f0dc2495
+919, 0x73dbfe6c56effe4
+920, 0x56fddd25196f5e40
+921, 0xe87810158f5b7
+922, 0x7b8795e996383f1f
+923, 0x9ba5ee7c777c4c82
+924, 0x17ce3908d270fe1c
+925, 0x3df9e613c1aedfae
+926, 0xcdd26871b32fc8e1
+927, 0xd71cb13afc633979
+928, 0x63427c8ea9b1c79e
+929, 0xd070f7664d3b405d
+930, 0x46f2a9e32d9fb769
+931, 0xb4c3822a45e9fe9b
+932, 0x8ba30b97fe6f5ec7
+933, 0x70aa554ee2fc11f9
+934, 0xa80c99dbe0cfcfaf
+935, 0x36d9250cb2d68ed
+936, 0x2995e4b9e1cd1db4
+937, 0x4b3803ba57fc570f
+938, 0xae3959e7d740eaa5
+939, 0xb4cbd6662adbae08
+940, 0xae46576446e8dbc4
+941, 0xc4828e008a9a8a54
+942, 0x145d7db8e6554b2f
+943, 0x1b1b8916a730c371
+944, 0xdaf84b2bebe31963
+945, 0x5b59b80ef23a2403
+946, 0x9180c7e89cab6fd3
+947, 0x80e58f5411babf34
+948, 0xa06cf55185b9b005
+949, 0x13b2c798424173ad
+950, 0xc510f8e706311d49
+951, 0x1f974b83b6046d3a
+952, 0xae6e8e85e822d1c3
+953, 0x66f2c8dc3274a31a
+954, 0x7e04dbcbf65bd377
+955, 0xabf41ede01ec20a4
+956, 0x5efa0948f6bbb2ea
+957, 0xbc91c99d8592255
+958, 0xf6d6917911d86d75
+959, 0x85ce273d54e9097a
+960, 0xbdfd30f2420fff92
+961, 0x8802f02f610b537c
+962, 0xd1d70037ed543229
+963, 0x908aaf97f9693a46
+964, 0x1f6cfeaa0834d53a
+965, 0xa453fd1648ce04d2
+966, 0x2c38bb85ebc64af9
+967, 0xd2daff551c90c4f8
+968, 0xae5a0d949797d784
+969, 0xf0974c8552ac9593
+970, 0xa10b70499f65c693
+971, 0x39a449ebd594ddff
+972, 0x8ea090f2b17b9b49
+973, 0xc592de318090fd83
+974, 0xb63e4fbc467b6912
+975, 0x57a0c1c5ce0e4dcc
+976, 0xa7c517cf3d436b35
+977, 0xef6dcb0f3fad038b
+978, 0xaf4fb60315b91287
+979, 0x5e0776f67304f331
+980, 0xe927753b8e6f7932
+981, 0xd3df2dd92559e304
+982, 0xdaed52aa6af44413
+983, 0x1b59f4dac1e181f8
+984, 0x4a73c2293877ef39
+985, 0xca45d0d015fe44de
+986, 0x4659c8b7853735a8
+987, 0x12de6466bdf8adeb
+988, 0xaeea857a09bfec15
+989, 0xcc9cf4b3c0b88a23
+990, 0xa44ae52396a5e1bf
+991, 0x5847a724305d137f
+992, 0x8f4d4de223956182
+993, 0x58254dfada867a8
+994, 0x900a98222c2f339e
+995, 0xdb575260935d51d5
+996, 0x13fb4bfbbc0d7b53
+997, 0x62213850186bb92b
+998, 0x2a34823312c00388
+999, 0x6148329042f743b0
diff --git a/numpy/random/tests/data/philox-testset-1.csv b/numpy/random/tests/data/philox-testset-1.csv
new file mode 100644
index 000000000..e448cbf73
--- /dev/null
+++ b/numpy/random/tests/data/philox-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xedc95200e2bd66a5
+1, 0x581d4e43b7682352
+2, 0x4be7278f5e373eab
+3, 0xee47f17991a9e7ea
+4, 0x38a7d2ae422f2e2c
+5, 0xe2a6730a3b4a8a15
+6, 0x1588b7a841486442
+7, 0x13ad777246700504
+8, 0x14d157e0f5e18204
+9, 0xd87c22a7ee8c13f1
+10, 0x30cc389ce3542ba1
+11, 0xb8a53348955bb2e9
+12, 0xc08802e3c454f74f
+13, 0xb444f627671a5780
+14, 0x4b6dd42b29cbf567
+15, 0x6109c7dc0bc5f7d5
+16, 0x85c954715d6b5b1e
+17, 0x646178d3d9a3a5d5
+18, 0xebbde42b1cd83465
+19, 0x3d015102f6bc9c1a
+20, 0x720fe2ec3798d5fd
+21, 0x93120961289ceb2e
+22, 0xc9207e960a56fae2
+23, 0xa7f042f31d991b98
+24, 0x5fac117415fae74b
+25, 0xd0a970ba8dddc287
+26, 0x84b4e7e51b43106
+27, 0x6ad02bf525ea265f
+28, 0xcdc7e5992b36ef8f
+29, 0x44d4985209261d60
+30, 0x628c02d50f4b902e
+31, 0xc7b1914922d1e76d
+32, 0xfde99ff895cba51d
+33, 0x175a0be050fa985f
+34, 0x47297d3699e03228
+35, 0xccf1e9aeaa3339cd
+36, 0x9fdd18ebeeaf15b1
+37, 0x7c94c9ab68747011
+38, 0x612d8ef22c1fa80f
+39, 0x13f52b860de89ab5
+40, 0x81f264b8c139c43b
+41, 0x8d017ba4ef1e85ba
+42, 0x6d0556f46219951e
+43, 0x8ee7b85663cf67b6
+44, 0x2432fc707645fe67
+45, 0xaf814046051e5941
+46, 0x4d432a83739ac76f
+47, 0x59e5060d0983ccdd
+48, 0xdd20e828b83d9b53
+49, 0x1b891800d7385f4c
+50, 0x10e86a026c52ff5e
+51, 0xb932f11723f7b90c
+52, 0xb2413d0a1f3582d0
+53, 0xe7cd4edda65fc6b5
+54, 0x6d3808848d56593b
+55, 0x192a727c3c7f47d9
+56, 0x9659d8aea5db8c16
+57, 0x4242c79fe2c77c16
+58, 0x605f90c913827cea
+59, 0x53e153c8bfc2138a
+60, 0xed2158fbdef5910e
+61, 0xae9e6e29d4cb5060
+62, 0x7dd51afaad3b11ce
+63, 0x2b9ba533d01a5453
+64, 0x7e0e9cf2b6c72c8
+65, 0x1cc8b3c7747ed147
+66, 0x9b102651e2e11b48
+67, 0x30b0b53cbaac33ea
+68, 0x70c28aec39b99b85
+69, 0x5f1417ff536fdb75
+70, 0x3a1d91abd53acf58
+71, 0xba116a1772168259
+72, 0xf5369bc9bd284151
+73, 0x67bf11373bf183ca
+74, 0xef0b2d44dbd33dc7
+75, 0xbfd567ee1a2953ed
+76, 0x7d373f2579b5e5c6
+77, 0x756eeae7bcdd99be
+78, 0x75f16eb9faa56f3b
+79, 0x96d55ded2b54b9a5
+80, 0x94495191db692c24
+81, 0x32358bdd56bab38c
+82, 0x3f6b64078576579
+83, 0x7177e7948bc064c9
+84, 0x2cbf23f09ba9bc91
+85, 0x9b97cc31c26645f5
+86, 0x5af2d239ff9028b1
+87, 0x316fa920e0332abe
+88, 0x46535b7d1cae10a0
+89, 0x21f0a6869298022c
+90, 0xf395c623b12deb14
+91, 0x8573995180675aa7
+92, 0xc3076509f4dc42d5
+93, 0x15e11e49760c6066
+94, 0xe8a6d311e67a021d
+95, 0x7482f389c883339b
+96, 0xda6f881573cba403
+97, 0xb110ffb847e42f07
+98, 0x2c3393140605ccf9
+99, 0xba1c8ba37d8bdc33
+100, 0x59adf43db7a86fe0
+101, 0xb4fcbf6aa585ca85
+102, 0xd794a93c18033fa6
+103, 0x6e839c01985f9d4
+104, 0x64065bf28222b2c7
+105, 0x6a6359b293fa0640
+106, 0x5ff610969e383e44
+107, 0xa8172c263f05c7f7
+108, 0x62a0172e8bd75d07
+109, 0x7be66e3c453b65ac
+110, 0x6a3b8d5a14014292
+111, 0xa2583e6087450020
+112, 0xd5d3ecc480c627d2
+113, 0xa24e83f1eec8a27c
+114, 0xa23febd2a99ee75a
+115, 0x9a5fbf91c7310366
+116, 0x5b63156932e039b
+117, 0x942af3c569908505
+118, 0x89a850f71ab6a912
+119, 0xfeadc803ac132fe9
+120, 0x67bf60e758250f3
+121, 0x533c25103466a697
+122, 0xb7deede3482f9769
+123, 0x325e043b53bba915
+124, 0x9e8d9e7fde132006
+125, 0x6bacc6860bbc436e
+126, 0xb3ea0534c42b1c53
+127, 0xb2389334db583172
+128, 0xa74b1bfbf5242ee4
+129, 0x53a487e2dc51d15c
+130, 0xe5a3b538d2c7a82e
+131, 0x7b6c70bb0c4cadaf
+132, 0xae20791b2081df1
+133, 0xc685c12e3c61d32c
+134, 0x60110e6b0286e882
+135, 0x49682119c774045c
+136, 0x53dc11a3bbd072e
+137, 0xbdc87c6e732d9c2d
+138, 0xcc4620861ebac8fd
+139, 0x7e9c3558759350cc
+140, 0x157408dee34891ba
+141, 0x9bcad1855b80651b
+142, 0xd81b29141d636908
+143, 0x1ed041a9f319c69d
+144, 0x805b2f541208b490
+145, 0x484ef3bba2eb7c66
+146, 0xb6b5e37d50a99691
+147, 0xabc26a7d9e97e85f
+148, 0xcba2a3cce0417c2f
+149, 0xa030dfffd701993c
+150, 0x2bf2dc50582ebf33
+151, 0xd9df13dd3eb9993e
+152, 0x31ca28b757232ae5
+153, 0x614562a0ccf37263
+154, 0x44d635b01725afbb
+155, 0x5ae230bc9ca9cd
+156, 0xb23a124eb98705c6
+157, 0x6395675444981b11
+158, 0xd97314c34119f9ca
+159, 0x9de61048327dd980
+160, 0x16bac6bded819707
+161, 0xcea3700e3e84b8c7
+162, 0xaa96955e2ee9c408
+163, 0x95361dcc93b5bc99
+164, 0x306921aed3713287
+165, 0x4df87f3130cd302a
+166, 0x37c451daeb6a4af5
+167, 0x8dbbe35f911d5cc1
+168, 0x518157ce61cb10f9
+169, 0x669f577aebc7b35b
+170, 0x4b0a5824a8786040
+171, 0x519bc3528de379f5
+172, 0x6128012516b54e02
+173, 0x98e4f165e5e6a6dd
+174, 0x6404d03618a9b882
+175, 0x15b6aeb3d9cd8dc5
+176, 0x87ed2c1bae83c35b
+177, 0x8377fc0252d41278
+178, 0x843f89d257a9ba02
+179, 0xcdda696ea95d0180
+180, 0xcfc4b23a50a89def
+181, 0xf37fd270d5e29902
+182, 0xafe14418f76b7efa
+183, 0xf984b81577076842
+184, 0xe8c60649ccb5458d
+185, 0x3b7be8e50f8ff27b
+186, 0xaa7506f25cef1464
+187, 0x5e513da59f106688
+188, 0x3c585e1f21a90d91
+189, 0x1df0e2075af292a
+190, 0x29fdd36d4f72795f
+191, 0xb162fe6c24cb4741
+192, 0x45073a8c02bd12c4
+193, 0xcbaaa395c2106f34
+194, 0x5db3c4c6011bc21c
+195, 0x1b02aac4f752e377
+196, 0xa2dfb583eb7bec5
+197, 0xfe1d728805d34bb1
+198, 0xf647fb78bb4601ec
+199, 0xd17be06f0d1f51ef
+200, 0x39ec97c26e3d18a0
+201, 0xb7117c6037e142c8
+202, 0xe3a6ce6e6c71a028
+203, 0xe70a265e5db90bb2
+204, 0x24da4480530def1e
+205, 0xfd82b28ce11d9a90
+206, 0x5bf61ead55074a1d
+207, 0xbe9899c61dec480d
+208, 0xae7d66d21e51ec9e
+209, 0x384ee62c26a08419
+210, 0x6648dccb7c2f4abf
+211, 0xc72aa0c2c708bdc9
+212, 0x205c5946b2b5ba71
+213, 0xd4d8d0b01890a812
+214, 0x56f185493625378d
+215, 0x92f8072c81d39bd0
+216, 0xa60b3ceecb3e4979
+217, 0xfcf41d88b63b5896
+218, 0xf5a49aa845c14003
+219, 0xffcc7e99eee1e705
+220, 0xdd98312a7a43b32d
+221, 0xa6339bd7730b004
+222, 0xdac7874ba7e30386
+223, 0xadf6f0b0d321c8
+224, 0x126a173ae4ffa39f
+225, 0x5c854b137385c1e7
+226, 0x8173d471b1e69c00
+227, 0x23fa34de43581e27
+228, 0x343b373aef4507b1
+229, 0xa482d262b4ea919c
+230, 0xf7fbef1b6f7fbba
+231, 0xd8ce559487976613
+232, 0xbf3c8dd1e6ebc654
+233, 0xda41ed375451e988
+234, 0xf54906371fd4b9b3
+235, 0x5b6bb41231a04230
+236, 0x866d816482b29c17
+237, 0x11315b96941f27dc
+238, 0xff95c79205c47d50
+239, 0x19c4fff96fbdac98
+240, 0xbfb1ae6e4131d0f4
+241, 0x9d20923f3cdb82c9
+242, 0x282175507c865dff
+243, 0xdfd5e58a40fe29be
+244, 0xedbd906ff40c8e4f
+245, 0x11b04fc82614ccb3
+246, 0xeceb8afda76ae49f
+247, 0xa4856913847c2cdf
+248, 0x6f1425f15a627f2a
+249, 0xdf144ffedf60349e
+250, 0x392d7ecfd77cc65f
+251, 0x72b8e2531049b2c6
+252, 0x5a7eb2bdb0ec9529
+253, 0xdcfd4306443e78c1
+254, 0x89ad67ed86cd7583
+255, 0x276b06c0779a6c8f
+256, 0xb2dbb723196a0ac3
+257, 0x66c86a3b65906016
+258, 0x938348768a730b47
+259, 0x5f5282de938d1a96
+260, 0xa4d4588c4b473b1f
+261, 0x8daed5962be4796f
+262, 0x9dde8d796985a56e
+263, 0x46be06dbd9ed9543
+264, 0xdf98286ceb9c5955
+265, 0xa1da1f52d7a7ca2b
+266, 0x5a7f1449f24bbd62
+267, 0x3aedc4e324e525fd
+268, 0xced62464cd0154e1
+269, 0x148fc035e7d88ce3
+270, 0x82f8878948f40d4c
+271, 0x4c04d9cdd6135c17
+272, 0xdf046948d86b3b93
+273, 0x2f0dec84f403fe40
+274, 0xa61954fb71e63c0d
+275, 0x616d8496f00382e8
+276, 0x162c622472746e27
+277, 0x43bcfe48731d2ceb
+278, 0xff22432f9ff16d85
+279, 0xc033ed32bb0ad5a4
+280, 0x5d3717cc91c0ce09
+281, 0x7a39a4852d251075
+282, 0x61cd73d71d6e6a6
+283, 0xe37e2ea4783ab1a5
+284, 0x60e1882162579ea8
+285, 0x9258ec33f1a88e00
+286, 0x24b32acf029f0407
+287, 0x1410fc9aea6d3fac
+288, 0x6054cf2a3c71d8f7
+289, 0x82f7605157a66183
+290, 0x3b34c1c0dff9eac5
+291, 0xfebe01b6d5c61819
+292, 0x7372187c68b777f2
+293, 0xc6923812cda479f0
+294, 0x386613be41b45156
+295, 0x92cfebe8cc4014b
+296, 0x8e13c4595849828b
+297, 0x90e47390d412291f
+298, 0x6b21a1d93d285138
+299, 0xbf5b1f5922f04b12
+300, 0x21e65d1643b3cb69
+301, 0xf7683b131948ac3c
+302, 0xe5d99fc926196ed2
+303, 0x7b138debbec90116
+304, 0x8a2650a75c2c2a5c
+305, 0x20689a768f9b347b
+306, 0xdfa2900cfb72dc6e
+307, 0x98959c3855611cc2
+308, 0x5fdb71b89596cc7c
+309, 0x1c14ac5c49568c7b
+310, 0x958c4293016091fe
+311, 0x7484522eb0087243
+312, 0xc4018dfb34fc190f
+313, 0xca638567e9888860
+314, 0x102cd4805f0c0e89
+315, 0xcc3bc438e04548f8
+316, 0xb808944bb56ea5be
+317, 0xffd4778dbf945c57
+318, 0xfe42617784c0233b
+319, 0x3eccbfeae9b42d3c
+320, 0xd9f1b585fd0bfa60
+321, 0x5c063d1b2705d5dd
+322, 0x8e8bec3519941b64
+323, 0x9e94c36cbec2a42
+324, 0x1cd19f5b64ffd3ad
+325, 0x9632e3aebfc68e66
+326, 0x98960c2d9da4ae45
+327, 0xb76994b1f2bbfc1f
+328, 0xca184a737d3971cc
+329, 0x964d31b07183adfb
+330, 0xe9e0ff351cd276d4
+331, 0xb5747c860b05bbe4
+332, 0x5549ddc3bd3862e2
+333, 0x495496677b27873b
+334, 0x53910baa26e3ea18
+335, 0xaa07a07ad0a688d3
+336, 0xbb43bd1f09ecdb1e
+337, 0xe2ebc105699dd84
+338, 0x6e815a2729584035
+339, 0x2caab1713b17948a
+340, 0x43d39d209fa41c90
+341, 0xfe3e71089d5d1c3a
+342, 0xa778646c32f81177
+343, 0x8d42bfb86e6e92d5
+344, 0x175571f70b4fcfbe
+345, 0x2a66a6fe10dc3b5b
+346, 0xd9545e85235ca709
+347, 0x5642781c77ced48a
+348, 0x24facc40b72ccd09
+349, 0xa800fbacce33f6f8
+350, 0x675f58a0ff19fba
+351, 0x35aedf57bb5cde1b
+352, 0xe5535a6b63f6d068
+353, 0x84dffd0102aaa85d
+354, 0x621faad65467aaa7
+355, 0x596ad85b556b112f
+356, 0x837545fff8894c7a
+357, 0x3d9a4ae1356bc6a6
+358, 0xcd8b7153205d4ad0
+359, 0x98afdd40f1ed09a6
+360, 0xa38b2dc55a5cf87f
+361, 0x484aecce2b6838bc
+362, 0x6af05c26bdab18d9
+363, 0xf418b7399dcf2e4b
+364, 0x1cfa38789b0d2445
+365, 0xfbed23c34166ee67
+366, 0x38e6820039e4912a
+367, 0x1fe94911e963591e
+368, 0x1291c79aee29ad70
+369, 0x65eccfc89506f963
+370, 0x7d14de3b2f55b1f6
+371, 0x82eb79c36cd2a739
+372, 0x41ffe3b75ea0def5
+373, 0x9eba9156470a51d9
+374, 0xd17c00b981db37d1
+375, 0xf688769a75601aa7
+376, 0xbcf738e9e03d571e
+377, 0x14712e56df8f919b
+378, 0xab14e227d156e310
+379, 0xf53d193e993e351e
+380, 0x857fae46bd312141
+381, 0xc2dd71e41b639966
+382, 0x74f8b987a3d00ad1
+383, 0x5bce8526dc527981
+384, 0x94910926c172a379
+385, 0x503c45557688a9d5
+386, 0x244d03834e05807f
+387, 0x6e014cbab9c7a31f
+388, 0xae544c638530facf
+389, 0x9b853aaaf9cbc22d
+390, 0xfb42ab7024d060ed
+391, 0x74cc3fba0dfd7ff2
+392, 0x24ec9e8f62144ad5
+393, 0x72f082954307bbe7
+394, 0x36feda21bbf67577
+395, 0x3222191611b832f1
+396, 0xd0584e81bcac8b0b
+397, 0xdce8d793ef75e771
+398, 0x978824c6c2578fc
+399, 0x6e8f77503b3c2ee4
+400, 0xc85d2d86fecf5d03
+401, 0x3d35b4a5d4d723c4
+402, 0xd3987dfd4727fff3
+403, 0xd3cde63fb6a31add
+404, 0xf6699e86165bdaeb
+405, 0x9d60ba158ec364c4
+406, 0x920c3c18b346bfc9
+407, 0x770fd1fdfbc236ca
+408, 0x45998cfc5fc12ddd
+409, 0xd74a3454e888834b
+410, 0xbf2aa68081a4a28f
+411, 0xea41b26a6f1da1b3
+412, 0x5560a2d24b9d5903
+413, 0xe3791f652a228d8b
+414, 0x365116d3b5a8520c
+415, 0xb1b2bd46528f8969
+416, 0xfcfe14943ef16ae7
+417, 0xf4d43425e8a535dc
+418, 0xe6cf10a78782a7e0
+419, 0x9c7ac0de46556e3e
+420, 0xc667ae0856eed9ef
+421, 0x47dbb532e16f9c7e
+422, 0xdf4785a5d89ee82e
+423, 0xbd014925ce79dbcf
+424, 0xea0d663fb58fa5be
+425, 0x51af07d5cc3821fb
+426, 0x27a1bdcdc4159a9d
+427, 0x520c986c59b1e140
+428, 0x50b73fd9bacd5b39
+429, 0xae5240641f51e4f3
+430, 0x71faecc164ed9681
+431, 0xda95aa35529a7ee
+432, 0xe25ba29b853c1c6d
+433, 0x9871a925cda53735
+434, 0xde481ad8540e114d
+435, 0xa2997f540e8abca0
+436, 0xc9683c5035e28185
+437, 0x1082471b57182bac
+438, 0xbd3ecf0f0b788988
+439, 0xf479760776fbb342
+440, 0x3730929200d91f44
+441, 0xc1762d79ae72809c
+442, 0xfaa0a4c7b1686cb3
+443, 0xd581e6d55afdafcd
+444, 0x6cf57bdfba2dcf6d
+445, 0xdef79d9fe6a5bcef
+446, 0x13ed376e18132bd3
+447, 0xbe67efd72defa2a
+448, 0x5acc176c468966ea
+449, 0x8b35b626af139187
+450, 0x446de3fac0d973ac
+451, 0xe1d49e06dc890317
+452, 0x817bc3fd21fc09b7
+453, 0xb71c3958a13d5579
+454, 0x8746e010f73d7148
+455, 0x1b61c06009922e83
+456, 0xba17e62e6b092316
+457, 0x1375fa23c4db8290
+458, 0x3f071230f51245a6
+459, 0x51c99a086a61cd13
+460, 0x5f0f2ae78589e1fd
+461, 0x604834e114bbbc27
+462, 0x5eb2a7a34814e9a9
+463, 0x77a6907f386bf11e
+464, 0x99525de2bd407eeb
+465, 0xb818348c57b3b98f
+466, 0x25f5f9e702fbe78d
+467, 0x8f66669e6f884473
+468, 0x1e47d46e2af4f919
+469, 0xf6a19df846476833
+470, 0xff00c67bcd06621f
+471, 0xe3dfe069795d72d8
+472, 0x8affc88b2fea4d73
+473, 0x66df747e5f827168
+474, 0xf368ec338d898a0e
+475, 0x9e1f1a739c5984a2
+476, 0x46a1c90e1ca32cbc
+477, 0xc261bc305ed8d762
+478, 0x754d7949f7da9e72
+479, 0x4c8fbbb14ef47b17
+480, 0xccbdc67a3848d80d
+481, 0x3c25e6f58bae751d
+482, 0x7078b163b936d9b6
+483, 0x440e27463c134ecf
+484, 0x6c83ee39f324db0f
+485, 0x27cf901b22aea535
+486, 0x57262dec79a3f366
+487, 0x91db09f1dbb524fb
+488, 0xd7436eefba865df2
+489, 0x16c86b0a275a3f43
+490, 0x689493e6681deaa9
+491, 0x7e1dc536c1a9ac42
+492, 0x1145beac3ac7f5cc
+493, 0x3d05e211a104b2b0
+494, 0x4f9e77ced3c52f44
+495, 0x53de1369354add72
+496, 0x1fb60f835f47cdeb
+497, 0x6ab36f089e40c106
+498, 0xaabffcb0d3d04c7
+499, 0xaa399686d921bd25
+500, 0x2bf8dd8b6d6fa7f0
+501, 0x1ddbf4e124329613
+502, 0x466a740241466a72
+503, 0x98d7381eb68a761
+504, 0x817691510bc4857a
+505, 0x8837622c0171fe33
+506, 0xcba078873179ee16
+507, 0x13adad1ab7b75af4
+508, 0x3bac3f502428840c
+509, 0xbeb3cce138de9a91
+510, 0x30ef556e40b5f0b4
+511, 0x19c22abdf3bbb108
+512, 0x977e66ea4ddc7cf
+513, 0x9f4a505f223d3bf3
+514, 0x6bc3f42ac79ec87b
+515, 0x31e77712158d6c23
+516, 0x6d8de4295a28af0d
+517, 0xee1807dbda72adb7
+518, 0xda54140179cd038f
+519, 0x715aa5cdac38e062
+520, 0x5a7e55e99a22fa16
+521, 0xf190c36aa8edbe4f
+522, 0xccadd93a82c1d044
+523, 0x7070e6d5012c3f15
+524, 0x50a83341a26c1ba5
+525, 0x11bca7cc634142e5
+526, 0x623a0d27867d8b04
+527, 0x75c18acff54fbf6e
+528, 0x455ae7d933497a6f
+529, 0xf624cf27d030c3d3
+530, 0x7a852716f8758bac
+531, 0xe7a497ac1fa2b5b4
+532, 0xf84f097498f57562
+533, 0xc4bb392f87f65943
+534, 0x618e79a5d499fbfb
+535, 0xb3c0b61d82b48b8
+536, 0x4750a10815c78ea7
+537, 0x9cf09cca3ddece69
+538, 0x2a69f1c94cc901a2
+539, 0x347a0e446e1ce86d
+540, 0xb06f3a5a5ab37bb1
+541, 0x8035bd0713d591db
+542, 0x539c9637042c3a1f
+543, 0xd7ba4dc6b273cbd7
+544, 0x12f3f99933444f85
+545, 0x4a9517b9783fb9a4
+546, 0x6422b2ea95093bc5
+547, 0x3a5ecff0f996c2a6
+548, 0x31de504efc76a723
+549, 0x7ccb7c5233c21a9f
+550, 0xc687d9e6ce4186e8
+551, 0x6e40769d6940376a
+552, 0xf51207314f1f7528
+553, 0x67ee3acb190865e3
+554, 0xe08d586270588761
+555, 0xe387fa489af1a75c
+556, 0x73414a52d29d8375
+557, 0x671a38191cf2a357
+558, 0xe00fb25b1aa54008
+559, 0x11a0610e22cf549b
+560, 0xc90cc865d57c75be
+561, 0x90d0863cc15f2b79
+562, 0x8b3e60d32ebcb856
+563, 0xb28cc55af621e04a
+564, 0xcf60bd3cb2a5ab1d
+565, 0x212cb5d421948f86
+566, 0xee297b96e0a3363f
+567, 0x4e9392ff998760d1
+568, 0x61940c8d0105ba3e
+569, 0x14ebcbae72a59a16
+570, 0xdf0f39a3d10c02af
+571, 0xfc047b2b3c1c549d
+572, 0x91718b5b98e3b286
+573, 0x9ea9539b1547d326
+574, 0x7a5a624a89a165e6
+575, 0x145b37dcaa8c4166
+576, 0x63814bbb90e5616c
+577, 0xc4bc3ca6c38bb739
+578, 0x853c3a61ddc6626c
+579, 0xa7ce8481c433829a
+580, 0x8aff426941cc07b
+581, 0x2dc3347ca68d8b95
+582, 0xce69f44f349e9917
+583, 0x2fa5cb8aca009b11
+584, 0xf26bb012115d9aca
+585, 0xafa01c2f2d27235a
+586, 0xabcba21f1b40305e
+587, 0xfec20c896c0c1128
+588, 0xc5f7a71ebacadfa0
+589, 0xc8479ad14bab4eef
+590, 0xad86ec9a3e7d3dc
+591, 0xbbecd65292b915c5
+592, 0xb1f9e28149e67446
+593, 0x708d081c03dad352
+594, 0xaa8a84dbd1de916c
+595, 0x9aa3efb29ba9480b
+596, 0xd3c63969ff11443e
+597, 0x1e9e9ac861315919
+598, 0x4fe227f91e66b41d
+599, 0xefc0212d43d253ab
+600, 0x98341437727c42d1
+601, 0x5ea85c0fe9008adc
+602, 0x7891b15faa808613
+603, 0x32db2d63989aacfd
+604, 0xc92f7f28e88fd7bc
+605, 0x3513545eb6549475
+606, 0x49abe0082906fbf8
+607, 0xcee1e1a6551e729c
+608, 0x38556672b592a28e
+609, 0xc3e61409c4ec2d45
+610, 0x96c67ce2995a0fd4
+611, 0x9b9b0cada870293
+612, 0x82d6dd5dada48037
+613, 0xeea4f415299f1706
+614, 0x371107895f152ab3
+615, 0x2f6686159f4396bb
+616, 0x61005a2ff3680089
+617, 0x9d2f2cafb595e6b6
+618, 0x4a812a920f011672
+619, 0x317554d3a77385d7
+620, 0x24c01086727eb74b
+621, 0xa15ff76d618a3a9e
+622, 0x2121bfd983859940
+623, 0x384d11577eea8114
+624, 0xab0f4299f3c44d88
+625, 0x136fd4b07cfa14d9
+626, 0x665fe45cbfaa972a
+627, 0x76c5a23398a314e9
+628, 0x5507036357ccda98
+629, 0xd9b8c5ac9dce632b
+630, 0x366bc71781da6e27
+631, 0xdd2b2ba1d6be6d15
+632, 0xf33ed0d50ea6f1a6
+633, 0xf05a9b1900174c18
+634, 0x3947e1419e2787cf
+635, 0x6c742b1e029637d0
+636, 0x32aba12196a0d2e8
+637, 0x1b94aab2e82e7df
+638, 0x68b617db19229d6
+639, 0x6c88a95ac0a33f98
+640, 0xdc9b95fd60c2d23e
+641, 0x999e6971d3afc8b3
+642, 0x7071fc6ad8b60129
+643, 0x41a8184ef62485f6
+644, 0xb68e0605c7d5e713
+645, 0x272b961a1d1bbee
+646, 0x23f04e76446187b0
+647, 0x999a7a8f6d33f260
+648, 0xdbd6318df4f168d
+649, 0x8f5e74c84c40711e
+650, 0x8ccc6b04393a19d6
+651, 0xadcd24b782dd8d3d
+652, 0x1a966b4f80ef9499
+653, 0xcb6d4f9ff5a280f0
+654, 0x8095ff2b8484018a
+655, 0xbfd3389611b8e771
+656, 0x278eb670b7d12d51
+657, 0x31df54ca8d65c20f
+658, 0x121c7fb38af6985e
+659, 0x84fb94f38fe1d0a
+660, 0x15ae8af1a6d48f02
+661, 0x8d51e4a62cba1a28
+662, 0x58e6b6b3ae0f9e42
+663, 0x9365a0a85669cc99
+664, 0xe56e92f65a2106df
+665, 0x68fa299c66b428fc
+666, 0x55e51bb0b0a832c6
+667, 0x48b565293f9bc494
+668, 0x73d8132b1cbabb57
+669, 0x9178ac3926c36cbc
+670, 0xe2f22c7b28ea5e0f
+671, 0x6af45322a99afb12
+672, 0x59072fcb486a46f4
+673, 0x166b717b08d3d8e
+674, 0xd4e627a2dfacc4ab
+675, 0x33dad6f2921dedaa
+676, 0x4b13b806834a6704
+677, 0xe5f7971b398ed54d
+678, 0x20bfae65e3e6899b
+679, 0x881dab45d2b4fc98
+680, 0x6f248126b5b885be
+681, 0x7aeb39e986f9deee
+682, 0xf819f9574b8c3a03
+683, 0xff3d93ed6bd9781a
+684, 0x3a31e2e24a2f6385
+685, 0x7888a88f8944a5e
+686, 0x4faee12f5de95537
+687, 0x7f3e4efccdb2ed67
+688, 0x91e0f2fc12593af5
+689, 0xb5be8a4b886a40d3
+690, 0x998e8288ac3a9b1b
+691, 0x85c48fc8b1349e7b
+692, 0xf03af25222d8fae5
+693, 0x45467e805b242c2e
+694, 0xa2350db793dbebdc
+695, 0xfebe5b61d2174553
+696, 0xa9a331f02c54ad0b
+697, 0xe94e49a0f905aef3
+698, 0xe54b4c812b55e3da
+699, 0xdc454114c6bc0278
+700, 0x99c7765ab476baa2
+701, 0xccd9590e47fdff7c
+702, 0xfa2bcae7afd6cb71
+703, 0x2c1bf1a433a6f0f7
+704, 0x53882c62ff0aab28
+705, 0x80ac900f844dacc
+706, 0x27ba8eb5c4a44d54
+707, 0x78f3dfb072a46004
+708, 0x34e00e6ec629edce
+709, 0x5b88d19b552d1fbd
+710, 0xe4df375dc79df432
+711, 0x37446312ff79c3b4
+712, 0xb72256900a95fa6d
+713, 0x89f3171fbdff0bfc
+714, 0xd37885b048687eba
+715, 0xbb033213b283b60e
+716, 0xcf10b523ee769030
+717, 0xbf8070b6cfd7bafb
+718, 0xb7194da81fd1763b
+719, 0xbfc303de88e68d24
+720, 0xb949c7a5aea8a072
+721, 0x844216e7bae90455
+722, 0xf1e7f20840049a33
+723, 0x96e3263ad0cae794
+724, 0x10772d51f6e9ba49
+725, 0xcea24fccae9d23b3
+726, 0xefd378add9dde040
+727, 0xba0c7c5275805976
+728, 0x2e2a04608f64fa8c
+729, 0xafb42ec43aa0fa7
+730, 0x30444b84241ac465
+731, 0x19ef384bac4493ab
+732, 0xfd1ac615d3ba5ab9
+733, 0x6cc781ba38643aff
+734, 0x30ff27ebed875cfd
+735, 0xee1a261aca97ae62
+736, 0xc5a92715202bc940
+737, 0x9e6ec76f93c657ff
+738, 0x9b9fd55f55191ca5
+739, 0x654b13af008d8f03
+740, 0x1b7f030d9bd0719f
+741, 0x6d622e277550cb7f
+742, 0x3f8ee6b8830d0538
+743, 0x475462bcd0de190f
+744, 0x21380e8a513bdbcd
+745, 0x629bf3771b1bd7a4
+746, 0x3b5fd0b62c353709
+747, 0xf95634006ec3867e
+748, 0x1be8bb584a6653c2
+749, 0x2e2d3cfa85320ce8
+750, 0x5b904b692252d11d
+751, 0x4bfd76631d527990
+752, 0xc019571ca2bec4a0
+753, 0xf2eb730cea4cd751
+754, 0xd4571d709530191a
+755, 0x3b5bd947061f5a7d
+756, 0x56e2322cd2d1d1c0
+757, 0xa8830a5f62019f83
+758, 0x901d130c1b873cf3
+759, 0xb5dd29b363c61299
+760, 0xbb710bec3a17b26d
+761, 0xc0c464daca0f2328
+762, 0x4dc8055df02650f5
+763, 0x3d3cd9bbe8b957af
+764, 0xdb79612c2635b828
+765, 0xe25b3a8ad8fa3040
+766, 0xd5875c563cbf236b
+767, 0x46861c1c3849c9bc
+768, 0xf84bf1a2814dff43
+769, 0x6d8103902e0ad5e6
+770, 0x99f51c9be8af79e5
+771, 0xb0bfa8540ff94a96
+772, 0xaf45109a4e06f7d0
+773, 0x281df3e55aea9bfc
+774, 0x6a1155ca8aa40e60
+775, 0x754d32c5de1f5da
+776, 0xce1eafb1c6ca916f
+777, 0xc4f2185fa8577bd1
+778, 0x4a188e9bdb5501d9
+779, 0xbb14107e99bd5550
+780, 0xf0381d8425ec2962
+781, 0x213dbfffc16ec4f6
+782, 0x7a999c5a28ea65bc
+783, 0x23758c2aba7709ff
+784, 0xea7e4bb205e93b44
+785, 0x9c5a31e53911c658
+786, 0x7f04d0bbdc689ddc
+787, 0xe3ed89ab8d78dcb3
+788, 0x73c38bfb43986210
+789, 0x740c7d787eb8e158
+790, 0x5284fafdfb3fb9ec
+791, 0x2e91a58ac1fb1409
+792, 0xb94a600bf0a09af3
+793, 0x533ea4dbe07d81dd
+794, 0x48c3f1a736b3c5fd
+795, 0x56ae3499fa8720ce
+796, 0x526f2def663ca818
+797, 0x2f085759c65665c4
+798, 0xf715f042c69e0db4
+799, 0x110889c399231e60
+800, 0x64584a244866f3a0
+801, 0xf02ec101a39405d3
+802, 0xe73cd5e9a7f17283
+803, 0xfea64869e7028234
+804, 0x97559974ad877891
+805, 0xc8695aba1dc9f2e5
+806, 0x7b62b76ffc2264ec
+807, 0xf5e1df172ec5ccd
+808, 0xafaeb68765e443bd
+809, 0xd3870eb2e8337623
+810, 0x4f944d684138fb39
+811, 0x6977c575038916ad
+812, 0x8ada1a225df95a56
+813, 0xe4044c6c58d15e54
+814, 0x4e5121366681cf2
+815, 0xcf8640b079357b0d
+816, 0xcd5b157d44106fa3
+817, 0x9d7a5481279e25a1
+818, 0xe10e9db41fb4b34f
+819, 0x1052607be1eadff9
+820, 0x3403d67232fe2265
+821, 0xac9358f498c34afc
+822, 0x820172da0dc39c9
+823, 0xe186e91a3b826b6a
+824, 0x1a838e2a40284445
+825, 0x1870b617ebd7bce6
+826, 0xcb7cba4424be1ed7
+827, 0x6a2e56e40fdf9041
+828, 0xace93bbe108f97ee
+829, 0xfeb9bc74ac41ca08
+830, 0x8cb2d05b0f6a1f51
+831, 0x73792309f3fac0a9
+832, 0x2507343d431308ca
+833, 0xd0ea1197be615412
+834, 0xb1870812f1d2fa94
+835, 0x6d067b6935dcd23e
+836, 0xaf161014e5492c31
+837, 0xd4be0dce97064be4
+838, 0xf8edfe3fc75c20f1
+839, 0x894751dc442d2d9c
+840, 0xb4a95f6a6663456c
+841, 0x74e93162e2d805db
+842, 0x784bc5f3a7a2f645
+843, 0xd234d7c5b0582ea9
+844, 0x491f28d0ab6cb97c
+845, 0xa79419e5cf4336c3
+846, 0x66b00141978c849
+847, 0xa7ddbd64698d563f
+848, 0xefc33a4a5d97d4b2
+849, 0x95075514a65aebdc
+850, 0x40eca5b3e28cd25e
+851, 0x90ec7d00e9c9e35d
+852, 0x63e84104d5af417a
+853, 0xdaca0ea32df5744
+854, 0x7ed54f2587795881
+855, 0x5a73931760af4ee0
+856, 0x857d1a185a3081ec
+857, 0x6eac2aabe67fb463
+858, 0xd1f86155d8bfc55f
+859, 0x6d56398f3e7877ef
+860, 0x7642f61dfc62bc17
+861, 0x1d76b12843246ffa
+862, 0xde7817809b8a31d0
+863, 0xbcca9cd091198f9d
+864, 0xf71ca566dddcdfd4
+865, 0xea4386ee8b61d082
+866, 0xe351729d6010bac4
+867, 0xfd685d8a49910dd6
+868, 0xa7a20ea6c686bd3
+869, 0x1cdaf82f4dbd5536
+870, 0xa3da1d1e77dda3e0
+871, 0x4f723b3818ff8b2a
+872, 0x1290669eca152469
+873, 0xb54158b52d30651b
+874, 0xc06b74f2c7f0fee
+875, 0x7d5840bcbf702379
+876, 0x19fa4c1254a82ed
+877, 0xcf5ce090ad0b38ea
+878, 0xd4edd6ac9437e16d
+879, 0xc6ebf25eb623b426
+880, 0xd2b6dbdf00d8fea2
+881, 0x949cf98391cc59e1
+882, 0x380a0c7d0356f7b3
+883, 0x8ffefe32465473bf
+884, 0x637b6542d27c861e
+885, 0x347d12ffc664ecd9
+886, 0xea66e3a0c75a6b37
+887, 0xc3aff6f34fb537a1
+888, 0x67bdf3579959bf49
+889, 0xa17a348e3a74b723
+890, 0x93c9ef26ddadd569
+891, 0x483909059a5ac0b2
+892, 0x26ec9074b56d5a0d
+893, 0x6216000d9a48403a
+894, 0x79b43909eab1ec05
+895, 0xe4a8e8d03649e0de
+896, 0x1435d666f3ccdc08
+897, 0xb9e22ba902650a0e
+898, 0x44dffcccc68b41f8
+899, 0x23e60dcc7a559a17
+900, 0x6fd1735eacd81266
+901, 0xf6bda0745ea20c8e
+902, 0x85efcaefe271e07c
+903, 0x9be996ee931cef42
+904, 0xe78b41c158611d64
+905, 0xd6201df605839830
+906, 0x702e8e47d2769fd3
+907, 0xb8dcf70e18cf14c
+908, 0xac2690bab1bf5c17
+909, 0x92b166b71205d696
+910, 0xb0e73c795fc6df28
+911, 0x4bf2322c8b6b6f0d
+912, 0xa842fbe67918cea0
+913, 0xb01a8675d9294e54
+914, 0xfbe3c94f03ca5af2
+915, 0x51a5c089600c441f
+916, 0x60f0fd7512d85ded
+917, 0xef3113d3bc2cadb0
+918, 0xe1ea128ade300d60
+919, 0xde413b7f8d92d746
+920, 0xfc32c6d43f47c5d8
+921, 0x69d551d8c2b54c68
+922, 0xb9bc68c175777943
+923, 0xb9c79c687f0dae90
+924, 0xd799421ef883c06e
+925, 0xbff553ca95a29a3e
+926, 0xfc9ffac46bd0aca1
+927, 0x4f6c3a30c80c3e5a
+928, 0x8b7245bc6dc4a0a
+929, 0xaf4e191a4575ff60
+930, 0x41218c4a76b90f0b
+931, 0x986052aa51b8e89b
+932, 0x284b464ed5622f9
+933, 0xba6bded912626b40
+934, 0x43cad3ed7443cb5c
+935, 0x21641fa95725f328
+936, 0x6d99d6d09d755822
+937, 0x8246dfa2d4838492
+938, 0xd2ee70b9056f4726
+939, 0x87db515a786fbb8b
+940, 0x7c63e4c1d7786e7d
+941, 0xd1a9d548f10b3e88
+942, 0xa00856475f3b74c9
+943, 0x7f1964ce67148bf4
+944, 0x446650ec71e6018c
+945, 0xb1805ca07d1b6345
+946, 0x869c0a1625b7271b
+947, 0x79d6da06ce2ecfe2
+948, 0xec7b3cafc5e3c85f
+949, 0x1745ce21e39f2c3d
+950, 0xd9a0a7af6ee97825
+951, 0x680e0e52a6e11d5c
+952, 0xd86b3f344ff7f4cd
+953, 0xab56af117c840b9c
+954, 0x5c5404c7e333a10e
+955, 0x4f1eb462f35d990d
+956, 0xf857605a5644458e
+957, 0x3bb87cdf09262f86
+958, 0xd57295baf6da64b
+959, 0xb5993f48472f2894
+960, 0x7d1a501608c060b2
+961, 0x45fabe2d0e54adf0
+962, 0xbb41c3806afb4efe
+963, 0xbfbc506049424c8
+964, 0xb7dd6b67f2203344
+965, 0x389ce52eff883b81
+966, 0xe259c55c0cf6d000
+967, 0x70fb3e3824f7d213
+968, 0x9f36d5599ed55f4b
+969, 0xd14cf6f12f83c4f7
+970, 0x570a09d56aaa0b66
+971, 0x8accafd527f4598
+972, 0xa42d64c62175adfd
+973, 0xddb9c6a87b6e1558
+974, 0xd80b6c69fa1cde2a
+975, 0x44ebaac10082207b
+976, 0xf99be8889552fa1a
+977, 0x38253cd4b38b5dc5
+978, 0x85356c8b02675791
+979, 0xbf91677b2ecdcf55
+980, 0x2316cb85e93f366e
+981, 0x9abf35954db6b053
+982, 0xf49f7425e086b45a
+983, 0x8f5b625e074afde2
+984, 0xe0d614559791b080
+985, 0xbf7b866afab2a525
+986, 0xde89d7e1641a6412
+987, 0x1d10687d8ae5b86f
+988, 0x1f034caa0e904cbd
+989, 0x2086357aec8a7a2c
+990, 0x22dc476b80c56e1e
+991, 0xbef5a73cc0e3a493
+992, 0xddfa3829b26ed797
+993, 0x8917a87ec3d4dc78
+994, 0xfeabe390628c365e
+995, 0x581b0c4f6fb2d642
+996, 0x1ef8c590adbf5b9a
+997, 0x4d8e13aac0cce879
+998, 0xfe38f71e5977fad0
+999, 0x1f83a32d4adfd2ed
diff --git a/numpy/random/tests/data/philox-testset-2.csv b/numpy/random/tests/data/philox-testset-2.csv
new file mode 100644
index 000000000..69d24c38c
--- /dev/null
+++ b/numpy/random/tests/data/philox-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0x399e5b222b82fa9
+1, 0x41fd08c1f00f3bc5
+2, 0x78b8824162ee4d04
+3, 0x176747919e02739d
+4, 0xfaa88f002a8d3596
+5, 0x418eb6f592e6c227
+6, 0xef83020b8344dd45
+7, 0x30a74a1a6eaa064b
+8, 0x93d43bf97a490c3
+9, 0xe4ba28b442194cc
+10, 0xc829083a168a8656
+11, 0x73f45d50f8e22849
+12, 0xf912db57352824cc
+13, 0xf524216927b12ada
+14, 0x22b7697473b1dfda
+15, 0x311e2a936414b39f
+16, 0xb905abfdcc425be6
+17, 0x4b14630d031eac9c
+18, 0x1cf0c4ae01222bc8
+19, 0xa6c33efc6e82ef3
+20, 0x43b3576937ba0948
+21, 0x1e483d17cdde108a
+22, 0x6722784cac11ac88
+23, 0xee87569a48fc45d7
+24, 0xb821dcbe74d18661
+25, 0xa5d1876ef3da1a81
+26, 0xe4121c2af72a483
+27, 0x2d747e355a52cf43
+28, 0x609059957bd03725
+29, 0xc3327244b49e16c5
+30, 0xb5ae6cb000dde769
+31, 0x774315003209017
+32, 0xa2013397ba8db605
+33, 0x73b228945dbcd957
+34, 0x801af7190375d3c0
+35, 0xae6dca29f24c9c67
+36, 0xd1cc0bcb1ca26249
+37, 0x1defa62a5bd853be
+38, 0x67c2f5557fa89462
+39, 0xf1729b58122fab02
+40, 0xb67eb71949ec6c42
+41, 0x5456366ec1f8f7d7
+42, 0x44492b32eb7966f5
+43, 0xa801804159f175f1
+44, 0x5a416f23cac70d84
+45, 0x186f55293302303d
+46, 0x7339d5d7b6a43639
+47, 0xfc6df38d6a566121
+48, 0xed2fe018f150b39e
+49, 0x508e0b04a781fa1b
+50, 0x8bee9d50f32eaf50
+51, 0x9870015d37e63cc
+52, 0x93c6b12309c14f2d
+53, 0xb571cf798abe93ff
+54, 0x85c35a297a88ae6e
+55, 0x9b1b79afe497a2ae
+56, 0x1ca02e5b95d96b8d
+57, 0x5bb695a666c0a94a
+58, 0x4e3caf9bbab0b208
+59, 0x44a44be1a89f2dc1
+60, 0x4ff37c33445758d1
+61, 0xd0e02875322f35da
+62, 0xfd449a91fb92646b
+63, 0xbe0b49096b95db4d
+64, 0xffa3647cad13ef5d
+65, 0x75c127a61acd10c8
+66, 0xd65f697756f5f98e
+67, 0x3ced84be93d94434
+68, 0x4da3095c2fc46d68
+69, 0x67564e2a771ee9ac
+70, 0x36944775180644a9
+71, 0xf458db1c177cdb60
+72, 0x5b58406dcd034c8
+73, 0x793301a3fdab2a73
+74, 0x1c2a1a16d6db6128
+75, 0xc2dacd4ddddbe56c
+76, 0x2e7d15be2301a111
+77, 0xd4f4a6341b3bcd18
+78, 0x3622996bbe6a9e3b
+79, 0xaf29aa9a7d6d47da
+80, 0x6d7dbb74a4cd68ae
+81, 0xc260a17e0f39f841
+82, 0xdee0170f2af66f0d
+83, 0xf84ae780d7b5a06e
+84, 0x8326247b73f43c3a
+85, 0xd44eef44b4f98b84
+86, 0x3d10aee62ec895e3
+87, 0x4f23fef01bf703b3
+88, 0xf8e50aa57d888df6
+89, 0x7da67411e3bef261
+90, 0x1d00f2769b2f96d7
+91, 0x7ef9a15b7444b84e
+92, 0xcfa16436cc2b7e21
+93, 0x29ab8cfac00460ff
+94, 0x23613de8608b0e70
+95, 0xb1aa0980625798a8
+96, 0xb9256fd29db7df99
+97, 0xdacf311bf3e7fa18
+98, 0xa013c8f9fada20d8
+99, 0xaf5fd4fe8230fe3e
+100, 0xd3d59ca55102bc5c
+101, 0x9d08e2aa5242767f
+102, 0x40278fe131e83b53
+103, 0x56397d03c7c14c98
+104, 0xe874b77b119359b3
+105, 0x926a1ba4304ab19f
+106, 0x1e115d5aa695a91d
+107, 0xc6a459df441f2fe3
+108, 0x2ca842bc1b0b3c6a
+109, 0x24c804cf8e5eed16
+110, 0x7ca00fc4a4c3ebd3
+111, 0x546af7cecc4a4ba6
+112, 0x8faae1fa18fd6e3
+113, 0x40420b0089641a6a
+114, 0x88175a35d9abcb83
+115, 0xf7d746d1b8b1357c
+116, 0x7dae771a651be970
+117, 0x2f6485247ee4df84
+118, 0x6883702fab2d8ec5
+119, 0xeb7eea829a67f9a6
+120, 0x60d5880b485562ed
+121, 0x7d4ca3d7e41a4e7e
+122, 0xbb7fef961ab8de18
+123, 0x3b92452fb810c164
+124, 0x5f4b4755348b338
+125, 0xca45a715a7539806
+126, 0xc33efd9da5399dd
+127, 0x593d665a51d4aedd
+128, 0x75d6b8636563036b
+129, 0x7b57caa55e262082
+130, 0x4ede7427969e0dd5
+131, 0xc3f19b6f78ea00b
+132, 0xeea7bab9be2181ea
+133, 0x652c45fe9c420c04
+134, 0x14ba9e3d175670ee
+135, 0xd2ad156ba6490474
+136, 0x4d65ae41065f614
+137, 0x6ff911c8afa28eb1
+138, 0xedc2b33588f3cb68
+139, 0x437c8bc324666a2f
+140, 0x828cee25457a3f0
+141, 0x530c986091f31b9b
+142, 0x2f34671e8326ade7
+143, 0x4f686a8f4d77f6da
+144, 0xa4c1987083498895
+145, 0xbce5a88b672b0fb1
+146, 0x8476115a9e6a00cc
+147, 0x16de18a55dd2c238
+148, 0xdf38cf4c416232bc
+149, 0x2cb837924e7559f3
+150, 0xfad4727484e982ed
+151, 0x32a55d4b7801e4f
+152, 0x8b9ef96804bd10a5
+153, 0xa1fd422c9b5cf2a9
+154, 0xf46ddb122eb7e442
+155, 0x6e3842547afa3b33
+156, 0x863dee1c34afe5c4
+157, 0x6a43a1935b6db171
+158, 0x1060a5c2f8145821
+159, 0xf783ec9ed34c4607
+160, 0x1da4a86bf5f8c0b0
+161, 0x4c7714041ba12af8
+162, 0x580da7010be2f192
+163, 0xad682fe795a7ea7a
+164, 0x6687b6cb88a9ed2c
+165, 0x3c8d4b175517cd18
+166, 0xe9247c3a524a6b6b
+167, 0x337ca9cfaa02658
+168, 0xed95399481c6feec
+169, 0x58726a088e606062
+170, 0xfe7588a5b4ee342a
+171, 0xee434c7ed146fdee
+172, 0xe2ade8b60fdc4ba5
+173, 0xd57e4c155de4eaab
+174, 0xdefeae12de1137cb
+175, 0xb7a276a241316ac1
+176, 0xeb838b1b1df4ca15
+177, 0x6f78965edea32f6f
+178, 0x18bebd264d7a5d53
+179, 0x3641c691d77005ec
+180, 0xbe70ed7efea8c24c
+181, 0x33047fa8d03ca560
+182, 0x3bed0d2221ff0f87
+183, 0x23083a6ffbcf38a2
+184, 0xc23eb827073d3fa5
+185, 0xc873bb3415e9fb9b
+186, 0xa4645179e54147fe
+187, 0x2c72fb443f66e207
+188, 0x98084915dd89d8f4
+189, 0x88baa2de12c99037
+190, 0x85c74ab238cb795f
+191, 0xe122186469ea3a26
+192, 0x4c3bba99b3249292
+193, 0x85d6845d9a015234
+194, 0x147ddd69c13e6a31
+195, 0x255f4d678c9a570b
+196, 0x2d7c0c410bf962b4
+197, 0x58eb7649e0aa16ca
+198, 0x9d240bf662fe0783
+199, 0x5f74f6fa32d293cc
+200, 0x4928e52f0f79d9b9
+201, 0xe61c2b87146b706d
+202, 0xcfcd90d100cf5431
+203, 0xf15ea8138e6aa178
+204, 0x6ab8287024f9a819
+205, 0xed8942593db74e01
+206, 0xefc00e4ec2ae36dd
+207, 0xc21429fb9387f334
+208, 0xf9a3389e285a9bce
+209, 0xacdee8c43aae49b3
+210, 0xefc382f02ad55c25
+211, 0x1153b50e8d406b72
+212, 0xb00d39ebcc2f89d8
+213, 0xde62f0b9831c8850
+214, 0xc076994662eef6c7
+215, 0x66f08f4752f1e3ef
+216, 0x283b90619796249a
+217, 0x4e4869bc4227499e
+218, 0xb45ad78a49efd7ed
+219, 0xffe19aa77abf5f4b
+220, 0xfce11a0daf913aef
+221, 0x7e4e64450d5cdceb
+222, 0xe9621997cfd62762
+223, 0x4d2c9e156868081
+224, 0x4e2d96eb7cc9a08
+225, 0xda74849bba6e3bd3
+226, 0x6f4621da935e7fde
+227, 0xb94b914aa0497259
+228, 0xd50d03e8b8db1563
+229, 0x1a45c1ce5dca422e
+230, 0xc8d30d33276f843f
+231, 0xb57245774e4176b4
+232, 0x8d36342c05abbbb1
+233, 0x3591ad893ecf9e78
+234, 0x62f4717239ee0ac8
+235, 0x9b71148a1a1d4200
+236, 0x65f8e0f56dd94463
+237, 0x453b1fcfd4fac8c2
+238, 0x4c25e48e54a55865
+239, 0xa866baa05112ace2
+240, 0x7741d3c69c6e79c5
+241, 0x7deb375e8f4f7a8a
+242, 0xc242087ede42abd8
+243, 0x2fa9d1d488750c4b
+244, 0xe8940137a935d3d3
+245, 0x1dab4918ca24b2f2
+246, 0xe2368c782168fe3e
+247, 0x6e8b2d1d73695909
+248, 0x70455ebea268b33e
+249, 0x656a919202e28da1
+250, 0x5a5a8935647da999
+251, 0x428c6f77e118c13c
+252, 0xa87aee2b675bb083
+253, 0x3873a6412b239969
+254, 0x5f72c1e91cb8a2ee
+255, 0xa25af80a1beb5679
+256, 0x1af65d27c7b4abc3
+257, 0x133437060670e067
+258, 0xb1990fa39a97d32e
+259, 0x724adc89ae10ed17
+260, 0x3f682a3f2363a240
+261, 0x29198f8dbd343499
+262, 0xdfaeeaa42bc51105
+263, 0x5baff3901b9480c2
+264, 0x3f760a67043e77f5
+265, 0x610fa7aa355a43ba
+266, 0x394856ac09c4f7a7
+267, 0x1d9229d058aee82e
+268, 0x19c674804c41aeec
+269, 0x74cf12372012f4aa
+270, 0xa5d89b353fa2f6ca
+271, 0x697e4f672ac363dd
+272, 0xde6f55ba73df5af9
+273, 0x679cf537510bd68f
+274, 0x3dc916114ae9ef7e
+275, 0xd7e31a66ec2ee7ba
+276, 0xc21bebb968728495
+277, 0xc5e0781414e2adfd
+278, 0x71147b5412ddd4bd
+279, 0x3b864b410625cca9
+280, 0x433d67c0036cdc6
+281, 0x48083afa0ae20b1b
+282, 0x2d80beecd64ac4e8
+283, 0x2a753c27c3a3ee3e
+284, 0xb2c5e6afd1fe051a
+285, 0xea677930cd66c46b
+286, 0x4c3960932f92810a
+287, 0xf1b367a9e527eaba
+288, 0xb7d92a8a9a69a98e
+289, 0x9f9ad3210bd6b453
+290, 0x817f2889db2dcbd8
+291, 0x4270a665ac15813c
+292, 0x90b85353bd2be4dd
+293, 0x10c0460f7b2d68d
+294, 0x11cef32b94f947f5
+295, 0x3cf29ed8e7d477e8
+296, 0x793aaa9bd50599ef
+297, 0xbac15d1190014aad
+298, 0x987944ae80b5cb13
+299, 0x460aa51f8d57c484
+300, 0xc77df0385f97c2d3
+301, 0x92e743b7293a3822
+302, 0xbc3458bcfbcbb8c0
+303, 0xe277bcf3d04b4ed7
+304, 0xa537ae5cf1c9a31c
+305, 0x95eb00d30bd8cfb2
+306, 0x6376361c24e4f2dd
+307, 0x374477fe87b9ea8e
+308, 0x8210f1a9a039902e
+309, 0xe7628f7031321f68
+310, 0x8b8e9c0888fc1d3d
+311, 0x306be461fdc9e0ed
+312, 0x510009372f9b56f5
+313, 0xa6e6fa486b7a027a
+314, 0x9d3f002025203b5a
+315, 0x7a46e0e81ecbef86
+316, 0x41e280c611d04df0
+317, 0xedcec10418a99e8a
+318, 0x5c27b6327e0b9dbd
+319, 0xa81ed2035b509f07
+320, 0x3581e855983a4cc4
+321, 0x4744594b25e9809d
+322, 0xc737ac7c27fbd0ed
+323, 0x1b523a307045433a
+324, 0x8b4ce9171076f1d9
+325, 0x2db02d817cd5eec0
+326, 0x24a1f1229af50288
+327, 0x5550c0dcf583ff16
+328, 0x3587baaa122ec422
+329, 0xf9d3dc894229e510
+330, 0xf3100430d5cf8e87
+331, 0xc31af79862f8e2fb
+332, 0xd20582063b9f3537
+333, 0xac5e90ac95fcc7ad
+334, 0x107c4c704d5109d4
+335, 0xebc8628906dbfd70
+336, 0x215242776da8c531
+337, 0xa98002f1dcf08b51
+338, 0xbc3bdc07f3b09718
+339, 0x238677062495b512
+340, 0x53b4796f2a3c49e8
+341, 0x6424286467e22f0e
+342, 0x14d0952a11a71bac
+343, 0x2f97098149b82514
+344, 0x3777f2fdc425ad2
+345, 0xa32f2382938876d4
+346, 0xda8a39a021f20ae3
+347, 0x364361ef0a6ac32c
+348, 0x4413eede008ff05a
+349, 0x8dda8ace851aa327
+350, 0x4303cabbdcecd1ee
+351, 0x2e69f06d74aa549f
+352, 0x4797079cd4d9275c
+353, 0xc7b1890917e98307
+354, 0x34031b0e822a4b4c
+355, 0xfc79f76b566303ea
+356, 0x77014adbe255a930
+357, 0xab6c43dd162f3be5
+358, 0xa430041f3463f6b9
+359, 0x5c191a32ada3f84a
+360, 0xe8674a0781645a31
+361, 0x3a11cb667b8d0916
+362, 0xaedc73e80c39fd8a
+363, 0xfde12c1b42328765
+364, 0x97abb7dcccdc1a0b
+365, 0x52475c14d2167bc8
+366, 0x540e8811196d5aff
+367, 0xa867e4ccdb2b4b77
+368, 0x2be04af61e5bcfb9
+369, 0x81b645102bfc5dfd
+370, 0x96a52c9a66c6450f
+371, 0x632ec2d136889234
+372, 0x4ed530c0b36a6c25
+373, 0x6f4851225546b75
+374, 0x2c065d6ba46a1144
+375, 0xf8a3613ff416551d
+376, 0xb5f0fd60e9c971a9
+377, 0x339011a03bb4be65
+378, 0x9439f72b6995ded6
+379, 0xc1b03f3ef3b2292d
+380, 0xad12fd221daab3ae
+381, 0xf615b770f2cf996f
+382, 0x269d0fdcb764172
+383, 0x67837025e8039256
+384, 0x6402831fc823fafa
+385, 0x22854146a4abb964
+386, 0x7b5ad9b5a1bad7a8
+387, 0x67170e7beb6ac935
+388, 0xfc2d1e8e24adfaaa
+389, 0x7ded4395345ff40d
+390, 0x418981760a80dd07
+391, 0xc03bef38022c1d2
+392, 0x3a11850b26eade29
+393, 0xaa56d02c7175c5f4
+394, 0xd83b7917b9bfbff5
+395, 0x3c1df2f8fa6fced3
+396, 0xf3d6e2999c0bb760
+397, 0xc66d683a59a950e3
+398, 0x8e3972a9d73ffabf
+399, 0x97720a0443edffd9
+400, 0xa85f5d2fe198444a
+401, 0xfc5f0458e1b0de5e
+402, 0xe3973f03df632b87
+403, 0xe151073c84c594b3
+404, 0x68eb4e22e7ff8ecf
+405, 0x274f36eaed7cae27
+406, 0x3b87b1eb60896b13
+407, 0xbe0b2f831442d70a
+408, 0x2782ed7a48a1b328
+409, 0xb3619d890310f704
+410, 0xb03926b11b55921a
+411, 0xdb46fc44aa6a0ce4
+412, 0x4b063e2ef2e9453a
+413, 0xe1584f1aeec60fb5
+414, 0x7092bd6a879c5a49
+415, 0xb84e1e7c7d52b0e6
+416, 0x29d09ca48db64dfb
+417, 0x8f6c4a402066e905
+418, 0x77390795eabc36b
+419, 0xcc2dc2e4141cc69f
+420, 0x2727f83beb9e3c7c
+421, 0x1b29868619331de0
+422, 0xd38c571e192c246f
+423, 0x535327479fe37b6f
+424, 0xaff9ce5758617eb3
+425, 0x5658539e9288a4e4
+426, 0x8df91d87126c4c6d
+427, 0xe931cf8fdba6e255
+428, 0x815dfdf25fbee9e8
+429, 0x5c61f4c7cba91697
+430, 0xdd5f5512fe2313a1
+431, 0x499dd918a92a53cd
+432, 0xa7e969d007c97dfd
+433, 0xb8d39c6fc81ac0bb
+434, 0x1d646983def5746c
+435, 0x44d4b3b17432a60c
+436, 0x65664232a14db1e3
+437, 0xda8fae6433e7500b
+438, 0xbe51b94ff2a3fe94
+439, 0xe9b1bd9a9098ef9f
+440, 0xfe47d54176297ef5
+441, 0xb8ab99bc03bb7135
+442, 0xcfad97f608565b38
+443, 0xf05da71f6760d9c1
+444, 0xef8da40a7c70e7b
+445, 0xe0465d58dbd5d138
+446, 0xb54a2d70eb1a938
+447, 0xfdd50c905958f2d8
+448, 0x3c41933c90a57d43
+449, 0x678f6d894c6ad0bb
+450, 0x403e8f4582274e8
+451, 0x5cbbe975668df6b0
+452, 0x297e6520a7902f03
+453, 0x8f6dded33cd1efd7
+454, 0x8e903c97be8d783b
+455, 0x10bd015577e30f77
+456, 0x3fcd69d1c36eab0c
+457, 0xb45989f3ca198d3
+458, 0x507655ce02b491a9
+459, 0xa92cf99bb78602ce
+460, 0xebfb82055fbc2f0f
+461, 0x3334256279289b7a
+462, 0xc19d2a0f740ee0ac
+463, 0x8bb070dea3934905
+464, 0xa4ab57d3a8d1b3eb
+465, 0xfee1b09bcacf7ff4
+466, 0xccc7fb41ceec41fa
+467, 0xd4da49094eb5a74d
+468, 0xed5c693770af02ed
+469, 0x369dabc9bbfaa8e4
+470, 0x7eab9f360d054199
+471, 0xe36dbebf5ee94076
+472, 0xd30840e499b23d7
+473, 0x8678e6cb545015ff
+474, 0x3a47932ca0b336e
+475, 0xeb7c742b6e93d6fe
+476, 0x1404ea51fe5a62a9
+477, 0xa72cd49db978e288
+478, 0xfd7bada020173dcf
+479, 0xc9e74fc7abe50054
+480, 0x93197847bb66808d
+481, 0x25fd5f053dce5698
+482, 0xe198a9b18cc21f4
+483, 0x5cc27b1689452d5d
+484, 0x8b3657af955a98dc
+485, 0xc17f7584f54aa1c0
+486, 0xe821b088246b1427
+487, 0x32b5a9f6b45b6fa0
+488, 0x2aef7c315c2bae0c
+489, 0xe1af8129846b705a
+490, 0x4123b4c091b34614
+491, 0x6999d61ec341c073
+492, 0x14b9a8fcf86831ea
+493, 0xfd4cff6548f46c9f
+494, 0x350c3b7e6cc8d7d6
+495, 0x202a5047fecafcd5
+496, 0xa82509fe496bb57d
+497, 0x835e4b2608b575fe
+498, 0xf3abe3da919f54ec
+499, 0x8705a21e2c9b8796
+500, 0xfd02d1427005c314
+501, 0xa38458faa637f49b
+502, 0x61622f2360e7622a
+503, 0xe89335a773c2963b
+504, 0x481264b659b0e0d0
+505, 0x1e82ae94ebf62f15
+506, 0x8ea7812de49209d4
+507, 0xff963d764680584
+508, 0x418a68bef717f4af
+509, 0x581f0e7621a8ab91
+510, 0x840337e9a0ec4150
+511, 0x951ef61b344be505
+512, 0xc8b1b899feb61ec2
+513, 0x8b78ca13c56f6ed9
+514, 0x3d2fd793715a946f
+515, 0xf1c04fabcd0f4084
+516, 0x92b602614a9a9fcc
+517, 0x7991bd7a94a65be7
+518, 0x5dead10b06cad2d7
+519, 0xda7719b33f722f06
+520, 0x9d87a722b7bff71e
+521, 0xb038e479071409e9
+522, 0xf4e8bbec48054775
+523, 0x4fec2cd7a28a88ea
+524, 0x839e28526aad3e56
+525, 0xd37ec57852a98bf0
+526, 0xdef2cbbe00f3a02d
+527, 0x1aecfe01a9e4d801
+528, 0x59018d3c8beaf067
+529, 0x892753e6ac8bf3cd
+530, 0xefdd3437023d2d1c
+531, 0x447bfbd148c8cb88
+532, 0x282380221bd442b8
+533, 0xfce8658d1347384a
+534, 0x60b211a7ec6bfa8
+535, 0xd21729cfcc692974
+536, 0x162087ecd5038a47
+537, 0x2b17000c4bce39d2
+538, 0x3a1f75ff6adcdce0
+539, 0x721a411d312f1a2c
+540, 0x9c13b6133f66934d
+541, 0xaa975d14978980e5
+542, 0x9403dbd4754203fa
+543, 0x588c15762fdd643
+544, 0xdd1290f8d0ada73a
+545, 0xd9b77380936103f4
+546, 0xb2e2047a356eb829
+547, 0x7019e5e7f76f7a47
+548, 0x3c29a461f62b001d
+549, 0xa07dc6cfab59c116
+550, 0x9b97e278433f8eb
+551, 0x6affc714e7236588
+552, 0x36170aeb32911a73
+553, 0x4a665104d364a789
+554, 0x4be01464ec276c9c
+555, 0x71bb10271a8b4ecf
+556, 0xbf62e1d068bc018
+557, 0xc9ada5db2cbbb413
+558, 0x2bded75e726650e5
+559, 0x33d5a7af2f34385d
+560, 0x8179c46661d85657
+561, 0x324ebcfd29267359
+562, 0xac4c9311dc9f9110
+563, 0xc14bb6a52f9f9c0
+564, 0xc430abe15e7fb9db
+565, 0xf1cce5c14df91c38
+566, 0x651e3efa2c0750d3
+567, 0x38a33604a8be5c75
+568, 0x7aaf77fe7ff56a49
+569, 0xc0d1cc56bbf27706
+570, 0x887aa47324e156c6
+571, 0x12547c004b085e8d
+572, 0xd86a8d6fbbbfd011
+573, 0x57c860188c92d7b4
+574, 0xcd5d3843d361b8ca
+575, 0x8f586ef05a9cb3ef
+576, 0x174456e1ba6267d5
+577, 0xf5dc302c62fe583c
+578, 0xa349442fabcdb71
+579, 0xe5123c1a8b6fd08e
+580, 0x80681552aa318593
+581, 0xb295396deaef1e31
+582, 0xabb626e0b900e32b
+583, 0xf024db8d3f19c15e
+584, 0x1d04bb9548e2fb6c
+585, 0xd8ed2b2214936c2b
+586, 0x618ca1e430a52bc9
+587, 0xccbca44a6088136b
+588, 0xd0481855c8b9ccbe
+589, 0x3c92a2fade28bdf7
+590, 0x855e9fefc38c0816
+591, 0x1269bbfe55a7b27c
+592, 0x1d6c853d83726d43
+593, 0xc8655511cc7fcafc
+594, 0x301503eb125a9b0e
+595, 0xb3108e4532016b11
+596, 0xbb7ab6245da9cb3d
+597, 0x18004c49116d85eb
+598, 0x3480849c20f61129
+599, 0xe28f45157463937b
+600, 0x8e85e61060f2ce1
+601, 0x1673da4ec589ba5e
+602, 0x74b9a6bd1b194712
+603, 0xed39e147fa8b7601
+604, 0x28ce54019102ca77
+605, 0x42e0347f6d7a2f30
+606, 0xb6a908d1c4814731
+607, 0x16c3435e4e9a126d
+608, 0x8880190514c1ad54
+609, 0xfffd86229a6f773c
+610, 0x4f2420cdb0aa1a93
+611, 0xf8e1acb4120fc1fa
+612, 0x63a8c553ab36a2f2
+613, 0x86b88cf3c0a6a190
+614, 0x44d8b2801622c792
+615, 0xf6eae14e93082ff1
+616, 0xd9ed4f5d1b8fac61
+617, 0x1808ce17f4e1f70
+618, 0x446e83ea336f262f
+619, 0xc7c802b04c0917b7
+620, 0x626f45fd64968b73
+621, 0x9ffa540edc9b2c5c
+622, 0xa96a1e219e486af8
+623, 0x2bb8963884e887a1
+624, 0xba7f68a5d029e3c4
+625, 0xefc45f44392d9ca0
+626, 0x98d77762503c5eab
+627, 0xd89bcf62f2da627c
+628, 0xa3cab8347f833151
+629, 0xa095b7595907d5c7
+630, 0x3b3041274286181
+631, 0xb518db8919eb71fa
+632, 0x187036c14fdc9a36
+633, 0xd06e28301e696f5d
+634, 0xdbc71184e0c56492
+635, 0xfe51e9cae6125bfd
+636, 0x3b12d17cd014df24
+637, 0x3b95e4e2c986ac1a
+638, 0x29c1cce59fb2dea2
+639, 0x58c05793182a49d6
+640, 0xc016477e330d8c00
+641, 0x79ef335133ada5d
+642, 0x168e2cad941203f3
+643, 0xf99d0f219d702ef0
+644, 0x655628068f8f135b
+645, 0xdcdea51910ae3f92
+646, 0x8e4505039c567892
+647, 0x91a9ec7e947c89ae
+648, 0x8717172530f93949
+649, 0x1c80aba9a440171a
+650, 0x9c8f83f6ebe7441e
+651, 0x6c05e1efea4aa7f9
+652, 0x10af696b777c01b
+653, 0x5892e9d9a92fc309
+654, 0xd2ba7da71e709432
+655, 0x46378c7c3269a466
+656, 0x942c63dfe18e772c
+657, 0x6245cf02ef2476f
+658, 0x6f265b2759ea2aea
+659, 0x5aa757f17d17f4a6
+660, 0x1ad6a3c44fa09be6
+661, 0xe861af14e7015fb8
+662, 0x86be2e7db388c77
+663, 0x5c7bba32b519e9a0
+664, 0x3feb314850c4437b
+665, 0x97955add60cfb45b
+666, 0xfdb536230a540bdc
+667, 0xdac9d7bf6e58512e
+668, 0x4894c00e474e8120
+669, 0xa1918a37739da366
+670, 0xa8097f2096532807
+671, 0x592afe50e6c5e643
+672, 0xd69050ee6dcb33dc
+673, 0xa6956b262dd3c561
+674, 0x1a55c815555e63f7
+675, 0x2ec7fd37516de2bb
+676, 0x8ec251d9c70e76ba
+677, 0x9b76e4abafd2689
+678, 0x9ce3f5c751a57df1
+679, 0x915c4818bf287bc7
+680, 0x2293a0d1fe07c735
+681, 0x7627dcd5d5a66d3d
+682, 0xb5e4f92cc49c7138
+683, 0x6fc51298731d268c
+684, 0xd19800aa95441f87
+685, 0x14f70f31162fa115
+686, 0x41a3da3752936f59
+687, 0xbec0652be95652ee
+688, 0x7aa4bdb1020a290f
+689, 0x4382d0d9bee899ef
+690, 0xe6d988ae4277d6ff
+691, 0xe618088ccb2a32d1
+692, 0x411669dfaa899e90
+693, 0x234e2bf4ba76d9f
+694, 0xe109fe4cb7828687
+695, 0x1fb96b5022b0b360
+696, 0x6b24ad76c061a716
+697, 0x7e1781d4d7ecee15
+698, 0xf20c2dbe82ba38ba
+699, 0xeda8e8ae1d943655
+700, 0xa58d196e2a77eaec
+701, 0x44564765a5995a0b
+702, 0x11902fe871ecae21
+703, 0x2ea60279900e675d
+704, 0x38427227c18a9a96
+705, 0xe0af01490a1b1b48
+706, 0x826f91997e057824
+707, 0x1e57308e6e50451
+708, 0xb42d469bbbfdc350
+709, 0xb9734cff1109c49b
+710, 0x98967559bb9d364f
+711, 0xd6be360041907c12
+712, 0xa86a1279122a1e21
+713, 0x26f99a8527bfc698
+714, 0xfa8b85758f28f5d6
+715, 0xe3057429940806ae
+716, 0x4bee2d7e84f93b2b
+717, 0x948350a76ea506f4
+718, 0xa139154488045e74
+719, 0x8893579ba5e78085
+720, 0x5f21c215c6a9e397
+721, 0x456134f3a59641dc
+722, 0x92c0273f8e97a9c6
+723, 0xd2936c9c3f0c6936
+724, 0xcfa4221e752c4735
+725, 0x28cd5a7457355dca
+726, 0xecdfdde23d90999f
+727, 0x60631b2d494d032b
+728, 0xf67289df269a827f
+729, 0xcbe8011ef0f5b7ef
+730, 0x20eea973c70a84f5
+731, 0xbe1fd200398557ce
+732, 0xd2279ee030191bba
+733, 0xf2bd4291dedaf819
+734, 0xfc6d167dbe8c402
+735, 0x39ac298da5d0044b
+736, 0xceac026f5f561ce
+737, 0x10a5b0bdd8ad60e6
+738, 0xdeb3c626df6d4bcb
+739, 0x3c128962e77ff6ca
+740, 0xc786262e9c67a0e5
+741, 0x4332855b3febcdc0
+742, 0x7bda9724d1c0e020
+743, 0x6a8c93399bc4df22
+744, 0xa9b20100ac707396
+745, 0xa11a3458502c4eb5
+746, 0xb185461c60478941
+747, 0x13131d56195b7ff6
+748, 0x8d55875ddbd4aa1c
+749, 0xc09b67425f469aa5
+750, 0x39e33786cc7594c4
+751, 0x75e96db8e4b08b93
+752, 0xda01cd12a3275d1e
+753, 0x2c49e7822344fab5
+754, 0x9bd5f10612514ca7
+755, 0x1c801a5c828e7332
+756, 0x29797d3f4f6c7b4c
+757, 0xac992715e21e4e53
+758, 0xe40e89ee887ddb37
+759, 0x15189a2b265a783b
+760, 0xa854159a52af5c5
+761, 0xb9d8a5a81c12bead
+762, 0x3240cdc9d59e2a58
+763, 0x1d0b872234cf8e23
+764, 0xc01224cf6ce12cff
+765, 0x2601e9f3905c8663
+766, 0xd4ecf9890168d6b4
+767, 0xa45db796d89bfdd5
+768, 0x9f389406dad64ab4
+769, 0xa5a851adce43ffe3
+770, 0xd0962c41c26e5aa9
+771, 0x8a671679e48510a4
+772, 0xc196dc0924a6bfeb
+773, 0x3ead661043b549cb
+774, 0x51af4ca737d405ac
+775, 0xf4425b5c62275fb6
+776, 0x71e69d1f818c10f5
+777, 0xacaf4af2d3c70162
+778, 0x2e1f1d4fd7524244
+779, 0xe54fdd8f388890e8
+780, 0xfda0d33e84eb2b83
+781, 0x53965c5e392b81da
+782, 0x5c92288267263097
+783, 0xcac1b431c878c66c
+784, 0x36c0e1cf417241c6
+785, 0x5cc4d9cd1a36bf2c
+786, 0x32e4257bb5d3e470
+787, 0x4aecff904adb44fb
+788, 0x4d91a8e0d1d60cac
+789, 0xa3b478388385b038
+790, 0x48d955f24eba70be
+791, 0x310e4deb07f24f68
+792, 0x8853e73b1f30a5a
+793, 0x278aee45c2a65c5
+794, 0xf6932eedbd62fb0b
+795, 0xafb95958c82fafad
+796, 0x78e807c18616c16c
+797, 0xd7abadda7488ed9f
+798, 0x2dd72e2572aa2ae6
+799, 0x6ec3791982c2be09
+800, 0x6865bb314fac478f
+801, 0xa14dc0ce09000d1a
+802, 0xb8081ad134da10f2
+803, 0xc4ac1534aa825ef5
+804, 0xd83aeb48ae2d538f
+805, 0x38052027e3074be4
+806, 0xa9833e06ef136582
+807, 0x4f02d790ec9fd78
+808, 0xec2f60bc711c5bdc
+809, 0x9253b0d12268e561
+810, 0xa8ac607fdd62c206
+811, 0x895e28ebc920289f
+812, 0xe2fd42b154243ac7
+813, 0xc69cac2f776eee19
+814, 0xf4d4ac11db56d0dc
+815, 0xa8d37049b9f39833
+816, 0x75abbf8a196c337c
+817, 0xb115bb76750d27b8
+818, 0x39426d187839154
+819, 0xd488423e7f38bf83
+820, 0xbb92e0c76ecb6a62
+821, 0x3055a018ce39f4e3
+822, 0xc93fe0e907729bfb
+823, 0x65985d17c5863340
+824, 0x2088ae081b2028e1
+825, 0x6e628de873314057
+826, 0x864377cccf573f0e
+827, 0xae03f4c9aa63d132
+828, 0xb1db766d6404c66d
+829, 0xdce5a22414a374b
+830, 0x622155b777819997
+831, 0x69fe96e620371f3c
+832, 0xa9c67dbc326d94fc
+833, 0x932a84ae5dd43bab
+834, 0xe2301a20f6c48c3f
+835, 0x795d2e79c6477300
+836, 0xd8e3e631289521e7
+837, 0xae2684979002dfd6
+838, 0xc9c2392377550f89
+839, 0xa1b0c99d508ef7ec
+840, 0x593aef3c5a5272ec
+841, 0xe32e511a4b7162cd
+842, 0xab3b81655f5a2857
+843, 0x1b535e1a0aaf053e
+844, 0x5b33f56c1b6a07e2
+845, 0x782dc8cfcac4ef36
+846, 0xb3d4f256eecfd202
+847, 0xf73a6598f58c4f7e
+848, 0xd5722189524870ae
+849, 0x707878de6b995fc0
+850, 0xc3eb6ba73e3d7e8a
+851, 0xca75c017655b75a7
+852, 0x1b29369ea3541e5f
+853, 0x352e98858bdb58a3
+854, 0x1e4412d184b6b27d
+855, 0x2d375ba0304b2d17
+856, 0x56c30fce69a5d08e
+857, 0x6b8c2b0c06584bda
+858, 0xde4dfff228c8c91f
+859, 0xb7c9edd574e6287f
+860, 0xf6078281c9fca2b2
+861, 0xb9b9a51de02a2f1e
+862, 0xa411bef31c0103b0
+863, 0xc5facd8fc5e1d7a3
+864, 0x54e631c05ddf7359
+865, 0x815b42b3fd06c474
+866, 0xc9ac07566fda18ec
+867, 0xd84ea62957bd8e15
+868, 0x5575f74b5cfd8803
+869, 0x5779a8d460c2e304
+870, 0xfd6e87e264a85587
+871, 0xa1d674daa320b26d
+872, 0x2c3c3ec64b35afc4
+873, 0x393a274ff03e6935
+874, 0x1f40ecbac52c50ea
+875, 0xc3de64fa324ffc0c
+876, 0x56ae828b7f9deb04
+877, 0xe7c1a77b5c1f2cb3
+878, 0xa4c4aab19ea921cc
+879, 0xec164c238825822c
+880, 0xa6a3304770c03b03
+881, 0x3a63641d5b1e8123
+882, 0x42677be3a54617ef
+883, 0xa2680423e3a200c0
+884, 0x8b17cf75f3f37277
+885, 0xe7ce65a49242be3d
+886, 0x7f85934271323e4b
+887, 0xcfb0f431f79a4fab
+888, 0x392e4041a8505b65
+889, 0xd3e5daf0d8b25ea6
+890, 0x9447eff675d80f53
+891, 0xea27a9d53cfaeea8
+892, 0xe3f2335945a83ba
+893, 0x8875a43ce216413b
+894, 0xe49941f9eabce33e
+895, 0x9357c1296683a5b1
+896, 0xf0f16439e81ee701
+897, 0x3181515295ffd79a
+898, 0x9d7150fffd169ed8
+899, 0x2d6a1d281e255a72
+900, 0x81bf1286fb3a92b6
+901, 0x566d3079b499e279
+902, 0xc7939ca8f047341
+903, 0xb1f8050e7c2d59f6
+904, 0x605701045e7be192
+905, 0x51b73360e8e31a1c
+906, 0x9f4ad54483ba9fe0
+907, 0xd3085b8fcf69d1c8
+908, 0xc3e7475026dc5f0b
+909, 0x5800f8554b157354
+910, 0x37dfdf858cfcd963
+911, 0x3a1fce05ce385072
+912, 0xf495c062645c20c3
+913, 0xdcbeec2c3492c773
+914, 0xc38f427589d1d0b4
+915, 0x681ead60216a8184
+916, 0x4bd569c40cc88c41
+917, 0x49b0d442e130b7a2
+918, 0xee349156b7d1fa3f
+919, 0x2bde2d2db055135b
+920, 0xc6a460d2fbcb2378
+921, 0xd0f170494ff3dbb
+922, 0xb294422492528a23
+923, 0xfc95873c854e7b86
+924, 0x6c9c3ad1797bb19c
+925, 0xe0c06f2aab65062d
+926, 0x58e32ce0f11e3a81
+927, 0xa745fcd729ff5036
+928, 0x599b249b2fc2cdb2
+929, 0x78f23b5b0dd5b082
+930, 0x6de3e957f549ecfc
+931, 0x9d0712fa6d878756
+932, 0x9076e8554e4a413a
+933, 0xf3185818c0294de8
+934, 0x5de7cdf4b455b9b6
+935, 0xb15f6908ed703f7d
+936, 0x98c654dfedc6818
+937, 0x120502ab0e93ae42
+938, 0x67966a98a58dc120
+939, 0x1caa0fc628989482
+940, 0xd8b2c3cd480a8625
+941, 0x85c70071b3aed671
+942, 0xff385f8473714662
+943, 0xe2868e4bf3773b63
+944, 0x96cf8019b279298e
+945, 0x8511cc930bd74800
+946, 0x5312e48fdd55f5ab
+947, 0xfcdae564b52df78d
+948, 0x9eee48373e652176
+949, 0x953788f6bcbc56b0
+950, 0xd1a3855dbd2f6b37
+951, 0x3ad32acf77f4d1e9
+952, 0x917c7be81b003e30
+953, 0x9ce817da1e2e9dfb
+954, 0x2968983db162d44d
+955, 0x1e005decef5828ad
+956, 0xc38fe59d1aa4f3d5
+957, 0xf357f1710dc02f1d
+958, 0x2613912a4c83ec67
+959, 0x832a11470b9a17cb
+960, 0x5e85508a611f0dad
+961, 0x2781131677f59d56
+962, 0xa82358d7d4b0237f
+963, 0xfbf8b3cc030c3af6
+964, 0x68b2f68ac8a55adb
+965, 0x3b6fcf353add0ada
+966, 0xd1956049bcd15bd5
+967, 0x95b76f31c7f98b6d
+968, 0x814b6690df971a84
+969, 0xdcf7959cddd819e4
+970, 0xcf8c72c5d804fc88
+971, 0x56883769c8945a22
+972, 0x1f034652f658cf46
+973, 0x41df1324cda235a1
+974, 0xeccd32524504a054
+975, 0x974e0910a04ec02c
+976, 0x72104507b821f6db
+977, 0x791f8d089f273044
+978, 0xe0f79a4f567f73c3
+979, 0x52fe5bea3997f024
+980, 0x5f8b9b446494f78
+981, 0xfd9f511947059190
+982, 0x3aea9dac6063bce3
+983, 0xbfdae4dfc24aee60
+984, 0xa82cdbbf0a280318
+985, 0xf460aae18d70aa9d
+986, 0x997367cb204a57c4
+987, 0x616e21ab95ba05ef
+988, 0x9bfc93bec116769f
+989, 0x2b2ee27c37a3fa5b
+990, 0xb25c6ed54006ee38
+991, 0xab04d4a5c69e69a5
+992, 0x6d2f6b45f2d8438f
+993, 0x4ad2f32afc82f092
+994, 0x513d718908f709c0
+995, 0x5272aadc4fffca51
+996, 0xeb3f87e66156ef5d
+997, 0xf8a3d5a46a86ba85
+998, 0xdb4548a86f27abfd
+999, 0x57c05f47ff62380d
diff --git a/numpy/random/tests/data/sfc64-testset-1.csv b/numpy/random/tests/data/sfc64-testset-1.csv
new file mode 100644
index 000000000..4fffe6959
--- /dev/null
+++ b/numpy/random/tests/data/sfc64-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0xa475f55fbb6bc638
+1, 0xb2d594b6c29d971c
+2, 0x275bc4ece4484fb1
+3, 0x569be72d9b3492fb
+4, 0x89a5bb9b206a670c
+5, 0xd951bfa06afdc3f9
+6, 0x7ee2e1029d52a265
+7, 0x12ef1d4de0cb4d4c
+8, 0x41658ba8f0ef0280
+9, 0x5b650c82e4fe09c5
+10, 0x638a9f3e30ec4e94
+11, 0x147487fb2ba9233e
+12, 0x89ef035603d2d1fb
+13, 0xe66ca57a190e6cbe
+14, 0x330f673740dd61fc
+15, 0xc71d3dce2f8bb34e
+16, 0x3c07c39ff150b185
+17, 0x5df952b6cae8f099
+18, 0x9f09f2b1f0ceac80
+19, 0x19598eee2d0c4c67
+20, 0x64e06483702e0ebd
+21, 0xda04d1fdb545f7fa
+22, 0xf2cf53b61a0c4f9b
+23, 0xf0bb724ce196f66e
+24, 0x71cefde55d9cf0f
+25, 0x6323f62824a20048
+26, 0x1e93604680f14b4e
+27, 0xd9d8fad1d4654025
+28, 0xf4ee25af2e76ca08
+29, 0x6af3325896befa98
+30, 0xad9e43abf5e04053
+31, 0xbf930e318ce09de3
+32, 0x61f9583b4f9ffe76
+33, 0x9b69d0b3d5ec8958
+34, 0xa608f250f9b2ca41
+35, 0x6fdba7073dc2bb5d
+36, 0xa9d57601efea6d26
+37, 0xc24a88a994954105
+38, 0xc728b1f78d88fe5b
+39, 0x88da88c2b083b3b2
+40, 0xa9e27f7303c76cfd
+41, 0xc4c24608c29176eb
+42, 0x5420b58466b972fd
+43, 0xd2018a661b6756c8
+44, 0x7caed83d9573fc7
+45, 0x562a3d81b849a06a
+46, 0x16588af120c21f2c
+47, 0x658109a7e0eb4837
+48, 0x877aabb14d3822e1
+49, 0x95704c342c3745fe
+50, 0xeeb8a0dc81603616
+51, 0x431bf94889290419
+52, 0xe4a9410ab92a5863
+53, 0xbc6be64ea60f12ba
+54, 0x328a2da920015063
+55, 0x40f6b3bf8271ae07
+56, 0x4068ff00a0e854f8
+57, 0x1b287572ca13fa78
+58, 0xa11624a600490b99
+59, 0x4a04ef29eb7150fa
+60, 0xcc9469ab5ffb739
+61, 0x99a6a9f8d95e782
+62, 0x8e90356573e7a070
+63, 0xa740b8fb415c81c4
+64, 0x47eccef67447f3da
+65, 0x2c720afe3a62a49b
+66, 0xe2a747f0a43eacf4
+67, 0xba063a87ab165576
+68, 0xbc1c78ed27feb5a3
+69, 0x285a19fa3974f9d
+70, 0x489c61e704f5f0e3
+71, 0xf5ab04f6b03f238b
+72, 0x7e25f88138a110dd
+73, 0xc3d1cef3d7c1f1d1
+74, 0xc3de6ec64d0d8e00
+75, 0x73682a15b6cc5088
+76, 0x6fecbeb319163dc5
+77, 0x7e100d5defe570a1
+78, 0xad2af9af076dce57
+79, 0x3c65100e23cd3a9a
+80, 0x4b442cc6cfe521bb
+81, 0xe89dc50f8ab1ef75
+82, 0x8b3c6fdc2496566
+83, 0xdfc50042bc2c308c
+84, 0xe39c5f158b33d2b2
+85, 0x92f6adefdfeb0ac
+86, 0xdf5808a949c85b3e
+87, 0x437384021c9dace9
+88, 0xa7b5ed0d3d67d8f
+89, 0xe1408f8b21da3c34
+90, 0xa1bba125c1e80522
+91, 0x7611dc4710385264
+92, 0xb00a46ea84082917
+93, 0x51bf8002ffa87cef
+94, 0x9bb81013e9810adc
+95, 0xd28f6600013541cd
+96, 0xc2ca3b1fa7791c1f
+97, 0x47f9ad58f099c82c
+98, 0x4d1bb9458469caf9
+99, 0xca0b165b2844257
+100, 0xc3b2e667d075dc66
+101, 0xde22f71136a3dbb1
+102, 0x23b4e3b6f219e4c3
+103, 0x327e0db4c9782f66
+104, 0x9365506a6c7a1807
+105, 0x3e868382dedd3be7
+106, 0xff04fa6534bcaa99
+107, 0x96621a8862995305
+108, 0x81bf39cb5f8e1df7
+109, 0x79b684bb8c37af7a
+110, 0xae3bc073c3cde33c
+111, 0x7805674112c899ac
+112, 0xd95a27995abb20f2
+113, 0x71a503c57b105c40
+114, 0x5ff00d6a73ec8acc
+115, 0x12f96391d91e47c2
+116, 0xd55ca097b3bd4947
+117, 0x794d79d20468b04
+118, 0x35d814efb0d7a07d
+119, 0xfa9ac9bd0aae76d3
+120, 0xa77b8a3711e175cd
+121, 0xe6694fbf421f9489
+122, 0xd8f1756525a1a0aa
+123, 0xe38dfa8426277433
+124, 0x16b640c269bbcd44
+125, 0x2a7a5a67ca24cfeb
+126, 0x669039c28d5344b4
+127, 0x2a445ee81fd596bb
+128, 0x600df94cf25607e0
+129, 0x9358561a7579abff
+130, 0xee1d52ea179fc274
+131, 0x21a8b325e89d31be
+132, 0x36fc0917486eec0a
+133, 0x3d99f40717a6be9f
+134, 0x39ac140051ca55ff
+135, 0xcef7447c26711575
+136, 0xf22666870eff441d
+137, 0x4a53c6134e1c7268
+138, 0xd26de518ad6bdb1b
+139, 0x1a736bf75b8b0e55
+140, 0xef1523f4e6bd0219
+141, 0xb287b32fd615ad92
+142, 0x2583d6af5e841dd5
+143, 0x4b9294aae7ca670c
+144, 0xf5aa4a84174f3ca9
+145, 0x886300f9e0dc6376
+146, 0x3611401e475ef130
+147, 0x69b56432b367e1ac
+148, 0x30c330e9ab36b7c4
+149, 0x1e0e73079a85b8d5
+150, 0x40fdfc7a5bfaecf
+151, 0xd7760f3e8e75a085
+152, 0x1cc1891f7f625313
+153, 0xeece1fe6165b4272
+154, 0xe61111b0c166a3c1
+155, 0x2f1201563312f185
+156, 0xfd10e8ecdd2a57cb
+157, 0x51cdc8c9dd3a89bf
+158, 0xed13cc93938b5496
+159, 0x843816129750526b
+160, 0xd09995cd6819ada
+161, 0x4601e778d40607df
+162, 0xef9df06bd66c2ea0
+163, 0xae0bdecd3db65d69
+164, 0xbb921a3c65a4ae9a
+165, 0xd66698ce8e9361be
+166, 0xacdc91647b6068f4
+167, 0xe505ef68f2a5c1c0
+168, 0xd6e62fd27c6ab137
+169, 0x6a2ba2c6a4641d86
+170, 0x9c89143715c3b81
+171, 0xe408c4e00362601a
+172, 0x986155cbf5d4bd9d
+173, 0xb9e6831728c893a7
+174, 0xb985497c3bf88d8c
+175, 0xd0d729214b727bec
+176, 0x4e557f75fece38a
+177, 0x6572067fdfd623ca
+178, 0x178d49bb4d5cd794
+179, 0xe6baf59f60445d82
+180, 0x5607d53518e3a8d2
+181, 0xba7931adb6ebbd61
+182, 0xe853576172611329
+183, 0xe945daff96000c44
+184, 0x565b9ba3d952a176
+185, 0xcdb54d4f88c584c8
+186, 0x482a7499bee9b5e5
+187, 0x76560dd0affe825b
+188, 0x2a56221faa5ca22c
+189, 0x7729be5b361f5a25
+190, 0xd6f2195795764876
+191, 0x59ef7f8f423f18c5
+192, 0x7ebefed6d02adde1
+193, 0xcfec7265329c73e5
+194, 0x4fd8606a5e59881c
+195, 0x95860982ae370b73
+196, 0xdecfa33b1f902acc
+197, 0xf9b8a57400b7c0a6
+198, 0xd20b822672ec857b
+199, 0x4eb81084096c7364
+200, 0xe535c29a44d9b6ad
+201, 0xdef8b48ebacb2e29
+202, 0x1063bc2b8ba0e915
+203, 0xe4e837fb53d76d02
+204, 0x4df935db53579fb8
+205, 0xa30a0c8053869a89
+206, 0xe891ee58a388a7b5
+207, 0x17931a0c64b8a985
+208, 0xaf2d350b494ce1b3
+209, 0x2ab9345ffbcfed82
+210, 0x7de3fe628a2592f0
+211, 0x85cf54fab8b7e79d
+212, 0x42d221520edab71b
+213, 0x17b695b3af36c233
+214, 0xa4ffe50fe53eb485
+215, 0x1102d242db800e4d
+216, 0xc8dc01f0233b3b6
+217, 0x984a030321053d36
+218, 0x27fa8dc7b7112c0e
+219, 0xba634dd8294e177f
+220, 0xe67ce34b36332eb
+221, 0x8f1351e1894fb41a
+222, 0xb522a3048761fd30
+223, 0xc350ad9bc6729edc
+224, 0xe0ed105bd3c805e1
+225, 0xa14043d2b0825aa7
+226, 0xee7779ce7fc11fdf
+227, 0xc0fa8ba23a60ab25
+228, 0xb596d1ce259afbad
+229, 0xaa9b8445537fdf62
+230, 0x770ab2c700762e13
+231, 0xe812f1183e40cc1
+232, 0x44bc898e57aefbbd
+233, 0xdd8a871df785c996
+234, 0x88836a5e371eb36b
+235, 0xb6081c9152623f27
+236, 0x895acbcd6528ca96
+237, 0xfb67e33ddfbed435
+238, 0xaf7af47d323ce26
+239, 0xe354a510c3c39b2d
+240, 0x5cacdedda0672ba3
+241, 0xa440d9a2c6c22b09
+242, 0x6395099f48d64304
+243, 0xc11cf04c75f655b5
+244, 0x1c4e054d144ddb30
+245, 0x3e0c2db89d336636
+246, 0x127ecf18a5b0b9a7
+247, 0x3b50551a88ea7a73
+248, 0xbd27003e47f1f684
+249, 0xf32d657782baac9b
+250, 0x727f5cabf020bc9
+251, 0x39c1c1c226197dc7
+252, 0x5552c87b35deeb69
+253, 0x64d54067b5ce493f
+254, 0x3494b091fe28dda0
+255, 0xdf0278bc85ee2965
+256, 0xdef16fec25efbd66
+257, 0xe2be09f578c4ce28
+258, 0xd27a9271979d3019
+259, 0x427f6fcd71845e3
+260, 0x26b52c5f81ec142b
+261, 0x98267efc3986ad46
+262, 0x7bf4165ddb7e4374
+263, 0xd05f7996d7941010
+264, 0x3b3991de97b45f14
+265, 0x9068217fb4f27a30
+266, 0xd8fe295160afc7f3
+267, 0x8a159fab4c3bc06f
+268, 0x57855506d19080b6
+269, 0x7636df6b3f2367a4
+270, 0x2844ee3abd1d5ec9
+271, 0xe5788de061f51c16
+272, 0x69e78cc9132a164
+273, 0xacd53cde6d8cd421
+274, 0xb23f3100068e91da
+275, 0x4140070a47f53891
+276, 0xe4a422225a96e53a
+277, 0xb82a8925a272a2ac
+278, 0x7c2f9573590fe3b7
+279, 0xbaf80764db170575
+280, 0x955abffa54358368
+281, 0x355ce7460614a869
+282, 0x3700ede779a4afbf
+283, 0x10a6ec01d92d68cd
+284, 0x3308f5a0a4c0afef
+285, 0x97b892d7601136c9
+286, 0x4955c3b941b8552e
+287, 0xca85aa67e941961d
+288, 0xb1859ae5db28e9d2
+289, 0x305d072ac1521fbd
+290, 0xed52a868996085bb
+291, 0x723bfa6a76358852
+292, 0x78d946ecd97c5fb3
+293, 0x39205b30a8e23e79
+294, 0xb927e3d086baadbe
+295, 0xa18d6946136e1ff5
+296, 0xdab6f0b51c1eb5ff
+297, 0xf0a640bf7a1af60c
+298, 0xf0e81db09004d0d4
+299, 0xfe76cebdbe5a4dde
+300, 0x2dafe9cc3decc376
+301, 0x4c871fdf1af34205
+302, 0xe79617d0c8fa893b
+303, 0xee658aaad3a141f7
+304, 0xfd91aa74863e19f1
+305, 0x841b8f55c103cc22
+306, 0x22766ed65444ad5d
+307, 0x56d03d1beca6c17a
+308, 0x5fd4c112c92036ae
+309, 0x75466ae58a5616dc
+310, 0xfbf98b1081e802a9
+311, 0xdc325e957bf6d8f5
+312, 0xb08da7015ebd19b7
+313, 0xf25a9c0944f0c073
+314, 0xf4625bafa0ced718
+315, 0x4349c9e093a9e692
+316, 0x75a9ccd4dd8935cb
+317, 0x7e6cf9e539361e91
+318, 0x20fdd22fb6edd475
+319, 0x5973021b57c2311f
+320, 0x75392403667edc15
+321, 0xed9b2156ea70d9f1
+322, 0xf40c114db50b64a0
+323, 0xe26bb2c9eef20c62
+324, 0x409c1e3037869f03
+325, 0xcdfd71fdda3b7f91
+326, 0xa0dfae46816777d6
+327, 0xde060a8f61a8deb8
+328, 0x890e082a8b0ca4fc
+329, 0xb9f2958eddf2d0db
+330, 0xd17c148020d20e30
+331, 0xffdc9cc176fe7201
+332, 0xffb83d925b764c1
+333, 0x817ea639e313da8d
+334, 0xa4dd335dd891ca91
+335, 0x1342d25a5e81f488
+336, 0xfa7eb9c3cf466b03
+337, 0xfe0a423d44b185d0
+338, 0x101cfd430ab96049
+339, 0x7b5d3eda9c4504b
+340, 0xe20ccc006e0193f1
+341, 0xf54ccddedebc5df0
+342, 0xc0edd142bd58f1db
+343, 0x3831f40d378d2430
+344, 0x80132353f0a88289
+345, 0x688f23c419d03ef8
+346, 0x4c6837e697884066
+347, 0x699387bb2e9a3a8f
+348, 0x8996f860342448d8
+349, 0xb0f80dff99bfa5cc
+350, 0x3e927a7f9ea12c8e
+351, 0xd7e498d1e5f9dff3
+352, 0x78ecb97bb3f864cc
+353, 0x3c4ffd069a014d38
+354, 0xf8d5073a1e09b4d4
+355, 0x8717e854f9faef23
+356, 0xfbcc5478d8d0ad7
+357, 0xd3cd8b233ca274ff
+358, 0x8bd8f11f79beb265
+359, 0xf64498a832d8fd0e
+360, 0xb01bba75112131ec
+361, 0x55572445a7869781
+362, 0x7b56622f18cb3d7a
+363, 0x7f192c9e075bdb83
+364, 0xd9a112f836b83ff3
+365, 0x68673b37269653dc
+366, 0xe46a9433fb6a0879
+367, 0x127d756ca4779001
+368, 0xc1378e8b1e8eab94
+369, 0x1006edb0f51d078c
+370, 0xc6dd53961232d926
+371, 0x9a4aeef44038256d
+372, 0xd357f4fa652d4f5f
+373, 0x59f3d2cc3378598
+374, 0xe76e6207a824a7fc
+375, 0x5fc5e33712ceffef
+376, 0x77d24aeb0ccb1adc
+377, 0x5be4b2826805659e
+378, 0x257c69d787e64634
+379, 0x58dd52ca6bc727b1
+380, 0x3ab997767235ea33
+381, 0x986a2a7a966fad14
+382, 0xc900f8b27761dcc4
+383, 0x44991bdb13795700
+384, 0xe5c145a4fe733b2
+385, 0x56f041b56bffe0d3
+386, 0x5779c4fef8067996
+387, 0xa0fe8748e829532d
+388, 0x840c1277d78d9dd4
+389, 0x37ebcb315432acbc
+390, 0xf4bc8738433ba3be
+391, 0x8b122993f2e10062
+392, 0xe1fe8481f2681ed5
+393, 0x8e23f1630d9f494a
+394, 0xda24661a01b7d0b3
+395, 0x7a02942a179cee36
+396, 0xf1e08a3c09b71ac
+397, 0x3dec2cc7ee0bd8fd
+398, 0x1f3e480113d805d4
+399, 0xc061b973ad4e3f2c
+400, 0x6bea750f17a66836
+401, 0xbc2add72eac84c25
+402, 0xcff058d3f97934ca
+403, 0x54ccc30987778ec2
+404, 0x93449ec1e1469558
+405, 0xe2ff369eb0c6836
+406, 0x41c2df2d63bf8e55
+407, 0xf9302629b6c71be2
+408, 0xdd30376b8e5ab29a
+409, 0x12db9e04f911d754
+410, 0x8d03d6cd359f1b97
+411, 0xe15956511abf1cee
+412, 0x9b68e10e2c2fd940
+413, 0x2e28de6491c1ce53
+414, 0x52b329b72d0c109d
+415, 0xc2c0b115f9da2a60
+416, 0x6ca084105271bbff
+417, 0x49b92b8676058c1e
+418, 0x767fc92a70f7e5a3
+419, 0x87ba4ed4b65a6aa0
+420, 0xf70b052e0a3975e9
+421, 0x3e925c3306db9eec
+422, 0x43253f1d96ac9513
+423, 0xe3e04f1a1ea454c4
+424, 0x763e3f4cc81ba0c8
+425, 0x2a2721ac69265705
+426, 0xdf3b0ac6416ea214
+427, 0xa6a6b57450f3e000
+428, 0xc3d3b1ac7dbfe6ac
+429, 0xb66e5e6f7d2e4ec0
+430, 0x43c65296f98f0f04
+431, 0xdb0f6e3ff974d842
+432, 0x3d6b48e02ebb203b
+433, 0xd74674ebf09d8f27
+434, 0xbe65243c58fc1200
+435, 0x55eb210a68d42625
+436, 0x87badab097dbe883
+437, 0xada3fda85a53824f
+438, 0xef2791e8f48cd37a
+439, 0x3fe7fceb927a641a
+440, 0xd3bffd3ff031ac78
+441, 0xb94efe03da4d18fb
+442, 0x162a0ad8da65ea68
+443, 0x300f234ef5b7e4a6
+444, 0xa2a8b4c77024e4fb
+445, 0x5950f095ddd7b109
+446, 0xded66dd2b1bb02ba
+447, 0x8ec24b7fa509bcb6
+448, 0x9bede53d924bdad6
+449, 0xa9c3f46423be1930
+450, 0x6dfc90597f8de8b4
+451, 0xb7419ebc65b434f0
+452, 0xa6596949238f58b9
+453, 0x966cbade640829b8
+454, 0x58c74877bdcbf65e
+455, 0xaa103b8f89b0c453
+456, 0x219f0a86e41179a4
+457, 0x90f534fc06ddc57f
+458, 0x8db7cdd644f1affa
+459, 0x38f91de0167127ac
+460, 0xdcd2a65e4df43daa
+461, 0x3e04f34a7e01f834
+462, 0x5b237eea68007768
+463, 0x7ff4d2b015921768
+464, 0xf786b286549d3d51
+465, 0xaefa053fc2c3884c
+466, 0x8e6a8ff381515d36
+467, 0x35b94f3d0a1fce3c
+468, 0x165266d19e9abb64
+469, 0x1deb5caa5f9d8076
+470, 0x13ab91290c7cfe9d
+471, 0x3651ca9856be3e05
+472, 0xe7b705f6e9cccc19
+473, 0xd6e7f79668c127ed
+474, 0xa9faf37154896f92
+475, 0x89fbf190603e0ab1
+476, 0xb34d155a86f942d0
+477, 0xb2d4400a78bfdd76
+478, 0x7c0946aca8cfb3f0
+479, 0x7492771591c9d0e8
+480, 0xd084d95c5ca2eb28
+481, 0xb18d12bd3a6023e
+482, 0xea217ed7b864d80b
+483, 0xe52f69a755dd5c6f
+484, 0x127133993d81c4aa
+485, 0xe07188fcf1670bfb
+486, 0x178fbfe668e4661d
+487, 0x1c9ee14bb0cda154
+488, 0x8d043b96b6668f98
+489, 0xbc858986ec96ca2b
+490, 0x7660f779d528b6b7
+491, 0xd448c6a1f74ae1d3
+492, 0x178e122cfc2a6862
+493, 0x236f000abaf2d23b
+494, 0x171b27f3f0921915
+495, 0x4c3ff07652f50a70
+496, 0x18663e5e7d3a66ca
+497, 0xb38c97946c750cc9
+498, 0xc5031aae6f78f909
+499, 0x4d1514e2925e95c1
+500, 0x4c2184a741dabfbb
+501, 0xfd410364edf77182
+502, 0xc228157f863ee873
+503, 0x9856fdc735cc09fc
+504, 0x660496cd1e41d60e
+505, 0x2edf1d7e01954c32
+506, 0xd32e94639bdd98cf
+507, 0x8e153f48709a77d
+508, 0x89357f332d2d6561
+509, 0x1840d512c97085e6
+510, 0x2f18d035c9e26a85
+511, 0x77b88b1448b26d5b
+512, 0xc1ca6ef4cdae0799
+513, 0xcc203f9e4508165f
+514, 0xeaf762fbc9e0cbbe
+515, 0xc070c687f3c4a290
+516, 0xd49ed321068d5c15
+517, 0x84a55eec17ee64ee
+518, 0x4d8ee685298a8871
+519, 0x9ff5f17d7e029793
+520, 0x791d7d0d62e46302
+521, 0xab218b9114e22bc6
+522, 0x4902b7ab3f7119a7
+523, 0x694930f2e29b049e
+524, 0x1a3c90650848999f
+525, 0x79f1b9d8499c932b
+526, 0xfacb6d3d55e3c92f
+527, 0x8fd8b4f25a5da9f5
+528, 0xd037dcc3a7e62ae7
+529, 0xfecf57300d8f84f4
+530, 0x32079b1e1dc12d48
+531, 0xe5f8f1e62b288f54
+532, 0x97feba3a9c108894
+533, 0xd279a51e1899a9a0
+534, 0xd68eea8e8e363fa8
+535, 0x7394cf2deeca9386
+536, 0x5f70b0c80f1dbf10
+537, 0x8d646916ed40462
+538, 0xd253bb1c8a12bbb6
+539, 0x38f399a821fbd73e
+540, 0x947523a26333ac90
+541, 0xb52e90affbc52a37
+542, 0xcf899cd964654da4
+543, 0xdf66ae9cca8d99e7
+544, 0x6051478e57c21b6a
+545, 0xffa7dc975af3c1da
+546, 0x195c7bff2d1a8f5
+547, 0x64f12b6575cf984d
+548, 0x536034cb842cf9e1
+549, 0x180f247ce5bbfad
+550, 0x8ced45081b134867
+551, 0x532bbfdf426710f3
+552, 0x4747933e74c4f54d
+553, 0x197a890dc4793401
+554, 0x76c7cc2bd42fae2
+555, 0xdabfd67f69675dd0
+556, 0x85c690a68cdb3197
+557, 0xe482cec89ce8f92
+558, 0x20bc9fb7797011b1
+559, 0x76dc85a2185782ad
+560, 0x3df37c164422117a
+561, 0x99211f5d231e0ab0
+562, 0xef7fd794a0a91f4
+563, 0x419577151915f5fe
+564, 0x3ce14a0a7135dae3
+565, 0x389b57598a075d6a
+566, 0x8cc2a9d51b5af9aa
+567, 0xe80a9beffbd13f13
+568, 0x65e96b22ea8a54d8
+569, 0x79f38c4164138ede
+570, 0xd1955846cba03d81
+571, 0x60359fe58e4f26d6
+572, 0x4ea724f585f8d13e
+573, 0x316dfdbadc801a3c
+574, 0x20aa29b7c6dd66fe
+575, 0x65eaf83a6a008caa
+576, 0x407000aff1b9e8cb
+577, 0xb4d49bfb2b268c40
+578, 0xd4e6fe8a7a0f14a9
+579, 0xe34afef924e8f58e
+580, 0xe377b0c891844824
+581, 0x29c2e20c112d30c8
+582, 0x906aad1fe0c18a95
+583, 0x308385f0efbb6474
+584, 0xf23900481bf70445
+585, 0xfdfe3ade7f937a55
+586, 0xf37aae71c33c4f97
+587, 0x1c81e3775a8bed85
+588, 0x7eb5013882ce35ea
+589, 0x37a1c1692495818d
+590, 0x3f90ae118622a0ba
+591, 0x58e4fe6fea29b037
+592, 0xd10ff1d269808825
+593, 0xbce30edb60c21bba
+594, 0x123732329afd6fee
+595, 0x429b4059f797d840
+596, 0x421166568a8c4be1
+597, 0x88f895c424c1bd7f
+598, 0x2adaf7a7b9f781cb
+599, 0xa425644b26cb698
+600, 0x8cc44d2486cc5743
+601, 0xdb9f357a33abf6ba
+602, 0x1a57c4ea77a4d70c
+603, 0x1dea29be75239e44
+604, 0x463141a137121a06
+605, 0x8fecfbbe0b8a9517
+606, 0x92c83984b3566123
+607, 0x3b1c69180ed28665
+608, 0x14a6073425ea8717
+609, 0x71f4c2b3283238d7
+610, 0xb3d491e3152f19f
+611, 0x3a0ba3a11ebac5d2
+612, 0xddb4d1dd4c0f54ac
+613, 0xdb8f36fe02414035
+614, 0x1cf5df5031b1902c
+615, 0x23a20ed12ef95870
+616, 0xf113e573b2dedcbb
+617, 0x308e2395cde0a9fa
+618, 0xd377a22581c3a7da
+619, 0xe0ced97a947a66fb
+620, 0xe44f4de9cd754b00
+621, 0x2344943337d9d1bf
+622, 0x4b5ae5e2ea6e749c
+623, 0x9b8d2e3ef41d1c01
+624, 0x59a5a53ebbd24c6b
+625, 0x4f7611bf9e8a06fb
+626, 0xea38c7b61361cd06
+627, 0xf125a2bfdd2c0c7
+628, 0x2df8dcb5926b9ebb
+629, 0x233e18720cc56988
+630, 0x974c61379b4aa95e
+631, 0xc7fe24c1c868910b
+632, 0x818fd1affc82a842
+633, 0xcee92a952a26d38e
+634, 0x8962f575ebcbf43
+635, 0x7770687e3678c460
+636, 0xdfb1db4ed1298117
+637, 0xb9db54cb03d434d3
+638, 0x34aebbf2244257ad
+639, 0xd836db0cb210c490
+640, 0x935daed7138957cd
+641, 0x3cd914b14e7948fd
+642, 0xd0472e9ed0a0f7f0
+643, 0xa9df33dca697f75e
+644, 0x15e9ea259398721a
+645, 0x23eeba0f970abd60
+646, 0x2217fdf8bbe99a12
+647, 0x5ea490a95717b198
+648, 0xf4e2bfc28280b639
+649, 0x9d19916072d6f05c
+650, 0x5e0387cab1734c6a
+651, 0x93c2c8ac26e5f01e
+652, 0xb0d934354d957eb1
+653, 0xee5099a1eef3188c
+654, 0x8be0abca8edc1115
+655, 0x989a60845dbf5aa3
+656, 0x181c7ed964eee892
+657, 0x49838ea07481288d
+658, 0x17dbc75d66116b2e
+659, 0xa4cafb7a87c0117e
+660, 0xab2d0ae44cdc2e6e
+661, 0xdf802f2457e7da6
+662, 0x4b966c4b9187e124
+663, 0x62de9db6f4811e1a
+664, 0x1e20485968bc62
+665, 0xe9ac288265caca94
+666, 0xc5c694d349aa8c1a
+667, 0x3d67f2083d9bdf10
+668, 0x9a2468e503085486
+669, 0x9d6acd3dc152d1a3
+670, 0xca951e2aeee8df77
+671, 0x2707371af9cdd7b0
+672, 0x2347ae6a4eb5ecbd
+673, 0x16abe5582cb426f
+674, 0x523af4ff980bbccb
+675, 0xb07a0f043e3694aa
+676, 0x14d7c3da81b2de7
+677, 0xf471f1b8ac22305b
+678, 0xdb087ffff9e18520
+679, 0x1a352db3574359e8
+680, 0x48d5431502cc7476
+681, 0x7c9b7e7003dfd1bf
+682, 0x4f43a48aae987169
+683, 0x9a5d3eb66dedb3e9
+684, 0xa7b331af76a9f817
+685, 0xba440154b118ab2d
+686, 0x64d22344ce24c9c6
+687, 0xa22377bd52bd043
+688, 0x9dfa1bb18ca6c5f7
+689, 0xdccf44a92f644c8b
+690, 0xf623d0a49fd18145
+691, 0x556d5c37978e28b3
+692, 0xad96e32ce9d2bb8b
+693, 0x2e479c120be52798
+694, 0x7501cf871af7b2f7
+695, 0xd02536a5d026a5b8
+696, 0x4b37ff53e76ab5a4
+697, 0xdb3a4039caaeab13
+698, 0x6cbd65e3b700c7be
+699, 0x7367abd98761a147
+700, 0xf4f9ba216a35aa77
+701, 0xf88ca25ce921eb86
+702, 0xb211de082ec2cbf2
+703, 0xdd94aa46ec57e12e
+704, 0xa967d74ad8210240
+705, 0xdaa1fada8cfa887
+706, 0x85901d081c4488ee
+707, 0xcf67f79a699ef06
+708, 0x7f2f1f0de921ee14
+709, 0x28bc61e9d3f2328b
+710, 0x3332f2963faf18e5
+711, 0x4167ac71fcf43a6
+712, 0x843c1746b0160b74
+713, 0xd9be80070c578a5e
+714, 0xbd7250c9af1473e7
+715, 0x43f78afaa3647899
+716, 0x91c6b5dd715a75a5
+717, 0x29cc66c8a07bfef3
+718, 0x3f5c667311dc22be
+719, 0x4f49cd47958260cd
+720, 0xbef8be43d920b64e
+721, 0x7a892a5f13061d8b
+722, 0x9532f40125c819b1
+723, 0x924fca3045f8a564
+724, 0x9b2c6442453b0c20
+725, 0x7e21009085b8e793
+726, 0x9b98c17e17af59d2
+727, 0xba61acb73e3ae89a
+728, 0xb9d61a710555c138
+729, 0xc2a425d80978974b
+730, 0xa275e13592da7d67
+731, 0xe962103202d9ad0f
+732, 0xbdf8367a4d6f33fd
+733, 0xe59beb2f8648bdc8
+734, 0xb4c387d8fbc4ac1c
+735, 0x5e3f276b63054b75
+736, 0xf27e616aa54d8464
+737, 0x3f271661d1cd7426
+738, 0x43a69dbee7502c78
+739, 0x8066fcea6df059a1
+740, 0x3c10f19409bdc993
+741, 0x6ba6f43fb21f23e0
+742, 0x9e182d70a5bccf09
+743, 0x1520783d2a63a199
+744, 0xba1dcc0c70b9cace
+745, 0x1009e1e9b1032d8
+746, 0xf632f6a95fb0315
+747, 0x48e711c7114cbfff
+748, 0xef281dcec67debf7
+749, 0x33789894d6abf59b
+750, 0x6c8e541fffbe7f9c
+751, 0x85417f13b08e0a88
+752, 0x9a581e36d589608f
+753, 0x461dca50b1befd35
+754, 0x5a3231680dde6462
+755, 0xcc57acf729780b97
+756, 0x50301efef62e1054
+757, 0x675d042cd4f6bbc9
+758, 0x1652fdd3794384c9
+759, 0x1c93bbeeb763cd4d
+760, 0x44b7240c4b105242
+761, 0x4c6af2a1b606ccfb
+762, 0x18fc43ece2ec1a40
+763, 0x859a5511aeae8acb
+764, 0x2f56826f1996ad2f
+765, 0xa8e95ce8bb363bdf
+766, 0xf4da396054e50e4b
+767, 0x5493865e9895883c
+768, 0x768e4c8b332ac0e3
+769, 0x32195d2aa583fca5
+770, 0xf2f353f21266bc15
+771, 0x43cddf1d021307d
+772, 0x6031e3aa30300e4a
+773, 0x4f1298469ac6088f
+774, 0x4b4d450bafac574e
+775, 0x23e1cf9c0582a22b
+776, 0x2e9036980db49cd0
+777, 0xe4e228b113c411b2
+778, 0x8bddcdb82b51706
+779, 0xd2a7ea8288593629
+780, 0x67fe90e98fdda61
+781, 0x7b63494dba95717b
+782, 0x105625904510d782
+783, 0xdf4aa2242454e50a
+784, 0x32541d6cd7d6c7e3
+785, 0x5661fb432591cf3b
+786, 0xce920a5ed047bce7
+787, 0xed4178a3c96eea8f
+788, 0xe378cd996e39863b
+789, 0x169e1fdc8e2b05e1
+790, 0xaee1812ef7149a96
+791, 0x648571c7453d12c5
+792, 0xb7b6bc9328573c43
+793, 0xe7fb969078e270d7
+794, 0xdfc2b1b8985f6e6f
+795, 0x862b6527ee39a1aa
+796, 0x1ee329aea91d7882
+797, 0x20d25324f2fe704
+798, 0xbfcc47401fc3bbfd
+799, 0x1515cdc8d48b2904
+800, 0xbd6eefe86284261c
+801, 0x9b1f28e3b35f22ee
+802, 0x842a29d35e5aecda
+803, 0xf2346109ad370765
+804, 0x24d68add5a71afd9
+805, 0x4a691421613d91e2
+806, 0x60e3058b3c244051
+807, 0x79194905cdaa5de8
+808, 0xe0e2df35c01e8987
+809, 0xe29b78beffbb5e4a
+810, 0xcdcdbc020218c19e
+811, 0x5ae0af8c16feae43
+812, 0x8109292feeaf14fa
+813, 0x34113f7508dfa521
+814, 0xc062ac163f56730a
+815, 0xf1660e66ec6d4c4c
+816, 0x5966c55f60151c80
+817, 0x3865ae8ec934b17
+818, 0x472a7314afb055ec
+819, 0x7a24277309a44a44
+820, 0x556e02dd35d38baa
+821, 0x9849611a1bc96ec1
+822, 0xd176f5d5a8eb0843
+823, 0x44db12ec60510030
+824, 0x272e3a06a0030078
+825, 0x7c4764dbefc075ea
+826, 0x910712f3735c1183
+827, 0xd49a2da74ae7aff6
+828, 0xcf9b3e6e8f776d71
+829, 0x27789fe3ec481a02
+830, 0x86659f82c6b5912b
+831, 0xe044b3dbf339158c
+832, 0x99d81f6bb62a37b0
+833, 0x5f5830c246fada9a
+834, 0xe68abab1eeb432cb
+835, 0x49c5c5ace04e104
+836, 0x1ac3871b3fc6771b
+837, 0x773b39f32d070652
+838, 0x9c4138c2ae58b1f3
+839, 0xac41c63d7452ac60
+840, 0x9248826b245359e1
+841, 0x99bba1c7a64f1670
+842, 0xe0dc99ff4ebb92f2
+843, 0x113638652740f87c
+844, 0xebf51e94da88cfc
+845, 0x5441c344b81b2585
+846, 0xe1e69e0bc2de652a
+847, 0xe9ab6d64ae42ed1e
+848, 0x879af8730e305f31
+849, 0x36b9ad912c7e00d6
+850, 0x83ef5e9fca853886
+851, 0xda54d48bb20ea974
+852, 0x32c6d93aefa92aa2
+853, 0x4e887b2c3391847d
+854, 0x50966e815f42b1b8
+855, 0x53411ac087832837
+856, 0x46f64fef79df4f29
+857, 0xb34aae3924cd272c
+858, 0xf5ad455869a0adbe
+859, 0x8351ded7144edac8
+860, 0xeb558af089677494
+861, 0x36ed71d69293a8d6
+862, 0x659f90bf5431b254
+863, 0x53349102b7519949
+864, 0x3db83e20b1713610
+865, 0x6d63f96090556254
+866, 0x4cc0467e8f45c645
+867, 0xb8840c4bd5cd4091
+868, 0xbd381463cc93d584
+869, 0x203410d878c2066d
+870, 0x2ebea06213cf71c8
+871, 0x598e8fb75e3fceb4
+872, 0xdcca41ceba0fce02
+873, 0x61bf69212b56aae5
+874, 0x97eed7f70c9114fa
+875, 0xf46f37a8b7a063f9
+876, 0x66c8f4ffe5bd6efa
+877, 0xe43fd6efda2d4e32
+878, 0x12d6c799e5ad01de
+879, 0x9ac83e7f8b709360
+880, 0xbbb7bb3c1957513d
+881, 0x7f87c08d4b3796b0
+882, 0x9a7d1d74b6aa4a5c
+883, 0xa4314530ff741b6f
+884, 0x99a80c6b6f15fca8
+885, 0xd2fec81d6d5fc3ce
+886, 0x15a98be1cc40cea
+887, 0x98693eb7719366f3
+888, 0x36ccdc2a9e9d4de8
+889, 0x3c8208f63d77df25
+890, 0xca2e376e2343df6
+891, 0xcc9b17cbb54420c6
+892, 0x8724c44a64d7dcb8
+893, 0x9d00c6949ff33869
+894, 0xf4f8e584d2699372
+895, 0x88f4748cdd5a2d53
+896, 0xe215072a1205bc6d
+897, 0x190934fe6d740442
+898, 0x7fac5c0ab2af106d
+899, 0x1b86633a0bd84fa1
+900, 0x1293e54318492dfb
+901, 0x433324fd390f34b9
+902, 0x4c5eb2c67a44643b
+903, 0x59a6e281c388b0dd
+904, 0xe78e03f9c44623b7
+905, 0x91307a93c768fc3d
+906, 0xde8867b004d8e3ff
+907, 0xdf52c3f57b7c5862
+908, 0x993f3e1d10358a92
+909, 0x9ccb10bc3e18662d
+910, 0x45093ce48a114c73
+911, 0xd59d05979d26330a
+912, 0x417c0e03300119a9
+913, 0x1c336500f90cde81
+914, 0x1c8ccd29ead9b85b
+915, 0xb76baf3e55d4d950
+916, 0x133ad6196c75fd7e
+917, 0x34200b0cde7ed560
+918, 0x9c7c3dacb213c8d9
+919, 0xd97563c4fd9bf1b6
+920, 0x5d910e871835b6cb
+921, 0x7d46c4733a16bdf9
+922, 0xe41d73194ddc87b2
+923, 0x7d3d8a0855a465a9
+924, 0x70c2a8b5d3f90c0f
+925, 0x9e7565ca5dccfe12
+926, 0x2c0acb4577aa51b1
+927, 0x3d2cd211145b79c7
+928, 0x15a7b17aa6da7732
+929, 0xab44a3730c27d780
+930, 0xf008bd6c802bde3a
+931, 0x82ed86ddf3619f77
+932, 0xaabe982ab15c49f9
+933, 0x9bcad8fa6d8e58a4
+934, 0x8f39ed8243718aa1
+935, 0xe9489340e03e3cb6
+936, 0xc722314f5eefb8d0
+937, 0x870e8869a436df59
+938, 0x4dae75b8087a8204
+939, 0xe1d790f6ec6e425b
+940, 0xafd39ea1b1d0ed09
+941, 0xdf2c99e464ddf08f
+942, 0x74936d859ab9644d
+943, 0x3871302164250e73
+944, 0x764b68921e911886
+945, 0x2a1d024b26bb9d66
+946, 0x797fba43918e75b4
+947, 0x62ec6d24ccca335b
+948, 0xf4bd8b951762b520
+949, 0x9d450dede9119397
+950, 0x5393a26d10f8c124
+951, 0x6b74769392896b57
+952, 0x7f61dbcc0e328581
+953, 0x64e1df3884d0d94
+954, 0xba77dcdf23738c37
+955, 0xf8e288bc0a177475
+956, 0x4a8abfd1702ecb7d
+957, 0x53f22886694736a7
+958, 0x8fc982597ced3e3
+959, 0x1bc46090f820fff7
+960, 0x8bd31f965d02229f
+961, 0x65cd0cb29996ee53
+962, 0x702e0f4fcf8c2e9f
+963, 0x293b77bff307a9a0
+964, 0x125a986b8b305788
+965, 0x416b0eea428ebf3c
+966, 0xeac85421ab0e8469
+967, 0x7f5496095019aa68
+968, 0x1a96d7afbc708e0
+969, 0xb91262e6766e01e1
+970, 0xd0a549cc4ccc6954
+971, 0x75a9a073f50c8a0d
+972, 0xae275d2c1c6cd23c
+973, 0xcf159b5ec5d28fd4
+974, 0x75d0838ce9b92b
+975, 0xd4eddcee6dc4677f
+976, 0x6a0a8ad5df6b75b8
+977, 0x6f3fd0ef0f13ecc4
+978, 0xb75a5826c1a8f8a8
+979, 0xd47098bbc7943766
+980, 0x3d4ddd62d5f23dd1
+981, 0x760a904e4583841c
+982, 0x2afeb5022b4cf1f
+983, 0x66d5f653729f0a13
+984, 0x9a6a5ab62980d30f
+985, 0xc332f5643bbf8d5b
+986, 0x848fb702e4056a90
+987, 0xa057beaf3f9e8c5f
+988, 0x6cc603e4560a6c6a
+989, 0xec761811a7b23211
+990, 0xb14aa4090a82aaa5
+991, 0xe29d9d028a5b2dbb
+992, 0x5564e53738d68f97
+993, 0xfabca36542eaaf3b
+994, 0xb9912fcb782020a2
+995, 0xe865e01b349284fd
+996, 0x540b5ff11c5f9274
+997, 0x3463f64e1e7451dc
+998, 0xe15d3e2f33b735f8
+999, 0xf5433336eadef6e
diff --git a/numpy/random/tests/data/sfc64-testset-2.csv b/numpy/random/tests/data/sfc64-testset-2.csv
new file mode 100644
index 000000000..70aebd5d5
--- /dev/null
+++ b/numpy/random/tests/data/sfc64-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0x91959e5fb96a6332
+1, 0x3c1dd8a25a7e9f21
+2, 0x657bdffc99798d9e
+3, 0x1a04de320b19e022
+4, 0x65b92af0e5f3c61c
+5, 0x9c84070ce8f743c0
+6, 0xbb10e573693cdb25
+7, 0xd65ea9e76b37fb6b
+8, 0x503efd0e76c8ae66
+9, 0xd711dcd04c26d0f
+10, 0x12f53f435814ac8c
+11, 0xb392cd402cfc82bd
+12, 0x461764550e06c889
+13, 0x716a48b3514e6979
+14, 0xdd0a322213c18ad7
+15, 0x6673a8ca0a05c4d7
+16, 0x2992ef333437f844
+17, 0xc4aaf7e8240b2aad
+18, 0x6ab0a1af1f41474f
+19, 0xb0bae400c226941d
+20, 0xe5f80c2eeeab48c6
+21, 0x3832c6a93a4024bf
+22, 0x280bd824fabe8368
+23, 0x66b626228321e5ff
+24, 0xe0bdfba5325a307e
+25, 0x3a5f65c6ef254e05
+26, 0x99ea12503cb02f94
+27, 0x5d01fd2db77d420b
+28, 0x6959bf5f36b2368d
+29, 0xd856e30c62b5f5be
+30, 0xe33233e1d8140e66
+31, 0xb78be619d415fa8d
+32, 0x4f943bb2cc63d3b
+33, 0x9b1460b290952d81
+34, 0x19205d794826740e
+35, 0x64617bd9d7a6a1ff
+36, 0x30442124b55ea76a
+37, 0xebbbc3b29d0333fc
+38, 0x39235a0fe359751c
+39, 0xf9629768891121aa
+40, 0x32052f53f366e05a
+41, 0x60cc5b412c925bc8
+42, 0xf8b7ecda1c0e5a9
+43, 0x195f036e170a2568
+44, 0xfe06d0381a9ca782
+45, 0x919d89e8b88eebbf
+46, 0xa47fb30148cf0d43
+47, 0x5c983e99d5f9fd56
+48, 0xe7492cdb6a1d42cd
+49, 0xf9cfe5c865b0cfd8
+50, 0x35b653367bbc3b99
+51, 0xb1d92f6f4d4e440b
+52, 0x737e1d5bd87ed9c0
+53, 0x7a880ca1498f8e17
+54, 0x687dae8494f9a3f7
+55, 0x6bae1989f441d5d7
+56, 0x71ad3fa5a9195c2e
+57, 0x16b3969779f5d03
+58, 0xd1bce2ac973f15b3
+59, 0xa114b1ee2ce0dcdd
+60, 0x270d75c11eb1b8d5
+61, 0xc48ffa087c0a7bc
+62, 0xaaf9dc48cda9848d
+63, 0x8111cf10ef6e584d
+64, 0x6736df6af40ee6f4
+65, 0x1a1a111682fbf98d
+66, 0xeb217658e1cb3b5d
+67, 0xcaf58a8b79de9dec
+68, 0x25d0ffd63c88d7a1
+69, 0x4c498cd871b7f176
+70, 0x4069a6156eb0cf3c
+71, 0xdf012f12edcdd867
+72, 0x7734c0ac8edb1689
+73, 0xed6960ac53dbc245
+74, 0x305e20da8868c661
+75, 0x5f0c7a3719956f95
+76, 0x66842bbe3b28895
+77, 0xb608bc9a31eac410
+78, 0xfcb17d5529503abd
+79, 0x829ae5cbc29b92ee
+80, 0x17f2f0027bc24f3a
+81, 0x435926c33d8f44cc
+82, 0x3ab899327098dbec
+83, 0xaf78573b27f8ead8
+84, 0xa8b334fabcf8dc60
+85, 0xcdf3b366a6a303db
+86, 0x8da9379dd62b34c8
+87, 0xb0ba511955f264a7
+88, 0x9d72e21a644f961d
+89, 0xfac28382e2e7e710
+90, 0xd457065f048410aa
+91, 0x1cae57d952563969
+92, 0x5a160a6223253e03
+93, 0x2c45df736d73c8bd
+94, 0x7f651ebc6ad9cec5
+95, 0x77a6be96c7d2e7e7
+96, 0x1721fb1dbfd6546a
+97, 0xf73f433ecff3c997
+98, 0xed1e80f680965bfe
+99, 0x6705ad67a3003b30
+100, 0xac21134efcadb9f7
+101, 0x4d2ba0a91d456ac
+102, 0x59da7b59434eb52b
+103, 0x26c1d070fd414b5f
+104, 0xed7079ddfce83d9a
+105, 0x9277d21f88e0fb7a
+106, 0xfae16b9a8d53d282
+107, 0xb08a0e2e405fdf7d
+108, 0x2ea20df44229d6ec
+109, 0x80e4634cd3612825
+110, 0xbe62e8aeba8f8a1a
+111, 0x4981209769c190fb
+112, 0xcec96ef14c7e1f65
+113, 0x73fe4457b47e7b53
+114, 0x1d66300677315c31
+115, 0xe26821290498c4cc
+116, 0xf6110248fd8fb1c5
+117, 0x30fd7fe32dbd8be3
+118, 0x534ec9b910a2bd72
+119, 0x8f9bfe878bbf7382
+120, 0x4f4eb5295c0c2193
+121, 0xdeb22f03a913be9e
+122, 0x40f716f8e2a8886c
+123, 0xc65007d0e386cdb1
+124, 0x9bdd26d92b143a14
+125, 0xf644b0b77ea44625
+126, 0x75f5a53f6b01993a
+127, 0xfe803e347bf41010
+128, 0x594bff5fa17bc360
+129, 0x3551edfb450373c7
+130, 0x898f9dad433615db
+131, 0x923d2406daa26d49
+132, 0x99e07faccbc33426
+133, 0x7389f9ff4470f807
+134, 0xdc2a25957c6df90b
+135, 0x33c6d8965ef3053f
+136, 0x51a8f07e838f1ab
+137, 0x91c5db369380274f
+138, 0xc37de65ac56b207e
+139, 0xfcc6d2375dde7f14
+140, 0xa4e6418bff505958
+141, 0x4b8b9f78e46953c4
+142, 0x255ab2e0f93cf278
+143, 0xdf650717af3d96ef
+144, 0x2caa21cba3aae2b2
+145, 0xce7e46c6f393daa4
+146, 0x1d5b3573f9997ac7
+147, 0x5280c556e850847d
+148, 0x32edc31bef920ad7
+149, 0xefaa6b0b08cf2c6
+150, 0x5151c99d97b111c5
+151, 0x35ccf4bf53d17590
+152, 0xa210d7bd8697b385
+153, 0xa9419f95738fbe61
+154, 0xdeccf93a1a4fdc90
+155, 0xd0ea3365b18e7a05
+156, 0x84122df6dcd31b9a
+157, 0x33040a2125cea5f5
+158, 0xfe18306a862f6d86
+159, 0xdb97c8392e5c4457
+160, 0xc3e0fa735e80e422
+161, 0x7d106ff36467a0c1
+162, 0xb9825eecc720a76d
+163, 0x7fefc6f771647081
+164, 0xf5df3f5b3977bf13
+165, 0x18fb22736d36f1e0
+166, 0xadc4637b4953abfc
+167, 0x174e66d3e17974bd
+168, 0xf1614c51df4db5db
+169, 0x6664ecde5717b293
+170, 0xd5bc5b6839265c26
+171, 0xf6ca9ce1af3f1832
+172, 0xca696789a9d506ea
+173, 0x7399c246c8f9d53
+174, 0xadf49049626417e2
+175, 0xbcd84af37d09ab91
+176, 0xbb41c177f3a3fa45
+177, 0x592becc814d55302
+178, 0xa88b4e65f6cfe5f7
+179, 0xa0a55e34ff879426
+180, 0x3c2ea6aa725b42b7
+181, 0x65ac4a407b1f9521
+182, 0xde63d53f7e88b556
+183, 0x18bc76696d015f40
+184, 0xd1363f2cd4c116a8
+185, 0x2fe859be19a48e4a
+186, 0x83d6099b1415e656
+187, 0x43f2cbc1a4ee6410
+188, 0xb2eca3d3421c533d
+189, 0xc52b98ea3f031f5d
+190, 0xfe57eb01da07e9d1
+191, 0xf9377883537a6031
+192, 0x364030c05dac7add
+193, 0x6815cb06b35d4404
+194, 0xceae2d4ce31894be
+195, 0xc602bcdf6062bf6a
+196, 0xc8e4bd8dcc6062e3
+197, 0x9c29e87b92a1a791
+198, 0x41e626b871ca9651
+199, 0x325c3d1fb8efbcd8
+200, 0x7dbbacf8e3419fb3
+201, 0x3602e72516bb7319
+202, 0x537a008ebd94d24b
+203, 0xda7714fc9d4d161d
+204, 0x1c8c73700e1b621b
+205, 0x2749b80937d6c939
+206, 0x76ee6abac5b14d33
+207, 0xf18d1e92cb6a8b5c
+208, 0x6ce9579d9291c721
+209, 0x60523c745a40e58
+210, 0x637f837fcc901757
+211, 0x2ff71b19661dc5b3
+212, 0x393ab586326ad16f
+213, 0xa0970ea30fe742b7
+214, 0x570222d7f27fe5ae
+215, 0x3b5806d43fd38629
+216, 0x129a0ad7420180c5
+217, 0x1c4726355778d52c
+218, 0x7c1459cf77656499
+219, 0xfe038a0932132069
+220, 0x4c4cc317a937483a
+221, 0xa333d24067e926ba
+222, 0x401d9b6ab37f6ef2
+223, 0x87ad0e491ebe4a2a
+224, 0xfc02f312e72d121d
+225, 0xfde715b3b99767b2
+226, 0xd111c342ba521c92
+227, 0x83b221b10879c617
+228, 0x6a1bf5c01fdf4277
+229, 0x166bfc0c3f5892ee
+230, 0x4608d556d7c57856
+231, 0x8d786857c95ece49
+232, 0x2d357445a1aca4ac
+233, 0x79620dae28ecd796
+234, 0x90e715dc0f2201c4
+235, 0x173b68b4c9f4b665
+236, 0x4e14d040ebac4eef
+237, 0xbd25960b4b892e
+238, 0x911a199db6f1989d
+239, 0xfe822d7c601fd2e0
+240, 0x9b4c1d58d8223a69
+241, 0x907c1891283843b0
+242, 0xf4868bf54061c4b2
+243, 0x17f8cd1fc24efd85
+244, 0xd44253f9af14c3aa
+245, 0x16d0da0cb911d43c
+246, 0x3c6a46615828e79a
+247, 0x498591c1138e11a5
+248, 0xcc0f26336d0d6141
+249, 0x4d3ebc873212309a
+250, 0x16bad7792d5c2c6a
+251, 0x474215a80b2bbd11
+252, 0x7159848abd8492fc
+253, 0x359341c50973685f
+254, 0x27512ee7bf784a4a
+255, 0x45228ea080f70447
+256, 0x880cab616500d50e
+257, 0x12fae93f9830d56e
+258, 0x6744ee64348d9acd
+259, 0x484dada28cd2a828
+260, 0x98491d0729e41863
+261, 0x2f15aac43c2863b0
+262, 0x5727a34d77a1da0f
+263, 0xa435cebef6a62eed
+264, 0xd211697d57b053b0
+265, 0x65aa757b68bd557
+266, 0xe3a1b7a2d8a3e06a
+267, 0x2adf64e67252a7a9
+268, 0xadadcb75cadee276
+269, 0x7934bc57ac8d97bf
+270, 0xccff0d0f412e0606
+271, 0x101a82aa3e8f3db9
+272, 0xb0f2498094b4575c
+273, 0xba2561d9ef26ed8a
+274, 0xfbcd1268fc3febe1
+275, 0x9aa10bb19eb152e0
+276, 0xf496217a601a6d72
+277, 0xe4be1e4f2fa91363
+278, 0x473a602bf3dd68eb
+279, 0xfe8ed2a48c26f4b5
+280, 0x20e94b1a00159476
+281, 0x93e1cb1c6af86ec7
+282, 0x4fcba3898f7442ba
+283, 0x5150c3a3d94891df
+284, 0x91cfce6c85b033ea
+285, 0x625e8a832a806491
+286, 0x28c97ba72e3ec0b2
+287, 0x8e172de217c71ea1
+288, 0x926b80216c732639
+289, 0x28b19431a649ae3d
+290, 0x57c039a6e95a3795
+291, 0xfbc354182fe52718
+292, 0x819dfd7c7d534cef
+293, 0xabb4093a619ed44f
+294, 0xe785b7ac6f656745
+295, 0xb647b4588b2f942f
+296, 0x64cf870a14c72d27
+297, 0x6d4a4a2a0ba9b37e
+298, 0x78bfb0427d7ce6b0
+299, 0x8dcc72b8bfc79ac6
+300, 0x1c14d915d5e76c99
+301, 0xaf48ddea6f096d79
+302, 0x51b39b67aa130d8
+303, 0x1aeeb39d4def06de
+304, 0xd678092ffedfdd27
+305, 0x8f54787f325111d3
+306, 0xf2ca2e827beaa6bc
+307, 0x339d134099e98545
+308, 0x1f6a8a7b33942e43
+309, 0x952c8065dbef669a
+310, 0xe066aeb6690147f7
+311, 0xed25aa92cf58ebb6
+312, 0x7601edce215ef521
+313, 0xed1c5b396abd9434
+314, 0x4fd1e407535de9d5
+315, 0xccc8315a0d4d1441
+316, 0x85753e250bb86976
+317, 0xf232e469378761c3
+318, 0x81d691b8e9aef3c6
+319, 0x224a2f9cab0ad0e
+320, 0x978f3d3e50007f4e
+321, 0xd3713e6a6c0cbe60
+322, 0xcce8f1eadd41f80d
+323, 0x34bda028a97d469
+324, 0x90e242fdf0f59183
+325, 0x4d749754fbc5f092
+326, 0x4399f5b7851cc87b
+327, 0xcb921a5f25f6c5d7
+328, 0x120bf5d0162101
+329, 0x1304cc2aa352735a
+330, 0xf7236c5d0d5d417b
+331, 0xc31b320fc1654306
+332, 0xb468c6b23f3fb4e7
+333, 0xb5985b5bfaca4166
+334, 0x898285a1cd2f8375
+335, 0xa13493da372aa7c9
+336, 0x15c80c09c12634e7
+337, 0x9b765c5cc9d438bd
+338, 0xee7da816a9201dcb
+339, 0x92e269f73b5a248e
+340, 0xa8086c5de81400ce
+341, 0xe0053901853d42be
+342, 0x821df32c012f433e
+343, 0x17a6d69ca37387c7
+344, 0x2b10044bfba3501f
+345, 0x8dfd262afc2e8515
+346, 0xd68c2c7b60226371
+347, 0xe81ac114e4416774
+348, 0x5896d60061ebc471
+349, 0xa996e3147811dbd1
+350, 0xa819c7b80ecb3661
+351, 0x982ad71b38afbc01
+352, 0xab152b65aa17b7fe
+353, 0x4582bc282ef187ef
+354, 0xab5a17fe8d9bc669
+355, 0x83664fa9cb0284b7
+356, 0x234c4b0091968f52
+357, 0x8ab5f51805688d37
+358, 0xe9e11186e0c53eda
+359, 0x10df37ef1de2eccf
+360, 0x780f1b0d52db968f
+361, 0x50bd4ff292872cd5
+362, 0x51e681c265f5ad0
+363, 0x842c49660a527566
+364, 0x6e56ee026e9eda87
+365, 0x4cf39e40d8c80393
+366, 0x13e466df371f7e1f
+367, 0xf2ce1799f38e028e
+368, 0x833c8db7adc6ff0e
+369, 0xc6e189abc2ec98f
+370, 0xafebb3721283fec5
+371, 0xb49bc1eb5cc17bdc
+372, 0xf1d02e818f5e4488
+373, 0xe5e9d5b41a1dd815
+374, 0xce8aca6573b1bfe5
+375, 0x9b0a5d70e268b1d5
+376, 0xf3c0503a8358f4de
+377, 0x2681605dd755669d
+378, 0xea265ca7601efc70
+379, 0xa93747f0a159439f
+380, 0x62a86ede78a23e50
+381, 0xac8a18935c3d063c
+382, 0x729c0a298f5059f5
+383, 0xbbf195e5b54399f4
+384, 0x38aa9d551f968900
+385, 0x3b3e700c58778caa
+386, 0x68e6e33c4443957a
+387, 0x7c56fc13eb269815
+388, 0xaf7daca39711804a
+389, 0x50fde6d10f9544b3
+390, 0xf3d37159f6f6c03d
+391, 0x82d298f5c1a71685
+392, 0x478661ac54c5002c
+393, 0x6053768e1a324ae0
+394, 0xde8fb4a7e56707ea
+395, 0xaa2809301faa8cf4
+396, 0x690a8d49fedd0722
+397, 0xe17c481b9c217de9
+398, 0x60d1d8a2b57288e3
+399, 0x149adfaadc6b0886
+400, 0xa3c18b6eb79cd5fa
+401, 0x5774e3a091af5f58
+402, 0x2acca57ff30e5712
+403, 0x94454d67367c4b0c
+404, 0x581b2985ac2df5ca
+405, 0x71618e50744f3e70
+406, 0x270a7f3bd9a94ae6
+407, 0x3ef81af9bb36cd7b
+408, 0x8a4a2592875254aa
+409, 0x704ac6086fbb414a
+410, 0xda774d5d3f57414d
+411, 0xe20d3358b918ae9e
+412, 0x934a6b9f7b91e247
+413, 0xf91649cde87ec42c
+414, 0x248cec5f9b6ced30
+415, 0x56791809fd8d64ba
+416, 0xf502b2765c1395f
+417, 0x6b04ec973d75aa7f
+418, 0xb0339f2794bb26f
+419, 0x4c524636efbaea49
+420, 0x6bbf3876e9738748
+421, 0xf686524e754e9e24
+422, 0x8dafa05a42d19cd3
+423, 0xc5f069ab2434008e
+424, 0x4fd64cc713cba76
+425, 0xdbf93450c881ed5f
+426, 0x492e278ebabb59a2
+427, 0x993fddfde4542642
+428, 0xecde68a72c8d4e52
+429, 0xe0760b3074c311fd
+430, 0x68dc0e7e06528707
+431, 0x52b50edf49c0fdc7
+432, 0xb2bd4185c138f412
+433, 0x431496d7e1d86f3
+434, 0xa4e605b037e26c44
+435, 0x58236ae1f0aca2b5
+436, 0x26c72c420fc314d8
+437, 0x20134e982ab99a2b
+438, 0x544b59b8b211374b
+439, 0x1301c42f3a14d993
+440, 0x52a6ea740f763b0f
+441, 0xf209d70c2bebf119
+442, 0xac66a4ebc2aa1be
+443, 0x683713ed35878788
+444, 0x2b5578acec06b80c
+445, 0x86428efa11c45b36
+446, 0xb49010adb17d291e
+447, 0x73b686bd8664b6be
+448, 0x6d28ebf57b6884cc
+449, 0x9712091230ff58d9
+450, 0xc9c91f74c38b286
+451, 0x776310ac41dc008e
+452, 0x2f3739df0bf6a88e
+453, 0x5792dc62b94db675
+454, 0x5715910d024b06af
+455, 0xeb1dd745458da08
+456, 0xfce7b07ccfa851a7
+457, 0xc305f1e983ac368
+458, 0x485aa9519ac00bb0
+459, 0xa5354f6589fb0ea0
+460, 0x32fee02dfdbf4454
+461, 0x4d1ddc304bbefaaa
+462, 0x789a270a1737e57e
+463, 0x9f3072f4b1ed8156
+464, 0x4de3c00e89058120
+465, 0xb00a02529e0a86fa
+466, 0x539f6f0edd845d9a
+467, 0x85e578fe15a8c001
+468, 0xa12c8e1a72cce7d8
+469, 0xc6908abbc2b1828
+470, 0xcf70090774cbb38c
+471, 0x3b636a6977b45d4a
+472, 0xf0a731b220680b57
+473, 0x18973929f51443a8
+474, 0xe93e1fbe7eadabe
+475, 0x8233730f0a6dfa02
+476, 0x66e50b6919b0ab74
+477, 0xb1aba87c97fd08a2
+478, 0xd4dffc1fbc117ad6
+479, 0x6f7fa65724b96e6a
+480, 0x4bd5800dee92e0fa
+481, 0xe18a959db6256da
+482, 0xe53a291bc66df487
+483, 0xb7ec306a08651806
+484, 0x1847a6b80d2821e1
+485, 0xda50391283b14d39
+486, 0xacc4d3cd7cceb97a
+487, 0x57f70185165b7bc6
+488, 0x302b6d597c3aaba7
+489, 0xa47f32d037eab51e
+490, 0xe1509b4408abc559
+491, 0x4f30a1d7c2934157
+492, 0x2ad03e6c60b650b2
+493, 0x334d9c337b0a9064
+494, 0xc7f442821e7aac12
+495, 0xbcdeb09298694cdd
+496, 0xe42402389f8f0fb4
+497, 0xe5de56af539df727
+498, 0x7017f9b2101ee240
+499, 0x1ee5e68d5b10001d
+500, 0x436229051836387a
+501, 0xcd532d6d6ec38fb7
+502, 0x30a66606fdf38272
+503, 0xfdaa2ab9cf798496
+504, 0x4277b4adec70e7df
+505, 0x72cfc30256e0eaef
+506, 0x3c3359fd9bd34917
+507, 0xb7aa89598856efb0
+508, 0xf72226f8bf299ef5
+509, 0x258c499275a4356f
+510, 0x999a56bfc7f20d76
+511, 0x2b3e7432e20c18b
+512, 0x2d1251332f760cb5
+513, 0x7420e0eea62157c5
+514, 0xe85c895aa27cec3d
+515, 0x27a0545c7020d57c
+516, 0xc68638a65b4fff0d
+517, 0xfda473983a4ea747
+518, 0xd19fe65fb4c06062
+519, 0x6b1374e050ee15e4
+520, 0x80065ecd49bc4bef
+521, 0x4ee655954bc838de
+522, 0xe8fb777504a72299
+523, 0x86b652ea70f4bdde
+524, 0xcdc9e0fbde7e4f33
+525, 0x352c0a50cd3ac56
+526, 0x4b8605d368be75dc
+527, 0x1ac9ea8129efbc37
+528, 0x470325faa99f39c5
+529, 0x25dd7ef9adccf7a1
+530, 0x5ae2c7a03e965816
+531, 0xf733d2df59dacc7d
+532, 0xa05bbf0a8a1a7a70
+533, 0xe8aa3f102846ef5f
+534, 0xc9b85ec49ae71789
+535, 0xb904c14ed1cb1936
+536, 0x5ae618230b5f0444
+537, 0x97987fe47b5d7467
+538, 0xabb3aca8865ca761
+539, 0x38bfdf29d4508228
+540, 0x353654f408353330
+541, 0xeb7e92930ae4ef0d
+542, 0xec50f1a7ca526b96
+543, 0xd5e2dc08b5697544
+544, 0x24c7fd69d5ec32df
+545, 0x6f7e1095568b8620
+546, 0x6ed9c16ca13b3c8
+547, 0xe676ef460002130f
+548, 0xa3a01a3992c4b430
+549, 0xe2130406c3b1f202
+550, 0xa8f7263e2aedcd20
+551, 0xc45d71ef2e35f507
+552, 0x37155594021da7ba
+553, 0x22dc94f19de73159
+554, 0x7969fc6bffc5443f
+555, 0x97def7e44faa6bfe
+556, 0x8b940f5e8931d71f
+557, 0xd95b1dd3f1a3fdd5
+558, 0x1c83bfdca615701a
+559, 0xb7fcb56279ceca6b
+560, 0xd84f8950f20dcd0
+561, 0xb03343698de3cbe0
+562, 0xf64565d448d71f71
+563, 0xda52b4676e0ae662
+564, 0xda39c2c05b4ffb91
+565, 0xb35e2560421f6a85
+566, 0x1a7b108d48ac3646
+567, 0xc4e264dc390d79ed
+568, 0xa10727dfd9813256
+569, 0x40d23154e720e4f7
+570, 0xd9fa7cd7e313e119
+571, 0xcbf29107859e6013
+572, 0xc357338553d940b7
+573, 0x2641b7ab0bdfcbaa
+574, 0xd12f2b6060533ae7
+575, 0xd0435aa626411c56
+576, 0x44af4a488a9cec72
+577, 0xb934232ea8fa5696
+578, 0x760a8b12072b572d
+579, 0xfab18f9942cfa9b3
+580, 0x5676834c1fe84d16
+581, 0x9c54e4fddb353236
+582, 0xab49edfc9551f293
+583, 0x567f1fb45a871d
+584, 0x32a967c873998834
+585, 0x99240aad380ef8d1
+586, 0x7f66cbd432859a64
+587, 0x4cdc8a4658166822
+588, 0x984e3984a5766492
+589, 0xa3b2d0a3d64d3d94
+590, 0x177f667172f2affc
+591, 0xb1a90607a73a303f
+592, 0xe600b6c36427f878
+593, 0xf758f9834cb7f466
+594, 0x8ee9fce4a3f36449
+595, 0xcb8f11533e7da347
+596, 0xe7cf647794dabd7c
+597, 0xc9d92cfe6110806
+598, 0xea1335fa9145a1ec
+599, 0xbc6c29821d094552
+600, 0x37b9d6a858cc8bc3
+601, 0xf24e4c694929893e
+602, 0x55d025ce2d7d0004
+603, 0xccdc69acccf4267b
+604, 0xc491c04340c222eb
+605, 0xba50f75ecec9befb
+606, 0x1ec7bd85b8fe3bb9
+607, 0xe4de66498c59ae8a
+608, 0x38aa9e912712c889
+609, 0xcee0e43c5cc31566
+610, 0x72b69aa708fc7ed
+611, 0xdff70b7f6fa96679
+612, 0xd6d71d82112aadc3
+613, 0x365177892cb78531
+614, 0xa54852b39de4f72c
+615, 0x11dd5832bf16dd59
+616, 0x248a0f3369c97097
+617, 0xa14cec0260e26792
+618, 0x3517616ff142bed1
+619, 0x9b693ad39dab7636
+620, 0x739dff825e994434
+621, 0x67711e7356098c9
+622, 0xa81f8515d2fdf458
+623, 0xdac2908113fe568e
+624, 0xe99944ebc6e2806a
+625, 0x671728ca5b030975
+626, 0xfdad20edb2b4a789
+627, 0xedc6e466bd0369d2
+628, 0x88b5d469821f7e1b
+629, 0x2eabf94049a522a5
+630, 0x247794b7a2f5a8e3
+631, 0x278942bdbe02c649
+632, 0xbe5a9a9196ab99c1
+633, 0x75955060866da1b5
+634, 0xdedcfa149273c0b5
+635, 0xdbeb7a57758f3867
+636, 0x7b9053347a2c8d5a
+637, 0xa059b3f2eed338a5
+638, 0x59401a46ded3b79f
+639, 0x38044ba56a6d19fb
+640, 0x72c7221b4e77e779
+641, 0x526df3491a3a34da
+642, 0xc3b31184ba16c0c2
+643, 0xd94c7144488624af
+644, 0xcf966ee4dc373f91
+645, 0x62049e65dd416266
+646, 0x7c2adccb925bf8f
+647, 0xd5fa5c22ed4ef8e1
+648, 0xd00134ebd11f2cd1
+649, 0xfbdf81767bed3634
+650, 0x62e8cc8ff66b6e26
+651, 0x3a72d6bcd4f2dcf7
+652, 0xf1cd45b1b46a86ed
+653, 0x1271f98e0938bb9a
+654, 0x82e6927e83dc31fa
+655, 0x7b9b0e0acb67b92d
+656, 0x6df503e397b2e701
+657, 0x93888f6fb561e0c3
+658, 0x393fb6069a40291
+659, 0x967a7d894cc0754d
+660, 0x6e298996ad866333
+661, 0x5ff3cf5559d6ab46
+662, 0xd0d70508c40349f5
+663, 0xc64c66c0dd426b33
+664, 0x8fea340ee35c64dd
+665, 0xf9cd381eb3060005
+666, 0xfcc37c2799fc0b11
+667, 0x6a37c91d65b489fa
+668, 0x57231000fa0a0c9d
+669, 0x55f6e292c6703f9a
+670, 0xd0508ffbfa55a7a6
+671, 0x885db543276bdac8
+672, 0xc26dbe6a26b0e704
+673, 0x21f884874ebd709e
+674, 0x711f0b6c8f732220
+675, 0x354d0a361eaee195
+676, 0x721344d8d30b006a
+677, 0xa0e090a0d3a56f07
+678, 0x16b3d5d823a4952b
+679, 0x59d7874bc9eae7b6
+680, 0x9bbb32710076455f
+681, 0xd4fb22242ffabafd
+682, 0xe1d4ac6770be1d89
+683, 0xb259cedebc73dc8a
+684, 0x35faaa3b4246ab69
+685, 0x5d26addefdaee89
+686, 0x8e7ec350da0f3545
+687, 0xd0f316eed9f8fc79
+688, 0x98b2a52c9bf291b2
+689, 0xe4d294a8aca6a314
+690, 0x25bd554e6aa7673c
+691, 0xcfde5dcba5be2a6c
+692, 0xb5e01fb48d2d2107
+693, 0xe1caf28948028536
+694, 0xd434aa0a26f3ee9b
+695, 0xd17723381641b8f6
+696, 0xfe73bd1f3f3768a2
+697, 0x1cc6b1abd08d67e9
+698, 0x247e328371a28de0
+699, 0x502e7942e5a9104a
+700, 0x6a030fd242eb4502
+701, 0xa2ffe02744014ce8
+702, 0x59290763b18fe04e
+703, 0xcf14241564271436
+704, 0xb0fb73c3c1503aff
+705, 0x94e27c622f82137a
+706, 0x747a5b406ac3e1f0
+707, 0x9a914e96a732031d
+708, 0x59f68c6c8f078835
+709, 0x809d012c73eb4724
+710, 0x5b3c3b73e1b37d74
+711, 0xdde60ef3ba49cdf7
+712, 0x87a14e1f9c761986
+713, 0x4109b960604522af
+714, 0x122d0e1ed0eb6bb9
+715, 0xadc0d29e80bfe33
+716, 0xa25b1b44f5fc8e4e
+717, 0xbab85d8a9b793f20
+718, 0x825f4cbced0e7d1e
+719, 0x2d6ae8807acb37ea
+720, 0x8234420adce2e39
+721, 0x4a8ad4da6b804807
+722, 0x1e19f9bc215e5245
+723, 0x1d6f4848a916dd5e
+724, 0x9ac40dfcdc2d39cc
+725, 0x9f3524e3086155ec
+726, 0x861fffc43124b2ef
+727, 0xe640e3b756396372
+728, 0x41cb0f0c5e149669
+729, 0xe0bd37e1192e4205
+730, 0x62917d3858f4ce47
+731, 0xa36e7eb4d855820a
+732, 0x204b90255a3bf724
+733, 0x66ee83a0175535bc
+734, 0x2c14ce7c6b0c1423
+735, 0x85d9495fa514f70d
+736, 0x5a4fe45ead874dbc
+737, 0xe72248dcb8cfc863
+738, 0xfc21ff2932ed98cd
+739, 0xcbba1edd735b5cad
+740, 0x91ddc32809679bf5
+741, 0x192cdf2c7631ea1f
+742, 0xbbc451ddf2ea286f
+743, 0xad9e80cae2397a64
+744, 0x6918f0119b95d0e5
+745, 0xa40379017a27d70a
+746, 0x1aaeddb600e61e1
+747, 0x15afd93cbd7adda9
+748, 0x156719bc2b757ff4
+749, 0x13d9a59e2b2df49d
+750, 0x9a490986eaddf0a
+751, 0xef9a350f0b3eb6b4
+752, 0x5de7f6295ba4fa4d
+753, 0x7f37fd087c3fdb49
+754, 0xa9fe3749d6f3f209
+755, 0x50912ac036d9bfb
+756, 0x982cb4d726a441f8
+757, 0x8ca8d8af59b872d0
+758, 0x7f8adfb0ceeade8a
+759, 0xdad390ec742be44
+760, 0xa637944d0045be5b
+761, 0x3569a3b3af807061
+762, 0x9599da8eae14511d
+763, 0xc333e8d19589b01a
+764, 0xfb9b524a20b571e1
+765, 0xbd9dc8b37ce5c3e1
+766, 0x142333005fa389ac
+767, 0x1368bc37cd5bcce1
+768, 0x16094907ad6ecf73
+769, 0xb32c90dbba4c1130
+770, 0x82761d97c1747dd0
+771, 0x599f9f267ae3444d
+772, 0x79ad3382994852e1
+773, 0x2511f06d9ef06e54
+774, 0xb35e6ab7d5bbddae
+775, 0xfca9fa83a2988732
+776, 0x7d4350f0394ac3ba
+777, 0xa52a9527bb176ea3
+778, 0xb49fa0ceb2aa8353
+779, 0x1f62e504d1468cc0
+780, 0xe1a77bfccce6efc3
+781, 0x776cdff4dc0d6797
+782, 0x56612e39b652c1f2
+783, 0x5f096a29294eda04
+784, 0x7978abc3aabd8b23
+785, 0x79dd875e0485b979
+786, 0x8a98aa4d5735d778
+787, 0xcca43940f69d2388
+788, 0xb2d4b156f144f93a
+789, 0xbd528a676e9a862
+790, 0x2a394939c8e7ec5e
+791, 0xb1da900c6efe4abc
+792, 0x9869af479de4c034
+793, 0x78dbdfb88ac7c1db
+794, 0x18cb169143088041
+795, 0xe69e5461c51a3e13
+796, 0x5389fa16ea98183c
+797, 0xed7c80d1be1ea520
+798, 0x87246fc359758ced
+799, 0xab323eba95fae4ed
+800, 0xbc4c0dde7f8a1828
+801, 0xdb739f7955610b1a
+802, 0xecd8c68c3434cc
+803, 0x138c2eb88c477f44
+804, 0x28a65f96727aae41
+805, 0xdee879f2cf5629d
+806, 0x684f0c90ef20070f
+807, 0xa24a819ef5621800
+808, 0x8d0054f870e4fdcb
+809, 0x99e8c6e695b600b
+810, 0x50b705245891f7c3
+811, 0xc02eed3a6e58e51a
+812, 0x443d64e95443606c
+813, 0xca24959cfbd2d120
+814, 0xe072609ea48815bc
+815, 0xbcc715026590315b
+816, 0x3e76df24d7aa5938
+817, 0xd8ff04940d9b79ae
+818, 0x54474ce790059bcd
+819, 0x278390dd6aa70e81
+820, 0xf4df619fe35414e4
+821, 0x757d71270264e615
+822, 0x1e8a373699c11b23
+823, 0xef68c82046e67dd6
+824, 0xe280006599972620
+825, 0x234e095183b0f4d6
+826, 0xe3b7560ed9839749
+827, 0xcd5ec4086572332e
+828, 0xc41c0d4aaa279108
+829, 0x4b9cd6126bc16a6d
+830, 0x4a7252734f3e3dd0
+831, 0xb3132df156cc103a
+832, 0xf9e4abbf7b64464a
+833, 0xf936df27fb3c47b7
+834, 0x9142960873f6d71a
+835, 0x4ba6aa3235cdb10d
+836, 0x3237a2e765ba7766
+837, 0xd62f0b94c8e99e54
+838, 0x26b682f90a3ae41b
+839, 0x40ad5e82072b6f81
+840, 0xd0198101f5484000
+841, 0xe4fac60ba11c332
+842, 0x472d0b0a95ef9d38
+843, 0x8512557aec5a3d8f
+844, 0xef83169d3efd4de9
+845, 0x53fe89283e7a7676
+846, 0x2f50933053d69fc4
+847, 0x76f5e4362e2e53a2
+848, 0x8676fdccce28874a
+849, 0x2737764c1fb1f821
+850, 0x4a6f70afc066ab55
+851, 0x27f8e151e310fca4
+852, 0xd606960ccbe85161
+853, 0xcce51d7ddd270a32
+854, 0xb4235999794875c2
+855, 0x580084e358e884
+856, 0x2159d5e6dc8586d7
+857, 0x87bd54d8599b3ba4
+858, 0x3e9ade6a2181664
+859, 0x5e6e140406d97623
+860, 0x511545d5aa0080a2
+861, 0xf49d78ed219aac57
+862, 0xbece1f9c90b8ea87
+863, 0x1c741cac36a2c514
+864, 0x7453c141047db967
+865, 0xd751832a5037eba2
+866, 0x71370a3f30ada1f7
+867, 0x7c01cf2dcb408631
+868, 0x1052a4fbdccc0fa1
+869, 0x13d525c9df3fb6c
+870, 0xa3aa8dbfee760c55
+871, 0xc0288d200f5155cf
+872, 0x79f4bcd12af567c3
+873, 0x8160d163bb548755
+874, 0x5cf2995fb69fd2df
+875, 0xcc98ed01396639df
+876, 0xad95f1d9cfc8256e
+877, 0xa3df27d9fbdbfb9d
+878, 0x83e5f5dda4d52929
+879, 0x9adc05043009f55b
+880, 0xdfe8329dfde1c001
+881, 0x9980ccdd5298e6a2
+882, 0x636a7bd134f6ef56
+883, 0xef5ff780c4be6ba4
+884, 0x290d71dc77a56d16
+885, 0x6d65db9ff58de1e6
+886, 0x944b063b3805a696
+887, 0xce468ca2cce33008
+888, 0x5ba1ccb840f80f48
+889, 0x28ddce36fc9ad268
+890, 0x4f77ef254d507a21
+891, 0xce9b4057fadf3ab
+892, 0xb518bc68298730e6
+893, 0xd2eb5b8e2ec665b0
+894, 0xe1583303a4f87344
+895, 0x9d5a0df4fbe1bed5
+896, 0x2ba9bc03ec8cfd07
+897, 0x479ed880a96ca669
+898, 0xcedf96338324771a
+899, 0x312f4fc2da41ffaa
+900, 0xa0eb9cf23b5e1ed8
+901, 0xf8f88f975dc3f539
+902, 0x4a37e185d0e96e0f
+903, 0xf829654a5c0b46f9
+904, 0x3909cca7a7f8c7fb
+905, 0x4c2e1d66ceb45105
+906, 0xaffaa19e1db8af87
+907, 0x9ec498246bd18c76
+908, 0x21d51558edc089da
+909, 0xe8984112cd1b1561
+910, 0x7de1d2cf54b0c0e1
+911, 0xa06729aed50bfb9d
+912, 0xcf19f733e5db19e1
+913, 0x70edf2624ab777cd
+914, 0x46685becad10e078
+915, 0x825e0f6add46785
+916, 0x66d4af3b15f70de4
+917, 0xc676614b0666b21
+918, 0x282a916c864f5cb7
+919, 0x2707283a3f512167
+920, 0x37ff3afda7461623
+921, 0xc767eb1205e4ca86
+922, 0x46b359aecc4ea25b
+923, 0x67fbbb797a16dbb1
+924, 0x64fd4ba57122290e
+925, 0x8acc2a8ae59d8fac
+926, 0x64a49298599acc67
+927, 0xedf00de67177ce30
+928, 0x1ea9d8d7e76d2d2c
+929, 0x363fcac323f70eb2
+930, 0x19e6e3ec8a9712eb
+931, 0xca541e96b0961f09
+932, 0x4d8fd34c2822ec46
+933, 0x2fdd56a50b32f705
+934, 0xaac2fcf251e3fd3
+935, 0xb0c600299e57045c
+936, 0xd951ec589e909e38
+937, 0x4dc8414390cae508
+938, 0x537ef9d5e2321344
+939, 0xa57bc21fd31aa2dc
+940, 0xa3a60df564183750
+941, 0xbe69a5ce2e369fb6
+942, 0x7744601f4c053ec8
+943, 0x3838452af42f2612
+944, 0xd4f0dad7115a54e9
+945, 0x629cf68d8009a624
+946, 0x2211c8fa34cb98cb
+947, 0x8040b19e2213db83
+948, 0xb2a86d3ba2384fd
+949, 0x4b85cec4f93f0dab
+950, 0xc8d212d21ea6845d
+951, 0x5b271a03a4fe2be0
+952, 0xff4f671319ad8434
+953, 0x8e615a919d5afa96
+954, 0xea7f47c53161160a
+955, 0x33273930b13c6efc
+956, 0x98eedda27fb59c3c
+957, 0x188dc5e92e939677
+958, 0x9dbd0fa0911430f1
+959, 0x5b3dcf3fa75dfd2b
+960, 0x3f03846febdb275d
+961, 0x20cc24faea9e9cf6
+962, 0x854f3ac66199ff5d
+963, 0x31169ac99d341e6f
+964, 0xa85daed3c0bc1bbe
+965, 0x64633711e71ba5dd
+966, 0x530e79978dc73334
+967, 0x636f2ee6e20aef13
+968, 0xf6220f8b6d9a58fb
+969, 0x425db8fa32141a7b
+970, 0xac7c210f4b02be95
+971, 0x5fe8cfbe197a7754
+972, 0xfff7d40c79420ea
+973, 0x5f8bab9ef4697b77
+974, 0xaf6fe54e45b23fe8
+975, 0xce79456ccc70bbce
+976, 0x645ef680f48f1c00
+977, 0xa4dfac46e2028595
+978, 0x6bece4c41effc5df
+979, 0xd316df886442641f
+980, 0xa4f6ff994edd2a6
+981, 0x30281ae3cc49abe4
+982, 0x39acb7b663dea974
+983, 0x5e8829b01a7c06fb
+984, 0x87bdb08cf027f13e
+985, 0xdfa5ede784e802f6
+986, 0x46d03d55711c38cc
+987, 0xa55a961fc9788306
+988, 0xbf09ded495a2e57a
+989, 0xcd601b29a639cc16
+990, 0x2193ce026bfd1085
+991, 0x25ba27f3f225be13
+992, 0x6f685be82f64f2fe
+993, 0xec8454108229c450
+994, 0x6e79d8d205447a44
+995, 0x9ed7b6a96b9ccd68
+996, 0xae7134b3b7f8ee37
+997, 0x66963de0e5ebcc02
+998, 0x29c8dcd0d17c423f
+999, 0xfb8482c827eb90bc
diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
new file mode 100644
index 000000000..0f57c4bd4
--- /dev/null
+++ b/numpy/random/tests/test_direct.py
@@ -0,0 +1,418 @@
+import os
+from os.path import join
+
+import numpy as np
+from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
+ assert_raises)
+import pytest
+
+from numpy.random import (
+ Generator, MT19937, PCG64, Philox, RandomState, SeedSequence, SFC64,
+ default_rng
+)
+from numpy.random.common import interface
+
+try:
+ import cffi # noqa: F401
+
+ MISSING_CFFI = False
+except ImportError:
+ MISSING_CFFI = True
+
+try:
+ import ctypes # noqa: F401
+
+ MISSING_CTYPES = False
+except ImportError:
+ MISSING_CTYPES = False
+
+pwd = os.path.dirname(os.path.abspath(__file__))
+
+
+def assert_state_equal(actual, target):
+ for key in actual:
+ if isinstance(actual[key], dict):
+ assert_state_equal(actual[key], target[key])
+ elif isinstance(actual[key], np.ndarray):
+ assert_array_equal(actual[key], target[key])
+ else:
+ assert actual[key] == target[key]
+
+
+def uniform32_from_uint64(x):
+ x = np.uint64(x)
+ upper = np.array(x >> np.uint64(32), dtype=np.uint32)
+ lower = np.uint64(0xffffffff)
+ lower = np.array(x & lower, dtype=np.uint32)
+ joined = np.column_stack([lower, upper]).ravel()
+ out = (joined >> np.uint32(9)) * (1.0 / 2 ** 23)
+ return out.astype(np.float32)
+
+
+def uniform32_from_uint53(x):
+ x = np.uint64(x) >> np.uint64(16)
+ x = np.uint32(x & np.uint64(0xffffffff))
+ out = (x >> np.uint32(9)) * (1.0 / 2 ** 23)
+ return out.astype(np.float32)
+
+
+def uniform32_from_uint32(x):
+ return (x >> np.uint32(9)) * (1.0 / 2 ** 23)
+
+
+def uniform32_from_uint(x, bits):
+ if bits == 64:
+ return uniform32_from_uint64(x)
+ elif bits == 53:
+ return uniform32_from_uint53(x)
+ elif bits == 32:
+ return uniform32_from_uint32(x)
+ else:
+ raise NotImplementedError
+
+
+def uniform_from_uint(x, bits):
+ if bits in (64, 63, 53):
+ return uniform_from_uint64(x)
+ elif bits == 32:
+ return uniform_from_uint32(x)
+
+
+def uniform_from_uint64(x):
+ return (x >> np.uint64(11)) * (1.0 / 9007199254740992.0)
+
+
+def uniform_from_uint32(x):
+ out = np.empty(len(x) // 2)
+ for i in range(0, len(x), 2):
+ a = x[i] >> 5
+ b = x[i + 1] >> 6
+ out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
+ return out
+
+
+def uniform_from_dsfmt(x):
+ return x.view(np.double) - 1.0
+
+
+def gauss_from_uint(x, n, bits):
+ if bits in (64, 63):
+ doubles = uniform_from_uint64(x)
+ elif bits == 32:
+ doubles = uniform_from_uint32(x)
+ else: # bits == 'dsfmt'
+ doubles = uniform_from_dsfmt(x)
+ gauss = []
+ loc = 0
+ x1 = x2 = 0.0
+ while len(gauss) < n:
+ r2 = 2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0 * doubles[loc] - 1.0
+ x2 = 2.0 * doubles[loc + 1] - 1.0
+ r2 = x1 * x1 + x2 * x2
+ loc += 2
+
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
+ gauss.append(f * x2)
+ gauss.append(f * x1)
+
+ return gauss[:n]
+
+def test_seedsequence():
+ from numpy.random.bit_generator import (ISeedSequence,
+ ISpawnableSeedSequence,
+ SeedlessSeedSequence)
+
+ s1 = SeedSequence(range(10), spawn_key=(1, 2), pool_size=6)
+ s1.spawn(10)
+ s2 = SeedSequence(**s1.state)
+ assert_equal(s1.state, s2.state)
+ assert_equal(s1.n_children_spawned, s2.n_children_spawned)
+
+ # The interfaces cannot be instantiated themselves.
+ assert_raises(TypeError, ISeedSequence)
+ assert_raises(TypeError, ISpawnableSeedSequence)
+ dummy = SeedlessSeedSequence()
+ assert_raises(NotImplementedError, dummy.generate_state, 10)
+ assert len(dummy.spawn(10)) == 10
+
+
+class Base(object):
+ dtype = np.uint64
+ data2 = data1 = {}
+
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.seed_error_type = TypeError
+ cls.invalid_init_types = []
+ cls.invalid_init_values = []
+
+ @classmethod
+ def _read_csv(cls, filename):
+ with open(filename) as csv:
+ seed = csv.readline()
+ seed = seed.split(',')
+ seed = [int(s.strip(), 0) for s in seed[1:]]
+ data = []
+ for line in csv:
+ data.append(int(line.split(',')[-1].strip(), 0))
+ return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
+
+ def test_raw(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ uints = bit_generator.random_raw(1000)
+ assert_equal(uints, self.data1['data'])
+
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ uints = bit_generator.random_raw()
+ assert_equal(uints, self.data1['data'][0])
+
+ bit_generator = self.bit_generator(*self.data2['seed'])
+ uints = bit_generator.random_raw(1000)
+ assert_equal(uints, self.data2['data'])
+
+ def test_random_raw(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ uints = bit_generator.random_raw(output=False)
+ assert uints is None
+ uints = bit_generator.random_raw(1000, output=False)
+ assert uints is None
+
+ def test_gauss_inv(self):
+ n = 25
+ rs = RandomState(self.bit_generator(*self.data1['seed']))
+ gauss = rs.standard_normal(n)
+ assert_allclose(gauss,
+ gauss_from_uint(self.data1['data'], n, self.bits))
+
+ rs = RandomState(self.bit_generator(*self.data2['seed']))
+ gauss = rs.standard_normal(25)
+ assert_allclose(gauss,
+ gauss_from_uint(self.data2['data'], n, self.bits))
+
+ def test_uniform_double(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ vals = uniform_from_uint(self.data1['data'], self.bits)
+ uniforms = rs.random(len(vals))
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float64)
+
+ rs = Generator(self.bit_generator(*self.data2['seed']))
+ vals = uniform_from_uint(self.data2['data'], self.bits)
+ uniforms = rs.random(len(vals))
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float64)
+
+ def test_uniform_float(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ vals = uniform32_from_uint(self.data1['data'], self.bits)
+ uniforms = rs.random(len(vals), dtype=np.float32)
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float32)
+
+ rs = Generator(self.bit_generator(*self.data2['seed']))
+ vals = uniform32_from_uint(self.data2['data'], self.bits)
+ uniforms = rs.random(len(vals), dtype=np.float32)
+ assert_allclose(uniforms, vals)
+ assert_equal(uniforms.dtype, np.float32)
+
+ def test_repr(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ assert 'Generator' in repr(rs)
+ assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') in repr(rs)
+
+ def test_str(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ assert 'Generator' in str(rs)
+ assert str(self.bit_generator.__name__) in str(rs)
+ assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') not in str(rs)
+
+ def test_pickle(self):
+ import pickle
+
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ bitgen_pkl = pickle.dumps(bit_generator)
+ reloaded = pickle.loads(bitgen_pkl)
+ reloaded_state = reloaded.state
+ assert_array_equal(Generator(bit_generator).standard_normal(1000),
+ Generator(reloaded).standard_normal(1000))
+ assert bit_generator is not reloaded
+ assert_state_equal(reloaded_state, state)
+
+ ss = SeedSequence(100)
+ aa = pickle.loads(pickle.dumps(ss))
+ assert_equal(ss.state, aa.state)
+
+ def test_invalid_state_type(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ with pytest.raises(TypeError):
+ bit_generator.state = {'1'}
+
+ def test_invalid_state_value(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ state['bit_generator'] = 'otherBitGenerator'
+ with pytest.raises(ValueError):
+ bit_generator.state = state
+
+ def test_invalid_init_type(self):
+ bit_generator = self.bit_generator
+ for st in self.invalid_init_types:
+ with pytest.raises(TypeError):
+ bit_generator(*st)
+
+ def test_invalid_init_values(self):
+ bit_generator = self.bit_generator
+ for st in self.invalid_init_values:
+ with pytest.raises((ValueError, OverflowError)):
+ bit_generator(*st)
+
+ def test_benchmark(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ bit_generator._benchmark(1)
+ bit_generator._benchmark(1, 'double')
+ with pytest.raises(ValueError):
+ bit_generator._benchmark(1, 'int32')
+
+ @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
+ def test_cffi(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ cffi_interface = bit_generator.cffi
+ assert isinstance(cffi_interface, interface)
+ other_cffi_interface = bit_generator.cffi
+ assert other_cffi_interface is cffi_interface
+
+ @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
+ def test_ctypes(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ ctypes_interface = bit_generator.ctypes
+ assert isinstance(ctypes_interface, interface)
+ other_ctypes_interface = bit_generator.ctypes
+ assert other_ctypes_interface is ctypes_interface
+
+ def test_getstate(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ alt_state = bit_generator.__getstate__()
+ assert_state_equal(state, alt_state)
+
+
+class TestPhilox(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = Philox
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.data1 = cls._read_csv(
+ join(pwd, './data/philox-testset-1.csv'))
+ cls.data2 = cls._read_csv(
+ join(pwd, './data/philox-testset-2.csv'))
+ cls.seed_error_type = TypeError
+ cls.invalid_init_types = []
+ cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]
+
+ def test_set_key(self):
+ bit_generator = self.bit_generator(*self.data1['seed'])
+ state = bit_generator.state
+ keyed = self.bit_generator(counter=state['state']['counter'],
+ key=state['state']['key'])
+ assert_state_equal(bit_generator.state, keyed.state)
+
+
+class TestPCG64(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
+ cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
+ cls.seed_error_type = (ValueError, TypeError)
+ cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
+ cls.invalid_init_values = [(-1,)]
+
+ def test_advance_symmetry(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ state = rs.bit_generator.state
+ step = -0x9e3779b97f4a7c150000000000000000
+ rs.bit_generator.advance(step)
+ val_neg = rs.integers(10)
+ rs.bit_generator.state = state
+ rs.bit_generator.advance(2**128 + step)
+ val_pos = rs.integers(10)
+ rs.bit_generator.state = state
+ rs.bit_generator.advance(10 * 2**128 + step)
+ val_big = rs.integers(10)
+ assert val_neg == val_pos
+ assert val_big == val_pos
+
+
+class TestMT19937(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = MT19937
+ cls.bits = 32
+ cls.dtype = np.uint32
+ cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
+ cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
+ cls.seed_error_type = ValueError
+ cls.invalid_init_types = []
+ cls.invalid_init_values = [(-1,)]
+
+ def test_seed_float_array(self):
+ assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
+ assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
+ assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
+ assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
+ assert_raises(TypeError, self.bit_generator, [np.pi])
+ assert_raises(TypeError, self.bit_generator, [0, np.pi])
+
+ def test_state_tuple(self):
+ rs = Generator(self.bit_generator(*self.data1['seed']))
+ bit_generator = rs.bit_generator
+ state = bit_generator.state
+ desired = rs.integers(2 ** 16)
+ tup = (state['bit_generator'], state['state']['key'],
+ state['state']['pos'])
+ bit_generator.state = tup
+ actual = rs.integers(2 ** 16)
+ assert_equal(actual, desired)
+ tup = tup + (0, 0.0)
+ bit_generator.state = tup
+ actual = rs.integers(2 ** 16)
+ assert_equal(actual, desired)
+
+
+class TestSFC64(Base):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = SFC64
+ cls.bits = 64
+ cls.dtype = np.uint64
+ cls.data1 = cls._read_csv(
+ join(pwd, './data/sfc64-testset-1.csv'))
+ cls.data2 = cls._read_csv(
+ join(pwd, './data/sfc64-testset-2.csv'))
+ cls.seed_error_type = (ValueError, TypeError)
+ cls.invalid_init_types = [(3.2,), ([None],), (1, None)]
+ cls.invalid_init_values = [(-1,)]
+
+
+class TestDefaultRNG(object):
+ def test_seed(self):
+ for args in [(), (None,), (1234,), ([1234, 5678],)]:
+ rg = default_rng(*args)
+ assert isinstance(rg.bit_generator, PCG64)
+
+ def test_passthrough(self):
+ bg = Philox()
+ rg = default_rng(bg)
+ assert rg.bit_generator is bg
+ rg2 = default_rng(rg)
+ assert rg2 is rg
+ assert rg2.bit_generator is bg
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
new file mode 100644
index 000000000..391c33c1a
--- /dev/null
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -0,0 +1,2085 @@
+import sys
+
+import pytest
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_raises, assert_equal,
+ assert_warns, assert_no_warnings, assert_array_equal,
+ assert_array_almost_equal, suppress_warnings)
+
+from numpy.random import Generator, MT19937, SeedSequence
+
+random = Generator(MT19937())
+
+
+@pytest.fixture(scope='module', params=[True, False])
+def endpoint(request):
+ return request.param
+
+
+class TestSeed(object):
+ def test_scalar(self):
+ s = Generator(MT19937(0))
+ assert_equal(s.integers(1000), 479)
+ s = Generator(MT19937(4294967295))
+ assert_equal(s.integers(1000), 324)
+
+ def test_array(self):
+ s = Generator(MT19937(range(10)))
+ assert_equal(s.integers(1000), 465)
+ s = Generator(MT19937(np.arange(10)))
+ assert_equal(s.integers(1000), 465)
+ s = Generator(MT19937([0]))
+ assert_equal(s.integers(1000), 479)
+ s = Generator(MT19937([4294967295]))
+ assert_equal(s.integers(1000), 324)
+
+ def test_seedsequence(self):
+ s = MT19937(SeedSequence(0))
+ assert_equal(s.random_raw(1), 2058676884)
+
+ def test_invalid_scalar(self):
+ # seed must be an unsigned 32 bit integer
+ assert_raises(TypeError, MT19937, -0.5)
+ assert_raises(ValueError, MT19937, -1)
+
+ def test_invalid_array(self):
+ # seed must be an unsigned integer
+ assert_raises(TypeError, MT19937, [-0.5])
+ assert_raises(ValueError, MT19937, [-1])
+ assert_raises(ValueError, MT19937, [1, -2, 4294967296])
+
+ def test_noninstantized_bitgen(self):
+ assert_raises(ValueError, Generator, MT19937)
+
+
+class TestBinomial(object):
+ def test_n_zero(self):
+ # Tests the corner case of n == 0 for the binomial distribution.
+ # binomial(0, p) should be zero for any p in [0, 1].
+ # This test addresses issue #3480.
+ zeros = np.zeros(2, dtype='int')
+ for p in [0, .5, 1]:
+ assert_(random.binomial(0, p) == 0)
+ assert_array_equal(random.binomial(zeros, p), zeros)
+
+ def test_p_is_nan(self):
+ # Issue #4571.
+ assert_raises(ValueError, random.binomial, 1, np.nan)
+
+
+class TestMultinomial(object):
+ def test_basic(self):
+ random.multinomial(100, [0.2, 0.8])
+
+ def test_zero_probability(self):
+ random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
+
+ def test_int_negative_interval(self):
+ assert_(-5 <= random.integers(-5, -1) < -1)
+ x = random.integers(-5, -1, 5)
+ assert_(np.all(-5 <= x))
+ assert_(np.all(x < -1))
+
+ def test_size(self):
+ # gh-3173
+ p = [0.5, 0.5]
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
+ (2, 2, 2))
+
+ assert_raises(TypeError, random.multinomial, 1, p,
+ float(1))
+
+ def test_invalid_prob(self):
+ assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
+ assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
+
+ def test_invalid_n(self):
+ assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
+ assert_raises(ValueError, random.multinomial, [-1] * 10, [0.8, 0.2])
+
+ def test_p_non_contiguous(self):
+ p = np.arange(15.)
+ p /= np.sum(p[1::3])
+ pvals = p[1::3]
+ random = Generator(MT19937(1432985819))
+ non_contig = random.multinomial(100, pvals=pvals)
+ random = Generator(MT19937(1432985819))
+ contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
+ assert_array_equal(non_contig, contig)
+
+
+class TestSetState(object):
+ def setup(self):
+ self.seed = 1234567890
+ self.rg = Generator(MT19937(self.seed))
+ self.bit_generator = self.rg.bit_generator
+ self.state = self.bit_generator.state
+ self.legacy_state = (self.state['bit_generator'],
+ self.state['state']['key'],
+ self.state['state']['pos'])
+
+ def test_gaussian_reset(self):
+ # Make sure the cached every-other-Gaussian is reset.
+ old = self.rg.standard_normal(size=3)
+ self.bit_generator.state = self.state
+ new = self.rg.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset_in_media_res(self):
+ # When the state is saved with a cached Gaussian, make sure the
+ # cached Gaussian is restored.
+
+ self.rg.standard_normal()
+ state = self.bit_generator.state
+ old = self.rg.standard_normal(size=3)
+ self.bit_generator.state = state
+ new = self.rg.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_negative_binomial(self):
+ # Ensure that the negative binomial results take floating point
+ # arguments without truncation.
+ self.rg.negative_binomial(0.5, 0.5)
+
+
+class TestIntegers(object):
+ rfunc = random.integers
+
+ # valid integer/boolean types
+ itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+
+ def test_unsupported_type(self, endpoint):
+ assert_raises(TypeError, self.rfunc, 1, endpoint=endpoint, dtype=float)
+
+ def test_bounds_checking(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, lbnd,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, 0, endpoint=endpoint,
+ dtype=dt)
+
+ assert_raises(ValueError, self.rfunc, [lbnd - 1], ubnd,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [lbnd], [ubnd + 1],
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [ubnd], [lbnd],
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, [0],
+ endpoint=endpoint, dtype=dt)
+
+ def test_bounds_checking_array(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + (not endpoint)
+
+ assert_raises(ValueError, self.rfunc, [lbnd - 1] * 2, [ubnd] * 2,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [lbnd] * 2,
+ [ubnd + 1] * 2, endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, [lbnd] * 2,
+ endpoint=endpoint, dtype=dt)
+ assert_raises(ValueError, self.rfunc, [1] * 2, 0,
+ endpoint=endpoint, dtype=dt)
+
+ def test_rng_zero_and_extremes(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ is_open = not endpoint
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+ assert_equal(self.rfunc([tgt], tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+ assert_equal(self.rfunc(tgt, [tgt + is_open], size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd) // 2
+ assert_equal(self.rfunc(tgt, tgt + is_open, size=1000,
+ endpoint=endpoint, dtype=dt), tgt)
+ assert_equal(self.rfunc([tgt], [tgt + is_open],
+ size=1000, endpoint=endpoint, dtype=dt),
+ tgt)
+
+ def test_rng_zero_and_extremes_array(self, endpoint):
+ size = 1000
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc([tgt], [tgt + 1],
+ size=size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc([tgt], [tgt + 1],
+ size=size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd) // 2
+ assert_equal(self.rfunc([tgt], [tgt + 1],
+ size=size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, dtype=dt), tgt)
+ assert_equal(self.rfunc(
+ [tgt] * size, [tgt + 1] * size, size=size, dtype=dt), tgt)
+
+ def test_full_range(self, endpoint):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ try:
+ self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_full_range_array(self, endpoint):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ try:
+ self.rfunc([lbnd] * 2, [ubnd], endpoint=endpoint, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_in_bounds_fuzz(self, endpoint):
+ # Don't use fixed seed
+ random = Generator(MT19937())
+
+ for dt in self.itype[1:]:
+ for ubnd in [4, 8, 16]:
+ vals = self.rfunc(2, ubnd - endpoint, size=2 ** 16,
+ endpoint=endpoint, dtype=dt)
+ assert_(vals.max() < ubnd)
+ assert_(vals.min() >= 2)
+
+ vals = self.rfunc(0, 2 - endpoint, size=2 ** 16, endpoint=endpoint,
+ dtype=bool)
+ assert_(vals.max() < 2)
+ assert_(vals.min() >= 0)
+
+ def test_scalar_array_equiv(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ size = 1000
+ random = Generator(MT19937(1234))
+ scalar = random.integers(lbnd, ubnd, size=size, endpoint=endpoint,
+ dtype=dt)
+
+ random = Generator(MT19937(1234))
+ scalar_array = random.integers([lbnd], [ubnd], size=size,
+ endpoint=endpoint, dtype=dt)
+
+ random = Generator(MT19937(1234))
+ array = random.integers([lbnd] * size, [ubnd] *
+ size, size=size, endpoint=endpoint, dtype=dt)
+ assert_array_equal(scalar, scalar_array)
+ assert_array_equal(scalar, array)
+
+ def test_repeatability(self, endpoint):
+ import hashlib
+ # We use a md5 hash of generated sequences of 1000 samples
+ # in the range [0, 6) for all but bool, where the range
+ # is [0, 2). Hashes are for little endian numbers.
+ tgt = {'bool': 'b3300e66d2bb59e493d255d47c3a6cbe',
+ 'int16': '39624ead49ad67e37545744024d2648b',
+ 'int32': '5c4810373f979336c6c0c999996e47a1',
+ 'int64': 'ab126c15edff26f55c50d2b7e37391ac',
+ 'int8': 'd1746364b48a020dab9ef0568e6c0cd2',
+ 'uint16': '39624ead49ad67e37545744024d2648b',
+ 'uint32': '5c4810373f979336c6c0c999996e47a1',
+ 'uint64': 'ab126c15edff26f55c50d2b7e37391ac',
+ 'uint8': 'd1746364b48a020dab9ef0568e6c0cd2'}
+
+ for dt in self.itype[1:]:
+ random = Generator(MT19937(1234))
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
+ dtype=dt)
+ else:
+ val = random.integers(0, 6 - endpoint, size=1000, endpoint=endpoint,
+ dtype=dt).byteswap()
+
+ res = hashlib.md5(val.view(np.int8)).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ random = Generator(MT19937(1234))
+ val = random.integers(0, 2 - endpoint, size=1000, endpoint=endpoint,
+ dtype=bool).view(np.int8)
+ res = hashlib.md5(val).hexdigest()
+ assert_(tgt[np.dtype(bool).name] == res)
+
+ def test_repeatability_broadcasting(self, endpoint):
+ for dt in self.itype:
+ lbnd = 0 if dt in (np.bool, bool, np.bool_) else np.iinfo(dt).min
+ ubnd = 2 if dt in (
+ np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ # view as little endian for hash
+ random = Generator(MT19937(1234))
+ val = random.integers(lbnd, ubnd, size=1000, endpoint=endpoint,
+ dtype=dt)
+
+ random = Generator(MT19937(1234))
+ val_bc = random.integers([lbnd] * 1000, ubnd, endpoint=endpoint,
+ dtype=dt)
+
+ assert_array_equal(val, val_bc)
+
+ random = Generator(MT19937(1234))
+ val_bc = random.integers([lbnd] * 1000, [ubnd] * 1000,
+ endpoint=endpoint, dtype=dt)
+
+ assert_array_equal(val, val_bc)
+
+ def test_int64_uint64_broadcast_exceptions(self, endpoint):
+ configs = {np.uint64: ((0, 2**65), (-1, 2**62), (10, 9), (0, 0)),
+ np.int64: ((0, 2**64), (-(2**64), 2**62), (10, 9), (0, 0),
+ (-2**63-1, -2**63-1))}
+ for dtype in configs:
+ for config in configs[dtype]:
+ low, high = config
+ high = high - endpoint
+ low_a = np.array([[low]*10])
+ high_a = np.array([high] * 10)
+ assert_raises(ValueError, random.integers, low, high,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low_a, high,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low, high_a,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low_a, high_a,
+ endpoint=endpoint, dtype=dtype)
+
+ low_o = np.array([[low]*10], dtype=np.object)
+ high_o = np.array([high] * 10, dtype=np.object)
+ assert_raises(ValueError, random.integers, low_o, high,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low, high_o,
+ endpoint=endpoint, dtype=dtype)
+ assert_raises(ValueError, random.integers, low_o, high_o,
+ endpoint=endpoint, dtype=dtype)
+
+ def test_int64_uint64_corner_case(self, endpoint):
+ # When stored in Numpy arrays, `lbnd` is casted
+ # as np.int64, and `ubnd` is casted as np.uint64.
+ # Checking whether `lbnd` >= `ubnd` used to be
+ # done solely via direct comparison, which is incorrect
+ # because when Numpy tries to compare both numbers,
+ # it casts both to np.float64 because there is
+ # no integer superset of np.int64 and np.uint64. However,
+ # `ubnd` is too large to be represented in np.float64,
+ # causing it be round down to np.iinfo(np.int64).max,
+ # leading to a ValueError because `lbnd` now equals
+ # the new `ubnd`.
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1 - endpoint)
+
+ # None of these function calls should
+ # generate a ValueError now.
+ actual = random.integers(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ assert_equal(actual, tgt)
+
+ def test_respect_dtype_singleton(self, endpoint):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ dt = np.bool_ if dt is bool else dt
+
+ sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ assert_equal(sample.dtype, dt)
+
+ for dt in (bool, int, np.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+
+ # gh-7284: Ensure that we get Python data types
+ sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
+ assert not hasattr(sample, 'dtype')
+ assert_equal(type(sample), dt)
+
+ def test_respect_dtype_array(self, endpoint):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+ ubnd = ubnd - 1 if endpoint else ubnd
+ dt = np.bool_ if dt is bool else dt
+
+ sample = self.rfunc([lbnd], [ubnd], endpoint=endpoint, dtype=dt)
+ assert_equal(sample.dtype, dt)
+ sample = self.rfunc([lbnd] * 2, [ubnd] * 2, endpoint=endpoint,
+ dtype=dt)
+ assert_equal(sample.dtype, dt)
+
+ def test_zero_size(self, endpoint):
+ # See gh-7203
+ for dt in self.itype:
+ sample = self.rfunc(0, 0, (3, 0, 4), endpoint=endpoint, dtype=dt)
+ assert sample.shape == (3, 0, 4)
+ assert sample.dtype == dt
+ assert self.rfunc(0, -10, 0, endpoint=endpoint,
+ dtype=dt).shape == (0,)
+ assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_equal(random.integers(0, -10, size=0).shape, (0,))
+ assert_equal(random.integers(10, 10, size=0).shape, (0,))
+
+ def test_error_byteorder(self):
+ other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
+ with pytest.raises(ValueError):
+ random.integers(0, 200, size=10, dtype=other_byteord_dt)
+
+
+class TestRandomDist(object):
+ # Make sure the random distribution returns the correct value for a
+ # given seed
+
+ def setup(self):
+ self.seed = 1234567890
+
+ def test_integers(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.integers(-99, 99, size=(3, 2))
+ desired = np.array([[-80, -56], [41, 37], [-83, -16]])
+ assert_array_equal(actual, desired)
+
+ def test_integers_masked(self):
+ # Test masked rejection sampling algorithm to generate array of
+ # uint32 in an interval.
+ random = Generator(MT19937(self.seed))
+ actual = random.integers(0, 99, size=(3, 2), dtype=np.uint32)
+ desired = np.array([[9, 21], [70, 68], [8, 41]], dtype=np.uint32)
+ assert_array_equal(actual, desired)
+
+ def test_integers_closed(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.integers(-99, 99, size=(3, 2), endpoint=True)
+ desired = np.array([[-80, -56], [ 41, 38], [-83, -15]])
+ assert_array_equal(actual, desired)
+
+ def test_integers_max_int(self):
+ # Tests whether integers with closed=True can generate the
+ # maximum allowed Python int that can be converted
+ # into a C long. Previous implementations of this
+ # method have thrown an OverflowError when attempting
+ # to generate this integer.
+ actual = random.integers(np.iinfo('l').max, np.iinfo('l').max,
+ endpoint=True)
+
+ desired = np.iinfo('l').max
+ assert_equal(actual, desired)
+
+ def test_random(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.random((3, 2))
+ desired = np.array([[0.096999199829214, 0.707517457682192],
+ [0.084364834598269, 0.767731206553125],
+ [0.665069021359413, 0.715487190596693]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.random()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_random_float(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.random((3, 2))
+ desired = np.array([[0.0969992 , 0.70751746],
+ [0.08436483, 0.76773121],
+ [0.66506902, 0.71548719]])
+ assert_array_almost_equal(actual, desired, decimal=7)
+
+ def test_random_float_scalar(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.random(dtype=np.float32)
+ desired = 0.0969992
+ assert_array_almost_equal(actual, desired, decimal=7)
+
+ def test_random_unsupported_type(self):
+ assert_raises(TypeError, random.random, dtype='int32')
+
+ def test_choice_uniform_replace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 4)
+ desired = np.array([0, 0, 2, 2], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_replace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+ desired = np.array([0, 1, 0, 1], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_uniform_noreplace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 3, replace=False)
+ desired = np.array([2, 0, 3], dtype=np.int64)
+ assert_array_equal(actual, desired)
+ actual = random.choice(4, 4, replace=False, shuffle=False)
+ desired = np.arange(4, dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_noreplace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
+ desired = np.array([0, 2, 3], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ def test_choice_noninteger(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(['a', 'b', 'c', 'd'], 4)
+ desired = np.array(['a', 'a', 'c', 'c'])
+ assert_array_equal(actual, desired)
+
+ def test_choice_multidimensional_default_axis(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 3)
+ desired = np.array([[0, 1], [0, 1], [4, 5]])
+ assert_array_equal(actual, desired)
+
+ def test_choice_multidimensional_custom_axis(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.choice([[0, 1], [2, 3], [4, 5], [6, 7]], 1, axis=1)
+ desired = np.array([[0], [2], [4], [6]])
+ assert_array_equal(actual, desired)
+
+ def test_choice_exceptions(self):
+ sample = random.choice
+ assert_raises(ValueError, sample, -1, 3)
+ assert_raises(ValueError, sample, 3., 3)
+ assert_raises(ValueError, sample, [], 3)
+ assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
+ p=[[0.25, 0.25], [0.25, 0.25]])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
+ assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], 2,
+ replace=False, p=[1, 0, 0])
+
+ def test_choice_return_shape(self):
+ p = [0.1, 0.9]
+ # Check scalar
+ assert_(np.isscalar(random.choice(2, replace=True)))
+ assert_(np.isscalar(random.choice(2, replace=False)))
+ assert_(np.isscalar(random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(random.choice([1, 2], replace=True)))
+ assert_(random.choice([None], replace=True) is None)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, replace=True) is a)
+
+ # Check 0-d array
+ s = tuple()
+ assert_(not np.isscalar(random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
+ assert_(random.choice([None], s, replace=True).ndim == 0)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, s, replace=True).item() is a)
+
+ # Check multi dimensional array
+ s = (2, 3)
+ p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+ assert_equal(random.choice(6, s, replace=True).shape, s)
+ assert_equal(random.choice(6, s, replace=False).shape, s)
+ assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
+ assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
+ assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
+
+ # Check zero-size
+ assert_equal(random.integers(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(random.integers(0, -10, size=0).shape, (0,))
+ assert_equal(random.integers(10, 10, size=0).shape, (0,))
+ assert_equal(random.choice(0, size=0).shape, (0,))
+ assert_equal(random.choice([], size=(0,)).shape, (0,))
+ assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_raises(ValueError, random.choice, [], 10)
+
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+ p = [None, None, None]
+ assert_raises(ValueError, random.choice, a, p=p)
+
+ def test_choice_p_non_contiguous(self):
+ p = np.ones(10) / 5
+ p[1::2] = 3.0
+ random = Generator(MT19937(self.seed))
+ non_contig = random.choice(5, 3, p=p[::2])
+ random = Generator(MT19937(self.seed))
+ contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
+ assert_array_equal(non_contig, contig)
+
+ def test_choice_return_type(self):
+ # gh 9867
+ p = np.ones(4) / 4.
+ actual = random.choice(4, 2)
+ assert actual.dtype == np.int64
+ actual = random.choice(4, 2, replace=False)
+ assert actual.dtype == np.int64
+ actual = random.choice(4, 2, p=p)
+ assert actual.dtype == np.int64
+ actual = random.choice(4, 2, p=p, replace=False)
+ assert actual.dtype == np.int64
+
+ def test_choice_large_sample(self):
+ import hashlib
+
+ choice_hash = 'd44962a0b1e92f4a3373c23222244e21'
+ random = Generator(MT19937(self.seed))
+ actual = random.choice(10000, 5000, replace=False)
+ if sys.byteorder != 'little':
+ actual = actual.byteswap()
+ res = hashlib.md5(actual.view(np.int8)).hexdigest()
+ assert_(choice_hash == res)
+
+ def test_bytes(self):
+ # Fixed byte string is pinned to the MT19937 stream produced by self.seed.
+ random = Generator(MT19937(self.seed))
+ actual = random.bytes(10)
+ desired = b'\x86\xf0\xd4\x18\xe1\x81\t8%\xdd'
+ assert_equal(actual, desired)
+
+ def test_shuffle(self):
+ # Test lists, arrays (of various dtypes), and multidimensional versions
+ # of both, c-contiguous or not:
+ # The first converter deliberately ignores its input and yields an empty
+ # array, covering the empty-sequence shuffle case.
+ for conv in [lambda x: np.array([]),
+ lambda x: x,
+ lambda x: np.asarray(x).astype(np.int8),
+ lambda x: np.asarray(x).astype(np.float32),
+ lambda x: np.asarray(x).astype(np.complex64),
+ lambda x: np.asarray(x).astype(object),
+ lambda x: [(i, i) for i in x],
+ lambda x: np.asarray([[i, i] for i in x]),
+ lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
+ # gh-4270
+ lambda x: np.asarray([(i, i) for i in x],
+ [("a", object, (1,)),
+ ("b", np.int32, (1,))])]:
+ random = Generator(MT19937(self.seed))
+ alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+ random.shuffle(alist)
+ actual = alist
+ desired = conv([4, 1, 9, 8, 0, 5, 3, 6, 2, 7])
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_custom_axis(self):
+ # axis=1 and axis=-1 must produce the same column permutation.
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=1)
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = np.arange(16).reshape((4, 4))
+ random.shuffle(actual, axis=-1)
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_axis_nonsquare(self):
+ # shuffle(y, axis=1) must equal shuffling the transpose along axis 0.
+ y1 = np.arange(20).reshape(2, 10)
+ y2 = y1.copy()
+ random = Generator(MT19937(self.seed))
+ random.shuffle(y1, axis=1)
+ random = Generator(MT19937(self.seed))
+ random.shuffle(y2.T)
+ assert_array_equal(y1, y2)
+
+ def test_shuffle_masked(self):
+ # gh-3263
+ # NOTE(review): this test uses the module-level ``random`` (no local
+ # Generator is constructed here), so it only checks mask/data
+ # consistency, not a specific stream.
+ a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+ b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+ a_orig = a.copy()
+ b_orig = b.copy()
+ for i in range(50):
+ random.shuffle(a)
+ assert_equal(
+ sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+ random.shuffle(b)
+ assert_equal(
+ sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+ def test_shuffle_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ # NOTE(review): np.AxisError moved to np.exceptions.AxisError in
+ # NumPy 2.0 — confirm the targeted NumPy version before reusing.
+ assert_raises(np.AxisError, random.shuffle, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.shuffle, arr, 3)
+ assert_raises(TypeError, random.shuffle, arr, slice(1, 2, None))
+ arr = [[1, 2, 3], [4, 5, 6]]
+ assert_raises(NotImplementedError, random.shuffle, arr, 1)
+
+ def test_permutation(self):
+ # permutation() on a list, a 2-D column array, invalid scalars, and an
+ # integer argument (which permutes arange(n)).
+ random = Generator(MT19937(self.seed))
+ alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
+ actual = random.permutation(alist)
+ desired = [4, 1, 9, 8, 0, 5, 3, 6, 2, 7]
+ assert_array_equal(actual, desired)
+
+ random = Generator(MT19937(self.seed))
+ arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
+ actual = random.permutation(arr_2d)
+ assert_array_equal(actual, np.atleast_2d(desired).T)
+
+ # 0-d inputs of non-integer type raise AxisError.
+ bad_x_str = "abcd"
+ assert_raises(np.AxisError, random.permutation, bad_x_str)
+
+ bad_x_float = 1.2
+ assert_raises(np.AxisError, random.permutation, bad_x_float)
+
+ random = Generator(MT19937(self.seed))
+ integer_val = 10
+ desired = [3, 0, 8, 7, 9, 4, 2, 5, 1, 6]
+
+ actual = random.permutation(integer_val)
+ assert_array_equal(actual, desired)
+
+ def test_permutation_custom_axis(self):
+ # axis=1 and axis=-1 must agree, mirroring test_shuffle_custom_axis.
+ a = np.arange(16).reshape((4, 4))
+ desired = np.array([[ 0, 3, 1, 2],
+ [ 4, 7, 5, 6],
+ [ 8, 11, 9, 10],
+ [12, 15, 13, 14]])
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=1)
+ assert_array_equal(actual, desired)
+ random = Generator(MT19937(self.seed))
+ actual = random.permutation(a, axis=-1)
+ assert_array_equal(actual, desired)
+
+ def test_permutation_exceptions(self):
+ random = Generator(MT19937(self.seed))
+ arr = np.arange(10)
+ assert_raises(np.AxisError, random.permutation, arr, 1)
+ arr = np.arange(9).reshape((3, 3))
+ assert_raises(np.AxisError, random.permutation, arr, 3)
+ assert_raises(TypeError, random.permutation, arr, slice(1, 2, None))
+
+ def test_beta(self):
+ # Expected values pinned to the MT19937 stream for self.seed.
+ random = Generator(MT19937(self.seed))
+ actual = random.beta(.1, .9, size=(3, 2))
+ desired = np.array(
+ [[1.083029353267698e-10, 2.449965303168024e-11],
+ [2.397085162969853e-02, 3.590779671820755e-08],
+ [2.830254190078299e-04, 1.744709918330393e-01]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_binomial(self):
+ # Non-integer n (100.123) is accepted and truncated by binomial.
+ random = Generator(MT19937(self.seed))
+ actual = random.binomial(100.123, .456, size=(3, 2))
+ desired = np.array([[42, 41],
+ [42, 48],
+ [44, 50]])
+ assert_array_equal(actual, desired)
+
+ # Scalar draw from a fresh, identically-seeded stream.
+ random = Generator(MT19937(self.seed))
+ actual = random.binomial(100.123, .456)
+ desired = 42
+ assert_array_equal(actual, desired)
+
+ def test_chisquare(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.chisquare(50, size=(3, 2))
+ desired = np.array([[32.9850547060149, 39.0219480493301],
+ [56.2006134779419, 57.3474165711485],
+ [55.4243733880198, 55.4209797925213]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_dirichlet(self):
+ random = Generator(MT19937(self.seed))
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha, size=(3, 2))
+ desired = np.array([[[0.5439892869558927, 0.45601071304410745],
+ [0.5588917345860708, 0.4411082654139292 ]],
+ [[0.5632074165063435, 0.43679258349365657],
+ [0.54862581112627, 0.45137418887373015]],
+ [[0.49961831357047226, 0.5003816864295278 ],
+ [0.52374806183482, 0.47625193816517997]]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+ # Negative alpha component must raise.
+ bad_alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, bad_alpha)
+
+ # A no-size draw equals the first element of the sized draw above.
+ random = Generator(MT19937(self.seed))
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha)
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_dirichlet_size(self):
+ # gh-3173
+ # NOTE(review): the next three assertions are byte-identical — likely
+ # intended to cover distinct size spellings (e.g. np.uint32(1), (1,),
+ # [1]); confirm against upstream history.
+ p = np.array([51.72840233779265162, 39.74494232180943953])
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+
+ assert_raises(TypeError, random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, alpha)
+
+ def test_dirichlet_alpha_non_contiguous(self):
+ # A strided (non-contiguous) alpha must give the same stream as its
+ # contiguous copy.
+ a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
+ alpha = a[::2]
+ random = Generator(MT19937(self.seed))
+ non_contig = random.dirichlet(alpha, size=(3, 2))
+ random = Generator(MT19937(self.seed))
+ contig = random.dirichlet(np.ascontiguousarray(alpha),
+ size=(3, 2))
+ assert_array_almost_equal(non_contig, contig)
+
+ def test_exponential(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.exponential(1.1234, size=(3, 2))
+ desired = np.array([[0.098845481066258, 1.560752510746964],
+ [0.075730916041636, 1.769098974710777],
+ [1.488602544592235, 2.49684815275751 ]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_exponential_0(self):
+ # NOTE(review): the *_0 tests use the module-level ``random`` — they
+ # check degenerate-parameter behavior, not a specific stream.
+ assert_equal(random.exponential(scale=0), 0)
+ assert_raises(ValueError, random.exponential, scale=-0.)
+
+ def test_f(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.f(12, 77, size=(3, 2))
+ desired = np.array([[0.461720027077085, 1.100441958872451],
+ [1.100337455217484, 0.91421736740018 ],
+ [0.500811891303113, 0.826802454552058]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gamma(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.gamma(5, 3, size=(3, 2))
+ desired = np.array([[ 5.03850858902096, 7.9228656732049 ],
+ [18.73983605132985, 19.57961681699238],
+ [18.17897755150825, 18.17653912505234]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_gamma_0(self):
+ assert_equal(random.gamma(shape=0, scale=0), 0)
+ assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
+
+ def test_geometric(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.geometric(.123456789, size=(3, 2))
+ desired = np.array([[ 1, 10],
+ [ 1, 12],
+ [ 9, 10]])
+ assert_array_equal(actual, desired)
+
+ def test_geometric_exceptions(self):
+ # p must lie in (0, 1]; NaN is rejected under suppressed FP warnings.
+ assert_raises(ValueError, random.geometric, 1.1)
+ assert_raises(ValueError, random.geometric, [1.1] * 10)
+ assert_raises(ValueError, random.geometric, -0.1)
+ assert_raises(ValueError, random.geometric, [-0.1] * 10)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.geometric, np.nan)
+ assert_raises(ValueError, random.geometric, [np.nan] * 10)
+
+ def test_gumbel(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[ 4.688397515056245, -0.289514845417841],
+ [ 4.981176042584683, -0.633224272589149],
+ [-0.055915275687488, -0.333962478257953]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gumbel_0(self):
+ assert_equal(random.gumbel(scale=0), 0)
+ assert_raises(ValueError, random.gumbel, scale=-0.)
+
+ def test_hypergeometric(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ desired = np.array([[ 9, 9],
+ [ 9, 9],
+ [10, 9]])
+ assert_array_equal(actual, desired)
+
+ # Test nbad = 0
+ # With no bad items every draw returns exactly nsample goods.
+ actual = random.hypergeometric(5, 0, 3, size=4)
+ desired = np.array([3, 3, 3, 3])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(15, 0, 12, size=4)
+ desired = np.array([12, 12, 12, 12])
+ assert_array_equal(actual, desired)
+
+ # Test ngood = 0
+ actual = random.hypergeometric(0, 5, 3, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(0, 15, 12, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ def test_laplace(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[-3.156353949272393, 1.195863024830054],
+ [-3.435458081645966, 1.656882398925444],
+ [ 0.924824032467446, 1.251116432209336]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_laplace_0(self):
+ assert_equal(random.laplace(scale=0), 0)
+ assert_raises(ValueError, random.laplace, scale=-0.)
+
+ def test_logistic(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[-4.338584631510999, 1.890171436749954],
+ [-4.64547787337966 , 2.514545562919217],
+ [ 1.495389489198666, 1.967827627577474]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_lognormal(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+ desired = np.array([[ 0.0268252166335, 13.9534486483053],
+ [ 0.1204014788936, 2.2422077497792],
+ [ 4.2484199496128, 12.0093343977523]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_lognormal_0(self):
+ # sigma=0 collapses to exp(mean)=exp(0)=1.
+ assert_equal(random.lognormal(sigma=0), 1)
+ assert_raises(ValueError, random.lognormal, sigma=-0.)
+
+ def test_logseries(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.logseries(p=.923456789, size=(3, 2))
+ desired = np.array([[14, 17],
+ [3, 18],
+ [5, 1]])
+ assert_array_equal(actual, desired)
+
+ def test_logseries_exceptions(self):
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.logseries, np.nan)
+ assert_raises(ValueError, random.logseries, [np.nan] * 10)
+
+ def test_multinomial(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+ desired = np.array([[[1, 5, 1, 6, 4, 3],
+ [4, 2, 6, 2, 4, 2]],
+ [[5, 3, 2, 6, 3, 1],
+ [4, 4, 0, 2, 3, 7]],
+ [[6, 3, 1, 5, 3, 2],
+ [5, 5, 3, 1, 2, 4]]])
+ assert_array_equal(actual, desired)
+
+ def test_multivariate_normal(self):
+ random = Generator(MT19937(self.seed))
+ mean = (.123456789, 10)
+ cov = [[1, 0], [0, 1]]
+ size = (3, 2)
+ actual = random.multivariate_normal(mean, cov, size)
+ desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
+ [-0.9967333370066214, 10.342002097029821 ]],
+ [[ 0.7850019631242964, 11.181113712443013 ],
+ [ 0.8901349653255224, 8.873825399642492 ]],
+ [[ 0.7130260107430003, 9.551628690083056 ],
+ [ 0.7127098726541128, 11.991709234143173 ]]])
+
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check for default size, was raising deprecation warning
+ actual = random.multivariate_normal(mean, cov)
+ desired = np.array([0.233278563284287, 9.424140804347195])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check that non positive-semidefinite covariance warns with
+ # RuntimeWarning
+ mean = [0, 0]
+ cov = [[1, 2], [2, 1]]
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+
+ # and that it doesn't warn with RuntimeWarning check_valid='ignore'
+ assert_no_warnings(random.multivariate_normal, mean, cov,
+ check_valid='ignore')
+
+ # and that it raises with RuntimeWarning check_valid='raises'
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
+
+ # float32 covariance must not trigger a spurious PSD warning.
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ # NOTE(review): ``mu`` is defined here but the check_valid='other'
+ # call below still passes ``mean`` ([0, 0], value-equal) — likely an
+ # oversight; confirm which was intended.
+ mu = np.zeros(2)
+ cov = np.eye(2)
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='other')
+ assert_raises(ValueError, random.multivariate_normal,
+ np.zeros((2, 1, 1)), cov)
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.empty((3, 2)))
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.eye(3))
+
+ def test_negative_binomial(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[543, 727],
+ [775, 760],
+ [600, 674]])
+ assert_array_equal(actual, desired)
+
+ def test_negative_binomial_exceptions(self):
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.negative_binomial, 100, np.nan)
+ assert_raises(ValueError, random.negative_binomial, 100,
+ [np.nan] * 10)
+
+ def test_noncentral_chisquare(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[ 1.70561552362133, 15.97378184942111],
+ [13.71483425173724, 20.17859633310629],
+ [11.3615477156643 , 3.67891108738029]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ # df < 1 exercises the small-df sampling path.
+ actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[9.41427665607629e-04, 1.70473157518850e-04],
+ [1.14554372041263e+00, 1.38187755933435e-03],
+ [1.90659181905387e+00, 1.21772577941822e+00]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ # nonc=0 must reduce to the central chi-square.
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[0.82947954590419, 1.80139670767078],
+ [6.58720057417794, 7.00491463609814],
+ [6.31101879073157, 6.30982307753005]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+ size=(3, 2))
+ desired = np.array([[0.060310671139 , 0.23866058175939],
+ [0.86860246709073, 0.2668510459738 ],
+ [0.23375780078364, 1.88922102885943]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f_nan(self):
+ # NaN nonc propagates to a NaN draw rather than raising.
+ random = Generator(MT19937(self.seed))
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
+ assert np.isnan(actual)
+
+ def test_normal(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[-3.618412914693162, 2.635726692647081],
+ [-2.116923463013243, 0.807460983059643],
+ [ 1.446547137248593, 2.485684213886024]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_normal_0(self):
+ assert_equal(random.normal(scale=0), 0)
+ assert_raises(ValueError, random.normal, scale=-0.)
+
+ def test_pareto(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.pareto(a=.123456789, size=(3, 2))
+ desired = np.array([[1.0394926776069018e+00, 7.7142534343505773e+04],
+ [7.2640150889064703e-01, 3.4650454783825594e+05],
+ [4.5852344481994740e+04, 6.5851383009539105e+07]])
+ # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
+ # matrix differs by 24 nulps. Discussion:
+ # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+ # Consensus is that this is probably some gcc quirk that affects
+ # rounding but not in any important way, so we just use a looser
+ # tolerance on this test:
+ np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
+
+ def test_poisson(self):
+ # Tiny lam makes every draw 0 with overwhelming probability.
+ random = Generator(MT19937(self.seed))
+ actual = random.poisson(lam=.123456789, size=(3, 2))
+ desired = np.array([[0, 0],
+ [0, 0],
+ [0, 0]])
+ assert_array_equal(actual, desired)
+
+ def test_poisson_exceptions(self):
+ # lam must be non-negative, finite, and small enough for int64 output.
+ lambig = np.iinfo('int64').max
+ lamneg = -1
+ assert_raises(ValueError, random.poisson, lamneg)
+ assert_raises(ValueError, random.poisson, [lamneg] * 10)
+ assert_raises(ValueError, random.poisson, lambig)
+ assert_raises(ValueError, random.poisson, [lambig] * 10)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, random.poisson, np.nan)
+ assert_raises(ValueError, random.poisson, [np.nan] * 10)
+
+ def test_power(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.power(a=.123456789, size=(3, 2))
+ desired = np.array([[1.977857368842754e-09, 9.806792196620341e-02],
+ [2.482442984543471e-10, 1.527108843266079e-01],
+ [8.188283434244285e-02, 3.950547209346948e-01]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rayleigh(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.rayleigh(scale=10, size=(3, 2))
+ desired = np.array([[ 4.51734079831581, 15.6802442485758 ],
+ [ 4.19850651287094, 17.08718809823704],
+ [14.7907457708776 , 15.85545333419775]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_rayleigh_0(self):
+ assert_equal(random.rayleigh(scale=0), 0)
+ assert_raises(ValueError, random.rayleigh, scale=-0.)
+
+ def test_standard_cauchy(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_cauchy(size=(3, 2))
+ desired = np.array([[-1.489437778266206, -3.275389641569784],
+ [ 0.560102864910406, -0.680780916282552],
+ [-1.314912905226277, 0.295852965660225]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential(self):
+ # method='inv' selects inverse-CDF sampling (vs. the default ziggurat).
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_exponential(size=(3, 2), method='inv')
+ desired = np.array([[0.102031839440643, 1.229350298474972],
+ [0.088137284693098, 1.459859985522667],
+ [1.093830802293668, 1.256977002164613]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_expoential_type_error(self):
+ assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
+
+ def test_standard_gamma(self):
+ # Default-dtype (float64) draws pinned to the seeded stream.
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[0.62970724056362, 1.22379851271008],
+ [3.899412530884 , 4.12479964250139],
+ [3.74994102464584, 3.74929307690815]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_standard_gammma_scalar_float(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_gamma(3, dtype=np.float32)
+ desired = 2.9242148399353027
+ assert_array_almost_equal(actual, desired, decimal=6)
+
+ def test_standard_gamma_float(self):
+ # Same stream as test_standard_gamma, compared at reduced precision.
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[0.62971, 1.2238 ],
+ [3.89941, 4.1248 ],
+ [3.74994, 3.74929]])
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ def test_standard_gammma_float_out(self):
+ actual = np.zeros((3, 2), dtype=np.float32)
+ random = Generator(MT19937(self.seed))
+ random.standard_gamma(10.0, out=actual, dtype=np.float32)
+ desired = np.array([[10.14987, 7.87012],
+ [ 9.46284, 12.56832],
+ [13.82495, 7.81533]], dtype=np.float32)
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ random = Generator(MT19937(self.seed))
+ random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ def test_standard_gamma_unknown_type(self):
+ # Only float32/float64 dtypes are accepted.
+ assert_raises(TypeError, random.standard_gamma, 1.,
+ dtype='int32')
+
+ def test_out_size_mismatch(self):
+ # ``size`` must agree with ``out``'s shape when both are given.
+ out = np.zeros(10)
+ assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
+ out=out)
+ assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
+ out=out)
+
+ def test_standard_gamma_0(self):
+ assert_equal(random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, random.standard_gamma, shape=-0.)
+
+ def test_standard_normal(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_normal(size=(3, 2))
+ desired = np.array([[-1.870934851846581, 1.25613495182354 ],
+ [-1.120190126006621, 0.342002097029821],
+ [ 0.661545174124296, 1.181113712443012]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_normal_unsupported_type(self):
+ assert_raises(TypeError, random.standard_normal, dtype=np.int32)
+
+ def test_standard_t(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[-1.484666193042647, 0.30597891831161 ],
+ [ 1.056684299648085, -0.407312602088507],
+ [ 0.130704414281157, -2.038053410490321]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_triangular(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
+ desired = np.array([[ 7.86664070590917, 13.6313848513185 ],
+ [ 7.68152445215983, 14.36169131136546],
+ [13.16105603911429, 13.72341621856971]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_uniform(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+ desired = np.array([[2.13306255040998 , 7.816987531021207],
+ [2.015436610109887, 8.377577533009589],
+ [7.421792588856135, 7.891185744455209]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_uniform_range_bounds(self):
+ # high - low must be representable as a finite float.
+ fmin = np.finfo('float').min
+ fmax = np.finfo('float').max
+
+ func = random.uniform
+ assert_raises(OverflowError, func, -np.inf, 0)
+ assert_raises(OverflowError, func, 0, np.inf)
+ assert_raises(OverflowError, func, fmin, fmax)
+ assert_raises(OverflowError, func, [-np.inf], [0])
+ assert_raises(OverflowError, func, [0], [np.inf])
+
+ # (fmax / 1e17) - fmin is within range, so this should not throw
+ # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
+ # DBL_MAX by increasing fmin a bit
+ random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+
+ def test_scalar_exception_propagation(self):
+ # Tests that exceptions are correctly propagated in distributions
+ # when called with objects that throw exceptions when converted to
+ # scalars.
+ #
+ # Regression test for gh: 8865
+
+ # ndarray subclass whose scalar conversion always fails.
+ class ThrowingFloat(np.ndarray):
+ def __float__(self):
+ raise TypeError
+
+ throwing_float = np.array(1.0).view(ThrowingFloat)
+ assert_raises(TypeError, random.uniform, throwing_float,
+ throwing_float)
+
+ class ThrowingInteger(np.ndarray):
+ def __int__(self):
+ raise TypeError
+
+ throwing_int = np.array(1).view(ThrowingInteger)
+ assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
+
+ def test_vonmises(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+ desired = np.array([[ 1.107972248690106, 2.841536476232361],
+ [ 1.832602376042457, 1.945511926976032],
+ [-0.260147475776542, 2.058047492231698]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_vonmises_small(self):
+ # check infinite loop, gh-4720
+ random = Generator(MT19937(self.seed))
+ r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
+ assert_(np.isfinite(r).all())
+
+ def test_vonmises_nan(self):
+ random = Generator(MT19937(self.seed))
+ r = random.vonmises(mu=0., kappa=np.nan)
+ assert_(np.isnan(r))
+
+ def test_wald(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+ desired = np.array([[0.26871721804551, 3.2233942732115 ],
+ [2.20328374987066, 2.40958405189353],
+ [2.07093587449261, 0.73073890064369]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_weibull(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.weibull(a=1.23, size=(3, 2))
+ desired = np.array([[0.138613914769468, 1.306463419753191],
+ [0.111623365934763, 1.446570494646721],
+ [1.257145775276011, 1.914247725027957]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_weibull_0(self):
+ # a=0 yields all-zero draws without consuming an error path.
+ random = Generator(MT19937(self.seed))
+ assert_equal(random.weibull(a=0, size=12), np.zeros(12))
+ assert_raises(ValueError, random.weibull, a=-0.)
+
+ def test_zipf(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.zipf(a=1.23, size=(3, 2))
+ desired = np.array([[ 1, 1],
+ [ 10, 867],
+ [354, 2]])
+ assert_array_equal(actual, desired)
+
+
+class TestBroadcast(object):
+ # tests that functions that broadcast behave
+ # correctly when presented with non-scalar arguments
+ # NOTE(review): nose-style ``setup`` is deprecated in newer pytest —
+ # ``setup_method`` is the modern spelling; confirm target test runner.
+ def setup(self):
+ self.seed = 123456789
+
+
+ def test_uniform(self):
+ # Broadcasting a length-3 low against scalar-like high (and vice
+ # versa) must yield the same three draws.
+ # NOTE(review): the first Generator and the ``uniform`` alias below
+ # are never used — the seeded generators are re-created before each
+ # draw.
+ random = Generator(MT19937(self.seed))
+ low = [0]
+ high = [1]
+ uniform = random.uniform
+ desired = np.array([0.16693771389729, 0.19635129550675, 0.75563050964095])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.uniform(low * 3, high)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.uniform(low, high * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ random = Generator(MT19937(self.seed))
+ desired = np.array([-0.38736406738527, 0.79594375042255, 0.0197076236097])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.normal(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.normal, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ normal = random.normal
+ actual = normal(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc, bad_scale * 3)
+
+ def test_beta(self):
+ # Broadcast each parameter in turn; bad (negative) values must raise.
+ a = [1]
+ b = [2]
+ bad_a = [-1]
+ bad_b = [-2]
+ desired = np.array([0.18719338682602, 0.73234824491364, 0.17928615186455])
+
+ random = Generator(MT19937(self.seed))
+ beta = random.beta
+ actual = beta(a * 3, b)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a * 3, b)
+ assert_raises(ValueError, beta, a * 3, bad_b)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.beta(a, b * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_exponential(self):
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.exponential(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.exponential, bad_scale * 3)
+
+ def test_standard_gamma(self):
+ # shape=1 makes standard_gamma equivalent to standard exponential,
+ # hence the same desired values as test_exponential above.
+ shape = [1]
+ bad_shape = [-1]
+ desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
+
+ random = Generator(MT19937(self.seed))
+ std_gamma = random.standard_gamma
+ actual = std_gamma(shape * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, std_gamma, bad_shape * 3)
+
+ def test_gamma(self):
+ shape = [1]
+ scale = [2]
+ bad_shape = [-1]
+ bad_scale = [-2]
+ desired = np.array([1.34491986425611, 0.42760990636187, 1.4355697857258])
+
+ random = Generator(MT19937(self.seed))
+ gamma = random.gamma
+ actual = gamma(shape * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape * 3, scale)
+ assert_raises(ValueError, gamma, shape * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ gamma = random.gamma
+ actual = gamma(shape, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape, scale * 3)
+ assert_raises(ValueError, gamma, shape, bad_scale * 3)
+
+ def test_f(self):
+ dfnum = [1]
+ dfden = [2]
+ bad_dfnum = [-1]
+ bad_dfden = [-2]
+ desired = np.array([0.07765056244107, 7.72951397913186, 0.05786093891763])
+
+ random = Generator(MT19937(self.seed))
+ f = random.f
+ actual = f(dfnum * 3, dfden)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum * 3, dfden)
+ assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+
+ random = Generator(MT19937(self.seed))
+ f = random.f
+ actual = f(dfnum, dfden * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum, dfden * 3)
+ assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+
+ def test_noncentral_f(self):
+ # Broadcast each of the three parameters in turn; NaN nonc must
+ # propagate to NaN draws rather than raising.
+ dfnum = [2]
+ dfden = [3]
+ nonc = [4]
+ bad_dfnum = [0]
+ bad_dfden = [-1]
+ bad_nonc = [-2]
+ desired = np.array([2.02434240411421, 12.91838601070124, 1.24395160354629])
+
+ random = Generator(MT19937(self.seed))
+ nonc_f = random.noncentral_f
+ actual = nonc_f(dfnum * 3, dfden, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
+
+ assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+
+ random = Generator(MT19937(self.seed))
+ nonc_f = random.noncentral_f
+ actual = nonc_f(dfnum, dfden * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+
+ random = Generator(MT19937(self.seed))
+ nonc_f = random.noncentral_f
+ actual = nonc_f(dfnum, dfden, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+
+ def test_noncentral_f_small_df(self):
+ random = Generator(MT19937(self.seed))
+ desired = np.array([0.04714867120827, 0.1239390327694])
+ actual = random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_chisquare(self):
+ df = [1]
+ bad_df = [-1]
+ desired = np.array([0.05573640064251, 1.47220224353539, 2.9469379318589])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.chisquare(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.chisquare, bad_df * 3)
+
+ def test_noncentral_chisquare(self):
+ df = [1]
+ nonc = [2]
+ bad_df = [-1]
+ bad_nonc = [-2]
+ desired = np.array([0.07710766249436, 5.27829115110304, 0.630732147399])
+
+ random = Generator(MT19937(self.seed))
+ nonc_chi = random.noncentral_chisquare
+ actual = nonc_chi(df * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
+ assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+
+ random = Generator(MT19937(self.seed))
+ nonc_chi = random.noncentral_chisquare
+ actual = nonc_chi(df, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
+ assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+
+ def test_standard_t(self):
+ df = [1]
+ bad_df = [-1]
+ desired = np.array([-1.39498829447098, -1.23058658835223, 0.17207021065983])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.standard_t(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.standard_t, bad_df * 3)
+
+ def test_vonmises(self):
+ mu = [2]
+ kappa = [1]
+ bad_kappa = [-1]
+ desired = np.array([2.25935584988528, 2.23326261461399, -2.84152146503326])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.vonmises(mu * 3, kappa)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.vonmises, mu * 3, bad_kappa)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.vonmises(mu, kappa * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.vonmises, mu, bad_kappa * 3)
+
+ def test_pareto(self):
+ a = [1]
+ bad_a = [-1]
+ desired = np.array([0.95905052946317, 0.2383810889437 , 1.04988745750013])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.pareto(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.pareto, bad_a * 3)
+
+ def test_weibull(self):
+ # a=1 reduces Weibull to the standard exponential, hence the same
+ # desired values as the broadcast exponential test.
+ a = [1]
+ bad_a = [-1]
+ desired = np.array([0.67245993212806, 0.21380495318094, 0.7177848928629])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.weibull(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.weibull, bad_a * 3)
+
+ def test_power(self):
+ a = [1]
+ bad_a = [-1]
+ desired = np.array([0.48954864361052, 0.19249412888486, 0.51216834058807])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.power(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.power, bad_a * 3)
+
+ def test_laplace(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([-1.09698732625119, -0.93470271947368, 0.71592671378202])
+
+ random = Generator(MT19937(self.seed))
+ laplace = random.laplace
+ actual = laplace(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ laplace = random.laplace
+ actual = laplace(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc, bad_scale * 3)
+
+ def test_gumbel(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([1.70020068231762, 1.52054354273631, -0.34293267607081])
+
+ random = Generator(MT19937(self.seed))
+ gumbel = random.gumbel
+ actual = gumbel(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ gumbel = random.gumbel
+ actual = gumbel(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+
+ def test_logistic(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([-1.607487640433, -1.40925686003678, 1.12887112820397])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.logistic(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.logistic, loc * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.logistic(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.logistic, loc, bad_scale * 3)
+ assert_equal(random.logistic(1.0, 0.0), 1.0)
+
+ def test_lognormal(self):
+ mean = [0]
+ sigma = [1]
+ bad_sigma = [-1]
+ desired = np.array([0.67884390500697, 2.21653186290321, 1.01990310084276])
+
+ random = Generator(MT19937(self.seed))
+ lognormal = random.lognormal
+ actual = lognormal(mean * 3, sigma)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.lognormal(mean, sigma * 3)
+ assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
+
+ def test_rayleigh(self):
+ scale = [1]
+ bad_scale = [-1]
+ desired = np.array([0.60439534475066, 0.66120048396359, 1.67873398389499])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.rayleigh(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.rayleigh, bad_scale * 3)
+
+ def test_wald(self):
+ mean = [0.5]
+ scale = [1]
+ bad_mean = [0]
+ bad_scale = [-2]
+ desired = np.array([0.38052407392905, 0.50701641508592, 0.484935249864])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.wald, bad_mean * 3, scale)
+ assert_raises(ValueError, random.wald, mean * 3, bad_scale)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.wald(mean, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, random.wald, bad_mean, scale * 3)
+ assert_raises(ValueError, random.wald, mean, bad_scale * 3)
+
+ def test_triangular(self):
+ left = [1]
+ right = [3]
+ mode = [2]
+ bad_left_one = [3]
+ bad_mode_one = [4]
+ bad_left_two, bad_mode_two = right * 2
+ desired = np.array([1.57781954604754, 1.62665986867957, 2.30090130831326])
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left * 3, mode, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+ assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left, mode * 3, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+ assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
+
+ random = Generator(MT19937(self.seed))
+ triangular = random.triangular
+ actual = triangular(left, mode, right * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+ assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
+
+ assert_raises(ValueError, triangular, 10., 0., 20.)
+ assert_raises(ValueError, triangular, 10., 25., 20.)
+ assert_raises(ValueError, triangular, 10., 10., 10.)
+
+ def test_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ desired = np.array([0, 0, 1])
+
+ random = Generator(MT19937(self.seed))
+ binom = random.binomial
+ actual = binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n * 3, p)
+ assert_raises(ValueError, binom, n * 3, bad_p_one)
+ assert_raises(ValueError, binom, n * 3, bad_p_two)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.binomial(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n, p * 3)
+ assert_raises(ValueError, binom, n, bad_p_one * 3)
+ assert_raises(ValueError, binom, n, bad_p_two * 3)
+
+ def test_negative_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ desired = np.array([0, 2, 1], dtype=np.int64)
+
+ random = Generator(MT19937(self.seed))
+ neg_binom = random.negative_binomial
+ actual = neg_binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n * 3, p)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+
+ random = Generator(MT19937(self.seed))
+ neg_binom = random.negative_binomial
+ actual = neg_binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n, p * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+
+ def test_poisson(self):
+
+ lam = [1]
+ bad_lam_one = [-1]
+ desired = np.array([0, 0, 3])
+
+ random = Generator(MT19937(self.seed))
+ max_lam = random._poisson_lam_max
+ bad_lam_two = [max_lam * 2]
+ poisson = random.poisson
+ actual = poisson(lam * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, poisson, bad_lam_one * 3)
+ assert_raises(ValueError, poisson, bad_lam_two * 3)
+
+ def test_zipf(self):
+ a = [2]
+ bad_a = [0]
+ desired = np.array([1, 8, 1])
+
+ random = Generator(MT19937(self.seed))
+ zipf = random.zipf
+ actual = zipf(a * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, zipf, bad_a * 3)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, zipf, np.nan)
+ assert_raises(ValueError, zipf, [0, 0, np.nan])
+
+ def test_geometric(self):
+ p = [0.5]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ desired = np.array([1, 1, 3])
+
+ random = Generator(MT19937(self.seed))
+ geometric = random.geometric
+ actual = geometric(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, geometric, bad_p_one * 3)
+ assert_raises(ValueError, geometric, bad_p_two * 3)
+
+ def test_hypergeometric(self):
+ ngood = [1]
+ nbad = [2]
+ nsample = [2]
+ bad_ngood = [-1]
+ bad_nbad = [-2]
+ bad_nsample_one = [-1]
+ bad_nsample_two = [4]
+ desired = np.array([0, 0, 1])
+
+ random = Generator(MT19937(self.seed))
+ actual = random.hypergeometric(ngood * 3, nbad, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, random.hypergeometric, bad_ngood * 3, nbad, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood * 3, bad_nbad, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_one)
+ assert_raises(ValueError, random.hypergeometric, ngood * 3, nbad, bad_nsample_two)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.hypergeometric(ngood, nbad * 3, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, random.hypergeometric, bad_ngood, nbad * 3, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood, bad_nbad * 3, nsample)
+ assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_one)
+ assert_raises(ValueError, random.hypergeometric, ngood, nbad * 3, bad_nsample_two)
+
+ random = Generator(MT19937(self.seed))
+ hypergeom = random.hypergeometric
+ actual = hypergeom(ngood, nbad, nsample * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+
+ assert_raises(ValueError, hypergeom, -1, 10, 20)
+ assert_raises(ValueError, hypergeom, 10, -1, 20)
+ assert_raises(ValueError, hypergeom, 10, 10, -1)
+ assert_raises(ValueError, hypergeom, 10, 10, 25)
+
+ # ValueError for arguments that are too big.
+ assert_raises(ValueError, hypergeom, 2**30, 10, 20)
+ assert_raises(ValueError, hypergeom, 999, 2**31, 50)
+ assert_raises(ValueError, hypergeom, 999, [2**29, 2**30], 1000)
+
+ def test_logseries(self):
+ p = [0.5]
+ bad_p_one = [2]
+ bad_p_two = [-1]
+ desired = np.array([1, 1, 1])
+
+ random = Generator(MT19937(self.seed))
+ logseries = random.logseries
+ actual = logseries(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, logseries, bad_p_one * 3)
+ assert_raises(ValueError, logseries, bad_p_two * 3)
+
+ def test_multinomial(self):
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial([5, 20], [1 / 6.] * 6, size=(3, 2))
+ desired = np.array([[[0, 0, 2, 1, 2, 0],
+ [2, 3, 6, 4, 2, 3]],
+ [[1, 0, 1, 0, 2, 1],
+ [7, 2, 2, 1, 4, 4]],
+ [[0, 2, 0, 1, 2, 0],
+ [3, 2, 3, 3, 4, 5]]], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+ random = Generator(MT19937(self.seed))
+ actual = random.multinomial([5, 20], [1 / 6.] * 6)
+ desired = np.array([[0, 0, 2, 1, 2, 0],
+ [2, 3, 6, 4, 2, 3]], dtype=np.int64)
+ assert_array_equal(actual, desired)
+
+
+class TestThread(object):
+ # make sure each state produces the same sequence even in threads
+ def setup(self):
+ self.seeds = range(4)
+
+ def check_function(self, function, sz):
+ from threading import Thread
+
+ out1 = np.empty((len(self.seeds),) + sz)
+ out2 = np.empty((len(self.seeds),) + sz)
+
+ # threaded generation
+ t = [Thread(target=function, args=(Generator(MT19937(s)), o))
+ for s, o in zip(self.seeds, out1)]
+ [x.start() for x in t]
+ [x.join() for x in t]
+
+ # the same serial
+ for s, o in zip(self.seeds, out2):
+ function(Generator(MT19937(s)), o)
+
+ # these platforms change x87 fpu precision mode in threads
+ if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
+ assert_array_almost_equal(out1, out2)
+ else:
+ assert_array_equal(out1, out2)
+
+ def test_normal(self):
+ def gen_random(state, out):
+ out[...] = state.normal(size=10000)
+
+ self.check_function(gen_random, sz=(10000,))
+
+ def test_exp(self):
+ def gen_random(state, out):
+ out[...] = state.exponential(scale=np.ones((100, 1000)))
+
+ self.check_function(gen_random, sz=(100, 1000))
+
+ def test_multinomial(self):
+ def gen_random(state, out):
+ out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
+
+ self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput(object):
+ def setup(self):
+ self.argOne = np.array([2])
+ self.argTwo = np.array([3])
+ self.argThree = np.array([4])
+ self.tgtShape = (1,)
+
+ def test_one_arg_funcs(self):
+ funcs = (random.exponential, random.standard_gamma,
+ random.chisquare, random.standard_t,
+ random.pareto, random.weibull,
+ random.power, random.rayleigh,
+ random.poisson, random.zipf,
+ random.geometric, random.logseries)
+
+ probfuncs = (random.geometric, random.logseries)
+
+ for func in funcs:
+ if func in probfuncs: # p < 1.0
+ out = func(np.array([0.5]))
+
+ else:
+ out = func(self.argOne)
+
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_two_arg_funcs(self):
+ funcs = (random.uniform, random.normal,
+ random.beta, random.gamma,
+ random.f, random.noncentral_chisquare,
+ random.vonmises, random.laplace,
+ random.gumbel, random.logistic,
+ random.lognormal, random.wald,
+ random.binomial, random.negative_binomial)
+
+ probfuncs = (random.binomial, random.negative_binomial)
+
+ for func in funcs:
+ if func in probfuncs: # p <= 1
+ argTwo = np.array([0.5])
+
+ else:
+ argTwo = self.argTwo
+
+ out = func(self.argOne, argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, argTwo[0])
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_integers(self, endpoint):
+ itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+ func = random.integers
+ high = np.array([1])
+ low = np.array([0])
+
+ for dt in itype:
+ out = func(low, high, endpoint=endpoint, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(low[0], high, endpoint=endpoint, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(low, high[0], endpoint=endpoint, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_three_arg_funcs(self):
+ funcs = [random.noncentral_f, random.triangular,
+ random.hypergeometric]
+
+ for func in funcs:
+ out = func(self.argOne, self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, self.argTwo[0], self.argThree)
+ assert_equal(out.shape, self.tgtShape)
diff --git a/numpy/random/tests/test_generator_mt19937_regressions.py b/numpy/random/tests/test_generator_mt19937_regressions.py
new file mode 100644
index 000000000..3a937f997
--- /dev/null
+++ b/numpy/random/tests/test_generator_mt19937_regressions.py
@@ -0,0 +1,158 @@
+import sys
+from numpy.testing import (assert_, assert_array_equal)
+from numpy.compat import long
+import numpy as np
+import pytest
+from numpy.random import Generator, MT19937
+
+mt19937 = Generator(MT19937())
+
+
+class TestRegression(object):
+
+ def test_VonMises_range(self):
+ # Make sure generated random variables are in [-pi, pi].
+ # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5):
+ r = mt19937.vonmises(mu, 1, 50)
+ assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+ def test_hypergeometric_range(self):
+ # Test for ticket #921
+ assert_(np.all(mt19937.hypergeometric(3, 18, 11, size=10) < 4))
+ assert_(np.all(mt19937.hypergeometric(18, 3, 11, size=10) > 0))
+
+ # Test for ticket #5623
+ args = (2**20 - 2, 2**20 - 2, 2**20 - 2) # Check for 32-bit systems
+ assert_(mt19937.hypergeometric(*args) > 0)
+
+ def test_logseries_convergence(self):
+ # Test for ticket #923
+ N = 1000
+ mt19937 = Generator(MT19937(0))
+ rvsn = mt19937.logseries(0.8, size=N)
+ # these two frequency counts should be close to theoretical
+ # numbers with this large sample
+ # theoretical large N result is 0.49706795
+ freq = np.sum(rvsn == 1) / float(N)
+ msg = "Frequency was %f, should be > 0.45" % freq
+ assert_(freq > 0.45, msg)
+ # theoretical large N result is 0.19882718
+ freq = np.sum(rvsn == 2) / float(N)
+ msg = "Frequency was %f, should be < 0.23" % freq
+ assert_(freq < 0.23, msg)
+
+ def test_permutation_longs(self):
+ mt19937 = Generator(MT19937(1234))
+ a = mt19937.permutation(12)
+ mt19937 = Generator(MT19937(1234))
+ b = mt19937.permutation(long(12))
+ assert_array_equal(a, b)
+
+ def test_shuffle_mixed_dimension(self):
+ # Test for trac ticket #2074
+ for t in [[1, 2, 3, None],
+ [(1, 1), (2, 2), (3, 3), None],
+ [1, (2, 2), (3, 3), None],
+ [(1, 1), 2, 3, None]]:
+ mt19937 = Generator(MT19937(12345))
+ shuffled = list(t)
+ mt19937.shuffle(shuffled)
+ assert_array_equal(shuffled, [t[2], t[0], t[3], t[1]])
+
+ def test_call_within_randomstate(self):
+ # Check that custom BitGenerator does not call into global state
+ res = np.array([1, 8, 0, 1, 5, 3, 3, 8, 1, 4])
+ for i in range(3):
+ mt19937 = Generator(MT19937(i))
+ m = Generator(MT19937(4321))
+ # If m.state is not honored, the result will change
+ assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
+
+ def test_multivariate_normal_size_types(self):
+ # Test for multivariate_normal issue with 'size' argument.
+ # Check that the multivariate_normal size argument can be a
+ # numpy integer.
+ mt19937.multivariate_normal([0], [[0]], size=1)
+ mt19937.multivariate_normal([0], [[0]], size=np.int_(1))
+ mt19937.multivariate_normal([0], [[0]], size=np.int64(1))
+
+ def test_beta_small_parameters(self):
+ # Test that beta with small a and b parameters does not produce
+ # NaNs due to roundoff errors causing 0 / 0, gh-5851
+ mt19937 = Generator(MT19937(1234567890))
+ x = mt19937.beta(0.0001, 0.0001, size=100)
+ assert_(not np.any(np.isnan(x)), 'Nans in mt19937.beta')
+
+ def test_choice_sum_of_probs_tolerance(self):
+ # The sum of probs should be 1.0 with some tolerance.
+ # For low precision dtypes the tolerance was too tight.
+ # See numpy github issue 6123.
+ mt19937 = Generator(MT19937(1234))
+ a = [1, 2, 3]
+ counts = [4, 4, 2]
+ for dt in np.float16, np.float32, np.float64:
+ probs = np.array(counts, dtype=dt) / sum(counts)
+ c = mt19937.choice(a, p=probs)
+ assert_(c in a)
+ with pytest.raises(ValueError):
+ mt19937.choice(a, p=probs*0.9)
+
+ def test_shuffle_of_array_of_different_length_strings(self):
+ # Test that permuting an array of different length strings
+ # will not cause a segfault on garbage collection
+ # Tests gh-7710
+ mt19937 = Generator(MT19937(1234))
+
+ a = np.array(['a', 'a' * 1000])
+
+ for _ in range(100):
+ mt19937.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_shuffle_of_array_of_objects(self):
+ # Test that permuting an array of objects will not cause
+ # a segfault on garbage collection.
+ # See gh-7719
+ mt19937 = Generator(MT19937(1234))
+ a = np.array([np.arange(1), np.arange(4)])
+
+ for _ in range(1000):
+ mt19937.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_permutation_subclass(self):
+ class N(np.ndarray):
+ pass
+
+ mt19937 = Generator(MT19937(1))
+ orig = np.arange(3).view(N)
+ perm = mt19937.permutation(orig)
+ assert_array_equal(perm, np.array([2, 0, 1]))
+ assert_array_equal(orig, np.arange(3).view(N))
+
+ class M(object):
+ a = np.arange(5)
+
+ def __array__(self):
+ return self.a
+
+ mt19937 = Generator(MT19937(1))
+ m = M()
+ perm = mt19937.permutation(m)
+ assert_array_equal(perm, np.array([4, 1, 3, 0, 2]))
+ assert_array_equal(m.__array__(), np.arange(5))
+
+ def test_gamma_0(self):
+ assert mt19937.standard_gamma(0.0) == 0.0
+ assert_array_equal(mt19937.standard_gamma([0.0]), 0.0)
+
+ actual = mt19937.standard_gamma([0.0], dtype='float')
+ expected = np.array([0.], dtype=np.float32)
+ assert_array_equal(actual, expected)
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 42816a943..37bd121f3 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -43,7 +43,8 @@ class TestSeed(object):
def test_invalid_array_shape(self):
# gh-9832
- assert_raises(ValueError, np.random.RandomState, np.array([], dtype=np.int64))
+ assert_raises(ValueError, np.random.RandomState,
+ np.array([], dtype=np.int64))
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3]])
assert_raises(ValueError, np.random.RandomState, [[1, 2, 3],
[4, 5, 6]])
@@ -349,9 +350,9 @@ class TestRandomDist(object):
np.random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
- def test_random_sample(self):
+ def test_random(self):
np.random.seed(self.seed)
- actual = np.random.random_sample((3, 2))
+ actual = np.random.random((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
@@ -400,6 +401,10 @@ class TestRandomDist(object):
assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
assert_raises(ValueError, sample, [1, 2, 3], 2,
replace=False, p=[1, 0, 0])
@@ -445,9 +450,15 @@ class TestRandomDist(object):
assert_equal(np.random.randint(10, 10, size=0).shape, (0,))
assert_equal(np.random.choice(0, size=0).shape, (0,))
assert_equal(np.random.choice([], size=(0,)).shape, (0,))
- assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(np.random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ (3, 0, 4))
assert_raises(ValueError, np.random.choice, [], 10)
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+ p = [None, None, None]
+ assert_raises(ValueError, np.random.choice, a, p=p)
+
def test_bytes(self):
np.random.seed(self.seed)
actual = np.random.bytes(10)
@@ -466,10 +477,13 @@ class TestRandomDist(object):
lambda x: [(i, i) for i in x],
lambda x: np.asarray([[i, i] for i in x]),
lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
# gh-4270
lambda x: np.asarray([(i, i) for i in x],
- [("a", object, 1),
- ("b", np.int32, 1)])]:
+ [("a", object), ("b", np.int32)])]:
np.random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
np.random.shuffle(alist)
@@ -502,7 +516,7 @@ class TestRandomDist(object):
def test_binomial(self):
np.random.seed(self.seed)
- actual = np.random.binomial(100.123, .456, size=(3, 2))
+ actual = np.random.binomial(100, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
@@ -599,7 +613,7 @@ class TestRandomDist(object):
def test_hypergeometric(self):
np.random.seed(self.seed)
- actual = np.random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ actual = np.random.hypergeometric(10, 5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
@@ -680,7 +694,7 @@ class TestRandomDist(object):
cov = [[1, 0], [0, 1]]
size = (3, 2)
actual = np.random.multivariate_normal(mean, cov, size)
- desired = np.array([[[1.463620246718631, 11.73759122771936 ],
+ desired = np.array([[[1.463620246718631, 11.73759122771936],
[1.622445133300628, 9.771356667546383]],
[[2.154490787682787, 12.170324946056553],
[1.719909438201865, 9.230548443648306]],
@@ -708,6 +722,12 @@ class TestRandomDist(object):
assert_raises(ValueError, np.random.multivariate_normal, mean, cov,
check_valid='raise')
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ np.random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
def test_negative_binomial(self):
np.random.seed(self.seed)
actual = np.random.negative_binomial(n=100, p=.12345, size=(3, 2))
@@ -898,12 +918,15 @@ class TestRandomDist(object):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
- assert_raises(TypeError, np.random.uniform, throwing_float, throwing_float)
+ assert_raises(TypeError, np.random.uniform, throwing_float,
+ throwing_float)
class ThrowingInteger(np.ndarray):
def __int__(self):
raise TypeError
+ __index__ = __int__
+
throwing_int = np.array(1).view(ThrowingInteger)
assert_raises(TypeError, np.random.hypergeometric, throwing_int, 1, 1)
@@ -938,7 +961,8 @@ class TestRandomDist(object):
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
- assert_equal(np.random.weibull(a=0), 0)
+ np.random.seed(self.seed)
+ assert_equal(np.random.weibull(a=0, size=12), np.zeros(12))
assert_raises(ValueError, np.random.weibull, a=-0.)
def test_zipf(self):
@@ -1344,6 +1368,8 @@ class TestBroadcast(object):
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
+ assert_raises(ValueError, wald, 0.0, 1)
+ assert_raises(ValueError, wald, 0.5, 0.0)
def test_triangular(self):
left = [1]
@@ -1362,21 +1388,24 @@ class TestBroadcast(object):
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
- assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
self.setSeed()
actual = triangular(left, mode * 3, right)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
- assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
self.setSeed()
actual = triangular(left, mode, right * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
- assert_raises(ValueError, triangular, bad_left_two, bad_mode_two, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
def test_binomial(self):
n = [1]
@@ -1425,7 +1454,7 @@ class TestBroadcast(object):
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
def test_poisson(self):
- max_lam = np.random.RandomState().poisson_lam_max
+ max_lam = np.random.RandomState()._poisson_lam_max
lam = [1]
bad_lam_one = [-1]
@@ -1514,6 +1543,7 @@ class TestBroadcast(object):
assert_raises(ValueError, logseries, bad_p_one * 3)
assert_raises(ValueError, logseries, bad_p_two * 3)
+
class TestThread(object):
# make sure each state produces the same sequence even in threads
def setup(self):
@@ -1556,6 +1586,7 @@ class TestThread(object):
out[...] = state.multinomial(10, [1/6.]*6, size=10000)
self.check_function(gen_random, sz=(10000, 6))
+
# See Issue #4263
class TestSingleEltArrayInput(object):
def setup(self):
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
new file mode 100644
index 000000000..a0edc5c23
--- /dev/null
+++ b/numpy/random/tests/test_randomstate.py
@@ -0,0 +1,1966 @@
+import hashlib
+import pickle
+import sys
+import warnings
+
+import numpy as np
+import pytest
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_warns,
+ assert_no_warnings, assert_array_equal, assert_array_almost_equal,
+ suppress_warnings
+ )
+
+from numpy.random import MT19937, PCG64, mtrand as random
+
+INT_FUNCS = {'binomial': (100.0, 0.6),
+ 'geometric': (.5,),
+ 'hypergeometric': (20, 20, 10),
+ 'logseries': (.5,),
+ 'multinomial': (20, np.ones(6) / 6.0),
+ 'negative_binomial': (100, .5),
+ 'poisson': (10.0,),
+ 'zipf': (2,),
+ }
+
+if np.iinfo(int).max < 2**32:
+ # Windows and some 32-bit platforms, e.g., ARM
+ INT_FUNC_HASHES = {'binomial': '670e1c04223ffdbab27e08fbbad7bdba',
+ 'logseries': '6bd0183d2f8030c61b0d6e11aaa60caf',
+ 'geometric': '6e9df886f3e1e15a643168568d5280c0',
+ 'hypergeometric': '7964aa611b046aecd33063b90f4dec06',
+ 'multinomial': '68a0b049c16411ed0aa4aff3572431e4',
+ 'negative_binomial': 'dc265219eec62b4338d39f849cd36d09',
+ 'poisson': '7b4dce8e43552fc82701c2fa8e94dc6e',
+ 'zipf': 'fcd2a2095f34578723ac45e43aca48c5',
+ }
+else:
+ INT_FUNC_HASHES = {'binomial': 'b5f8dcd74f172836536deb3547257b14',
+ 'geometric': '8814571f45c87c59699d62ccd3d6c350',
+ 'hypergeometric': 'bc64ae5976eac452115a16dad2dcf642',
+ 'logseries': '84be924b37485a27c4a98797bc88a7a4',
+ 'multinomial': 'ec3c7f9cf9664044bb0c6fb106934200',
+ 'negative_binomial': '210533b2234943591364d0117a552969',
+ 'poisson': '0536a8850c79da0c78defd742dccc3e0',
+ 'zipf': 'f2841f504dd2525cd67cdcad7561e532',
+ }
+
+
+@pytest.fixture(scope='module', params=INT_FUNCS)
+def int_func(request):
+ return (request.param, INT_FUNCS[request.param],
+ INT_FUNC_HASHES[request.param])
+
+
+def assert_mt19937_state_equal(a, b):
+ assert_equal(a['bit_generator'], b['bit_generator'])
+ assert_array_equal(a['state']['key'], b['state']['key'])
+ assert_array_equal(a['state']['pos'], b['state']['pos'])
+ assert_equal(a['has_gauss'], b['has_gauss'])
+ assert_equal(a['gauss'], b['gauss'])
+
+
+class TestSeed(object):
+ def test_scalar(self):
+ s = random.RandomState(0)
+ assert_equal(s.randint(1000), 684)
+ s = random.RandomState(4294967295)
+ assert_equal(s.randint(1000), 419)
+
+ def test_array(self):
+ s = random.RandomState(range(10))
+ assert_equal(s.randint(1000), 468)
+ s = random.RandomState(np.arange(10))
+ assert_equal(s.randint(1000), 468)
+ s = random.RandomState([0])
+ assert_equal(s.randint(1000), 973)
+ s = random.RandomState([4294967295])
+ assert_equal(s.randint(1000), 265)
+
+ def test_invalid_scalar(self):
+ # seed must be an unsigned 32 bit integer
+ assert_raises(TypeError, random.RandomState, -0.5)
+ assert_raises(ValueError, random.RandomState, -1)
+
+ def test_invalid_array(self):
+ # seed must be an unsigned 32 bit integer
+ assert_raises(TypeError, random.RandomState, [-0.5])
+ assert_raises(ValueError, random.RandomState, [-1])
+ assert_raises(ValueError, random.RandomState, [4294967296])
+ assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
+ assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
+
+ def test_invalid_array_shape(self):
+ # gh-9832
+ assert_raises(ValueError, random.RandomState, np.array([],
+ dtype=np.int64))
+ assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
+ assert_raises(ValueError, random.RandomState, [[1, 2, 3],
+ [4, 5, 6]])
+
+ def test_cannot_seed(self):
+ rs = random.RandomState(PCG64(0))
+ with assert_raises(TypeError):
+ rs.seed(1234)
+
+ def test_invalid_initialization(self):
+ assert_raises(ValueError, random.RandomState, MT19937)
+
+
+class TestBinomial(object):
+ def test_n_zero(self):
+ # Tests the corner case of n == 0 for the binomial distribution.
+ # binomial(0, p) should be zero for any p in [0, 1].
+ # This test addresses issue #3480.
+ zeros = np.zeros(2, dtype='int')
+ for p in [0, .5, 1]:
+ assert_(random.binomial(0, p) == 0)
+ assert_array_equal(random.binomial(zeros, p), zeros)
+
+ def test_p_is_nan(self):
+ # Issue #4571.
+ assert_raises(ValueError, random.binomial, 1, np.nan)
+
+
+class TestMultinomial(object):
+ def test_basic(self):
+ random.multinomial(100, [0.2, 0.8])
+
+ def test_zero_probability(self):
+ random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
+
+ def test_int_negative_interval(self):
+ assert_(-5 <= random.randint(-5, -1) < -1)
+ x = random.randint(-5, -1, 5)
+ assert_(np.all(-5 <= x))
+ assert_(np.all(x < -1))
+
+ def test_size(self):
+ # gh-3173
+ p = [0.5, 0.5]
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
+ (2, 2, 2))
+
+ assert_raises(TypeError, random.multinomial, 1, p,
+ float(1))
+
+ def test_invalid_prob(self):
+ assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
+ assert_raises(ValueError, random.multinomial, 100, [-.1, 0.9])
+
+ def test_invalid_n(self):
+ assert_raises(ValueError, random.multinomial, -1, [0.8, 0.2])
+
+ def test_p_non_contiguous(self):
+ p = np.arange(15.)
+ p /= np.sum(p[1::3])
+ pvals = p[1::3]
+ random.seed(1432985819)
+ non_contig = random.multinomial(100, pvals=pvals)
+ random.seed(1432985819)
+ contig = random.multinomial(100, pvals=np.ascontiguousarray(pvals))
+ assert_array_equal(non_contig, contig)
+
+
+class TestSetState(object):
+ def setup(self):
+ self.seed = 1234567890
+ self.random_state = random.RandomState(self.seed)
+ self.state = self.random_state.get_state()
+
+ def test_basic(self):
+ old = self.random_state.tomaxint(16)
+ self.random_state.set_state(self.state)
+ new = self.random_state.tomaxint(16)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset(self):
+ # Make sure the cached every-other-Gaussian is reset.
+ old = self.random_state.standard_normal(size=3)
+ self.random_state.set_state(self.state)
+ new = self.random_state.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset_in_media_res(self):
+ # When the state is saved with a cached Gaussian, make sure the
+ # cached Gaussian is restored.
+
+ self.random_state.standard_normal()
+ state = self.random_state.get_state()
+ old = self.random_state.standard_normal(size=3)
+ self.random_state.set_state(state)
+ new = self.random_state.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_backwards_compatibility(self):
+ # Make sure we can accept old state tuples that do not have the
+ # cached Gaussian value.
+ old_state = self.state[:-2]
+ x1 = self.random_state.standard_normal(size=16)
+ self.random_state.set_state(old_state)
+ x2 = self.random_state.standard_normal(size=16)
+ self.random_state.set_state(self.state)
+ x3 = self.random_state.standard_normal(size=16)
+ assert_(np.all(x1 == x2))
+ assert_(np.all(x1 == x3))
+
+ def test_negative_binomial(self):
+ # Ensure that the negative binomial results take floating point
+ # arguments without truncation.
+ self.random_state.negative_binomial(0.5, 0.5)
+
+ def test_get_state_warning(self):
+ rs = random.RandomState(PCG64())
+ with suppress_warnings() as sup:
+ w = sup.record(RuntimeWarning)
+ state = rs.get_state()
+ assert_(len(w) == 1)
+ assert isinstance(state, dict)
+ assert state['bit_generator'] == 'PCG64'
+
+ def test_invalid_legacy_state_setting(self):
+ state = self.random_state.get_state()
+ new_state = ('Unknown', ) + state[1:]
+ assert_raises(ValueError, self.random_state.set_state, new_state)
+ assert_raises(TypeError, self.random_state.set_state,
+ np.array(new_state, dtype=np.object))
+ state = self.random_state.get_state(legacy=False)
+ del state['bit_generator']
+ assert_raises(ValueError, self.random_state.set_state, state)
+
+ def test_pickle(self):
+ self.random_state.seed(0)
+ self.random_state.random_sample(100)
+ self.random_state.standard_normal()
+ pickled = self.random_state.get_state(legacy=False)
+ assert_equal(pickled['has_gauss'], 1)
+ rs_unpick = pickle.loads(pickle.dumps(self.random_state))
+ unpickled = rs_unpick.get_state(legacy=False)
+ assert_mt19937_state_equal(pickled, unpickled)
+
+ def test_state_setting(self):
+ attr_state = self.random_state.__getstate__()
+ self.random_state.standard_normal()
+ self.random_state.__setstate__(attr_state)
+ state = self.random_state.get_state(legacy=False)
+ assert_mt19937_state_equal(attr_state, state)
+
+ def test_repr(self):
+ assert repr(self.random_state).startswith('RandomState(MT19937)')
+
+
+class TestRandint(object):
+
+ rfunc = random.randint
+
+ # valid integer/boolean types
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+
+ def test_unsupported_type(self):
+ assert_raises(TypeError, self.rfunc, 1, dtype=float)
+
+ def test_bounds_checking(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+ assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
+
+ def test_rng_zero_and_extremes(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd)//2
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ def test_full_range(self):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ try:
+ self.rfunc(lbnd, ubnd, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_in_bounds_fuzz(self):
+ # Don't use fixed seed
+ random.seed()
+
+ for dt in self.itype[1:]:
+ for ubnd in [4, 8, 16]:
+ vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+ assert_(vals.max() < ubnd)
+ assert_(vals.min() >= 2)
+
+ vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
+
+ assert_(vals.max() < 2)
+ assert_(vals.min() >= 0)
+
+ def test_repeatability(self):
+ # We use a md5 hash of generated sequences of 1000 samples
+ # in the range [0, 6) for all but bool, where the range
+ # is [0, 2). Hashes are for little endian numbers.
+ tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
+ 'int16': '1b7741b80964bb190c50d541dca1cac1',
+ 'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
+ 'int64': '17db902806f448331b5a758d7d2ee672',
+ 'int8': '27dd30c4e08a797063dffac2490b0be6',
+ 'uint16': '1b7741b80964bb190c50d541dca1cac1',
+ 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
+ 'uint64': '17db902806f448331b5a758d7d2ee672',
+ 'uint8': '27dd30c4e08a797063dffac2490b0be6'}
+
+ for dt in self.itype[1:]:
+ random.seed(1234)
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = self.rfunc(0, 6, size=1000, dtype=dt)
+ else:
+ val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+ res = hashlib.md5(val.view(np.int8)).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ random.seed(1234)
+ val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+ res = hashlib.md5(val).hexdigest()
+ assert_(tgt[np.dtype(bool).name] == res)
+
+ def test_int64_uint64_corner_case(self):
+ # When stored in Numpy arrays, `lbnd` is casted
+ # as np.int64, and `ubnd` is casted as np.uint64.
+ # Checking whether `lbnd` >= `ubnd` used to be
+ # done solely via direct comparison, which is incorrect
+ # because when Numpy tries to compare both numbers,
+ # it casts both to np.float64 because there is
+ # no integer superset of np.int64 and np.uint64. However,
+ # `ubnd` is too large to be represented in np.float64,
+ # causing it be round down to np.iinfo(np.int64).max,
+ # leading to a ValueError because `lbnd` now equals
+ # the new `ubnd`.
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+ # None of these function calls should
+ # generate a ValueError now.
+ actual = random.randint(lbnd, ubnd, dtype=dt)
+ assert_equal(actual, tgt)
+
+ def test_respect_dtype_singleton(self):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_equal(sample.dtype, np.dtype(dt))
+
+ for dt in (bool, int, np.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+
+ # gh-7284: Ensure that we get Python data types
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_(not hasattr(sample, 'dtype'))
+ assert_equal(type(sample), dt)
+
+
+class TestRandomDist(object):
+ # Make sure the random distribution returns the correct value for a
+ # given seed
+
+ def setup(self):
+ self.seed = 1234567890
+
+ def test_rand(self):
+ random.seed(self.seed)
+ actual = random.rand(3, 2)
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rand_singleton(self):
+ random.seed(self.seed)
+ actual = random.rand()
+ desired = 0.61879477158567997
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn(self):
+ random.seed(self.seed)
+ actual = random.randn(3, 2)
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random.seed(self.seed)
+ actual = random.randn()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_randint(self):
+ random.seed(self.seed)
+ actual = random.randint(-99, 99, size=(3, 2))
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ def test_random_integers(self):
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(-99, 99, size=(3, 2))
+ assert_(len(w) == 1)
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(198, size=(3, 2))
+ assert_(len(w) == 1)
+ assert_array_equal(actual, desired + 100)
+
+ def test_tomaxint(self):
+ random.seed(self.seed)
+ rs = random.RandomState(self.seed)
+ actual = rs.tomaxint(size=(3, 2))
+ if np.iinfo(np.int).max == 2147483647:
+ desired = np.array([[1328851649, 731237375],
+ [1270502067, 320041495],
+ [1908433478, 499156889]], dtype=np.int64)
+ else:
+ desired = np.array([[5707374374421908479, 5456764827585442327],
+ [8196659375100692377, 8224063923314595285],
+ [4220315081820346526, 7177518203184491332]],
+ dtype=np.int64)
+
+ assert_equal(actual, desired)
+
+ rs.seed(self.seed)
+ actual = rs.tomaxint()
+ assert_equal(actual, desired[0, 0])
+
+ def test_random_integers_max_int(self):
+ # Tests whether random_integers can generate the
+ # maximum allowed Python int that can be converted
+ # into a C long. Previous implementations of this
+ # method have thrown an OverflowError when attempting
+ # to generate this integer.
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(np.iinfo('l').max,
+ np.iinfo('l').max)
+ assert_(len(w) == 1)
+
+ desired = np.iinfo('l').max
+ assert_equal(actual, desired)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ typer = np.dtype('l').type
+ actual = random.random_integers(typer(np.iinfo('l').max),
+ typer(np.iinfo('l').max))
+ assert_(len(w) == 1)
+ assert_equal(actual, desired)
+
+ def test_random_integers_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # DeprecationWarning raised with high == None
+ assert_raises(DeprecationWarning,
+ random.random_integers,
+ np.iinfo('l').max)
+
+ # DeprecationWarning raised with high != None
+ assert_raises(DeprecationWarning,
+ random.random_integers,
+ np.iinfo('l').max, np.iinfo('l').max)
+
+ def test_random_sample(self):
+ random.seed(self.seed)
+ actual = random.random_sample((3, 2))
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random.seed(self.seed)
+ actual = random.random_sample()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_choice_uniform_replace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 4)
+ desired = np.array([2, 3, 2, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_replace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+ desired = np.array([1, 1, 2, 2])
+ assert_array_equal(actual, desired)
+
+ def test_choice_uniform_noreplace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False)
+ desired = np.array([0, 1, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_noreplace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
+ desired = np.array([2, 3, 1])
+ assert_array_equal(actual, desired)
+
+ def test_choice_noninteger(self):
+ random.seed(self.seed)
+ actual = random.choice(['a', 'b', 'c', 'd'], 4)
+ desired = np.array(['c', 'd', 'c', 'd'])
+ assert_array_equal(actual, desired)
+
+ def test_choice_exceptions(self):
+ sample = random.choice
+ assert_raises(ValueError, sample, -1, 3)
+ assert_raises(ValueError, sample, 3., 3)
+ assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
+ assert_raises(ValueError, sample, [], 3)
+ assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
+ p=[[0.25, 0.25], [0.25, 0.25]])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
+ assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], 2,
+ replace=False, p=[1, 0, 0])
+
+ def test_choice_return_shape(self):
+ p = [0.1, 0.9]
+ # Check scalar
+ assert_(np.isscalar(random.choice(2, replace=True)))
+ assert_(np.isscalar(random.choice(2, replace=False)))
+ assert_(np.isscalar(random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(random.choice([1, 2], replace=True)))
+ assert_(random.choice([None], replace=True) is None)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, replace=True) is a)
+
+ # Check 0-d array
+ s = tuple()
+ assert_(not np.isscalar(random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
+ assert_(random.choice([None], s, replace=True).ndim == 0)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, s, replace=True).item() is a)
+
+ # Check multi dimensional array
+ s = (2, 3)
+ p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+ assert_equal(random.choice(6, s, replace=True).shape, s)
+ assert_equal(random.choice(6, s, replace=False).shape, s)
+ assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
+ assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
+ assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
+
+ # Check zero-size
+ assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(random.randint(0, -10, size=0).shape, (0,))
+ assert_equal(random.randint(10, 10, size=0).shape, (0,))
+ assert_equal(random.choice(0, size=0).shape, (0,))
+ assert_equal(random.choice([], size=(0,)).shape, (0,))
+ assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_raises(ValueError, random.choice, [], 10)
+
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+ p = [None, None, None]
+ assert_raises(ValueError, random.choice, a, p=p)
+
+ def test_choice_p_non_contiguous(self):
+ p = np.ones(10) / 5
+ p[1::2] = 3.0
+ random.seed(self.seed)
+ non_contig = random.choice(5, 3, p=p[::2])
+ random.seed(self.seed)
+ contig = random.choice(5, 3, p=np.ascontiguousarray(p[::2]))
+ assert_array_equal(non_contig, contig)
+
+ def test_bytes(self):
+ random.seed(self.seed)
+ actual = random.bytes(10)
+ desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
+ assert_equal(actual, desired)
+
+ def test_shuffle(self):
+ # Test lists, arrays (of various dtypes), and multidimensional versions
+ # of both, c-contiguous or not:
+ for conv in [lambda x: np.array([]),
+ lambda x: x,
+ lambda x: np.asarray(x).astype(np.int8),
+ lambda x: np.asarray(x).astype(np.float32),
+ lambda x: np.asarray(x).astype(np.complex64),
+ lambda x: np.asarray(x).astype(object),
+ lambda x: [(i, i) for i in x],
+ lambda x: np.asarray([[i, i] for i in x]),
+ lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
+ # gh-4270
+ lambda x: np.asarray([(i, i) for i in x],
+ [("a", object, (1,)),
+ ("b", np.int32, (1,))])]:
+ random.seed(self.seed)
+ alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+ random.shuffle(alist)
+ actual = alist
+ desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
+ assert_array_equal(actual, desired)
+
+ def test_shuffle_masked(self):
+ # gh-3263
+ a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+ b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+ a_orig = a.copy()
+ b_orig = b.copy()
+ for i in range(50):
+ random.shuffle(a)
+ assert_equal(
+ sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+ random.shuffle(b)
+ assert_equal(
+ sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+ def test_permutation(self):
+ random.seed(self.seed)
+ alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
+ actual = random.permutation(alist)
+ desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
+ actual = random.permutation(arr_2d)
+ assert_array_equal(actual, np.atleast_2d(desired).T)
+
+ random.seed(self.seed)
+ bad_x_str = "abcd"
+ assert_raises(IndexError, random.permutation, bad_x_str)
+
+ random.seed(self.seed)
+ bad_x_float = 1.2
+ assert_raises(IndexError, random.permutation, bad_x_float)
+
+ integer_val = 10
+ desired = [9, 0, 8, 5, 1, 3, 4, 7, 6, 2]
+
+ random.seed(self.seed)
+ actual = random.permutation(integer_val)
+ assert_array_equal(actual, desired)
+
+ def test_beta(self):
+ random.seed(self.seed)
+ actual = random.beta(.1, .9, size=(3, 2))
+ desired = np.array(
+ [[1.45341850513746058e-02, 5.31297615662868145e-04],
+ [1.85366619058432324e-06, 4.19214516800110563e-03],
+ [1.58405155108498093e-04, 1.26252891949397652e-04]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_binomial(self):
+ random.seed(self.seed)
+ actual = random.binomial(100.123, .456, size=(3, 2))
+ desired = np.array([[37, 43],
+ [42, 48],
+ [46, 45]])
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ actual = random.binomial(100.123, .456)
+ desired = 37
+ assert_array_equal(actual, desired)
+
+ def test_chisquare(self):
+ random.seed(self.seed)
+ actual = random.chisquare(50, size=(3, 2))
+ desired = np.array([[63.87858175501090585, 68.68407748911370447],
+ [65.77116116901505904, 47.09686762438974483],
+ [72.3828403199695174, 74.18408615260374006]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_dirichlet(self):
+ random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha, size=(3, 2))
+ desired = np.array([[[0.54539444573611562, 0.45460555426388438],
+ [0.62345816822039413, 0.37654183177960598]],
+ [[0.55206000085785778, 0.44793999914214233],
+ [0.58964023305154301, 0.41035976694845688]],
+ [[0.59266909280647828, 0.40733090719352177],
+ [0.56974431743975207, 0.43025568256024799]]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+ bad_alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, bad_alpha)
+
+ random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha)
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_dirichlet_size(self):
+ # gh-3173
+ p = np.array([51.72840233779265162, 39.74494232180943953])
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+
+ assert_raises(TypeError, random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, alpha)
+
+ def test_dirichlet_alpha_non_contiguous(self):
+ a = np.array([51.72840233779265162, -1.0, 39.74494232180943953])
+ alpha = a[::2]
+ random.seed(self.seed)
+ non_contig = random.dirichlet(alpha, size=(3, 2))
+ random.seed(self.seed)
+ contig = random.dirichlet(np.ascontiguousarray(alpha),
+ size=(3, 2))
+ assert_array_almost_equal(non_contig, contig)
+
+ def test_exponential(self):
+ random.seed(self.seed)
+ actual = random.exponential(1.1234, size=(3, 2))
+ desired = np.array([[1.08342649775011624, 1.00607889924557314],
+ [2.46628830085216721, 2.49668106809923884],
+ [0.68717433461363442, 1.69175666993575979]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_exponential_0(self):
+ assert_equal(random.exponential(scale=0), 0)
+ assert_raises(ValueError, random.exponential, scale=-0.)
+
+ def test_f(self):
+ random.seed(self.seed)
+ actual = random.f(12, 77, size=(3, 2))
+ desired = np.array([[1.21975394418575878, 1.75135759791559775],
+ [1.44803115017146489, 1.22108959480396262],
+ [1.02176975757740629, 1.34431827623300415]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gamma(self):
+ random.seed(self.seed)
+ actual = random.gamma(5, 3, size=(3, 2))
+ desired = np.array([[24.60509188649287182, 28.54993563207210627],
+ [26.13476110204064184, 12.56988482927716078],
+ [31.71863275789960568, 33.30143302795922011]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_gamma_0(self):
+ assert_equal(random.gamma(shape=0, scale=0), 0)
+ assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
+
+ def test_geometric(self):
+ random.seed(self.seed)
+ actual = random.geometric(.123456789, size=(3, 2))
+ desired = np.array([[8, 7],
+ [17, 17],
+ [5, 12]])
+ assert_array_equal(actual, desired)
+
+ def test_geometric_exceptions(self):
+ assert_raises(ValueError, random.geometric, 1.1)
+ assert_raises(ValueError, random.geometric, [1.1] * 10)
+ assert_raises(ValueError, random.geometric, -0.1)
+ assert_raises(ValueError, random.geometric, [-0.1] * 10)
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.geometric, np.nan)
+ assert_raises(ValueError, random.geometric, [np.nan] * 10)
+
+ def test_gumbel(self):
+ random.seed(self.seed)
+ actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.19591898743416816, 0.34405539668096674],
+ [-1.4492522252274278, -1.47374816298446865],
+ [1.10651090478803416, -0.69535848626236174]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gumbel_0(self):
+ assert_equal(random.gumbel(scale=0), 0)
+ assert_raises(ValueError, random.gumbel, scale=-0.)
+
+ def test_hypergeometric(self):
+ random.seed(self.seed)
+ actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ desired = np.array([[10, 10],
+ [10, 10],
+ [9, 9]])
+ assert_array_equal(actual, desired)
+
+ # Test nbad = 0
+ actual = random.hypergeometric(5, 0, 3, size=4)
+ desired = np.array([3, 3, 3, 3])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(15, 0, 12, size=4)
+ desired = np.array([12, 12, 12, 12])
+ assert_array_equal(actual, desired)
+
+ # Test ngood = 0
+ actual = random.hypergeometric(0, 5, 3, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(0, 15, 12, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ def test_laplace(self):
+ random.seed(self.seed)
+ actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.66599721112760157, 0.52829452552221945],
+ [3.12791959514407125, 3.18202813572992005],
+ [-0.05391065675859356, 1.74901336242837324]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_laplace_0(self):
+ assert_equal(random.laplace(scale=0), 0)
+ assert_raises(ValueError, random.laplace, scale=-0.)
+
+ def test_logistic(self):
+ random.seed(self.seed)
+ actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[1.09232835305011444, 0.8648196662399954],
+ [4.27818590694950185, 4.33897006346929714],
+ [-0.21682183359214885, 2.63373365386060332]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_lognormal(self):
+ random.seed(self.seed)
+ actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+ desired = np.array([[16.50698631688883822, 36.54846706092654784],
+ [22.67886599981281748, 0.71617561058995771],
+ [65.72798501792723869, 86.84341601437161273]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_lognormal_0(self):
+ assert_equal(random.lognormal(sigma=0), 1)
+ assert_raises(ValueError, random.lognormal, sigma=-0.)
+
+ def test_logseries(self):
+ random.seed(self.seed)
+ actual = random.logseries(p=.923456789, size=(3, 2))
+ desired = np.array([[2, 2],
+ [6, 17],
+ [3, 6]])
+ assert_array_equal(actual, desired)
+
+ def test_logseries_exceptions(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.logseries, np.nan)
+ assert_raises(ValueError, random.logseries, [np.nan] * 10)
+
+ def test_multinomial(self):
+ random.seed(self.seed)
+ actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+ desired = np.array([[[4, 3, 5, 4, 2, 2],
+ [5, 2, 8, 2, 2, 1]],
+ [[3, 4, 3, 6, 0, 4],
+ [2, 1, 4, 3, 6, 4]],
+ [[4, 4, 2, 5, 2, 3],
+ [4, 3, 4, 2, 3, 4]]])
+ assert_array_equal(actual, desired)
+
+ def test_multivariate_normal(self):
+ random.seed(self.seed)
+ mean = (.123456789, 10)
+ cov = [[1, 0], [0, 1]]
+ size = (3, 2)
+ actual = random.multivariate_normal(mean, cov, size)
+ desired = np.array([[[1.463620246718631, 11.73759122771936],
+ [1.622445133300628, 9.771356667546383]],
+ [[2.154490787682787, 12.170324946056553],
+ [1.719909438201865, 9.230548443648306]],
+ [[0.689515026297799, 9.880729819607714],
+ [-0.023054015651998, 9.201096623542879]]])
+
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check for default size, was raising deprecation warning
+ actual = random.multivariate_normal(mean, cov)
+ desired = np.array([0.895289569463708, 9.17180864067987])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check that non positive-semidefinite covariance warns with
+ # RuntimeWarning
+ mean = [0, 0]
+ cov = [[1, 2], [2, 1]]
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+
+ # and that it doesn't warn with RuntimeWarning check_valid='ignore'
+ assert_no_warnings(random.multivariate_normal, mean, cov,
+ check_valid='ignore')
+
+ # and that it raises with RuntimeWarning check_valid='raises'
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
+
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ mu = np.zeros(2)
+ cov = np.eye(2)
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='other')
+ assert_raises(ValueError, random.multivariate_normal,
+ np.zeros((2, 1, 1)), cov)
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.empty((3, 2)))
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.eye(3))
+
+ def test_negative_binomial(self):
+ random.seed(self.seed)
+ actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[848, 841],
+ [892, 611],
+ [779, 647]])
+ assert_array_equal(actual, desired)
+
+ def test_negative_binomial_exceptions(self):
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.negative_binomial, 100, np.nan)
+ assert_raises(ValueError, random.negative_binomial, 100,
+ [np.nan] * 10)
+
+ def test_noncentral_chisquare(self):
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[23.91905354498517511, 13.35324692733826346],
+ [31.22452661329736401, 16.60047399466177254],
+ [5.03461598262724586, 17.94973089023519464]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[1.47145377828516666, 0.15052899268012659],
+ [0.00943803056963588, 1.02647251615666169],
+ [0.332334982684171, 0.15451287602753125]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[9.597154162763948, 11.725484450296079],
+ [10.413711048138335, 3.694475922923986],
+ [13.484222138963087, 14.377255424602957]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f(self):
+ random.seed(self.seed)
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+ size=(3, 2))
+ desired = np.array([[1.40598099674926669, 0.34207973179285761],
+ [3.57715069265772545, 7.92632662577829805],
+ [0.43741599463544162, 1.1774208752428319]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f_nan(self):
+ random.seed(self.seed)
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=np.nan)
+ assert np.isnan(actual)
+
+ def test_normal(self):
+ random.seed(self.seed)
+ actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[2.80378370443726244, 3.59863924443872163],
+ [3.121433477601256, -0.33382987590723379],
+ [4.18552478636557357, 4.46410668111310471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_normal_0(self):
+ assert_equal(random.normal(scale=0), 0)
+ assert_raises(ValueError, random.normal, scale=-0.)
+
+ def test_pareto(self):
+ random.seed(self.seed)
+ actual = random.pareto(a=.123456789, size=(3, 2))
+ desired = np.array(
+ [[2.46852460439034849e+03, 1.41286880810518346e+03],
+ [5.28287797029485181e+07, 6.57720981047328785e+07],
+ [1.40840323350391515e+02, 1.98390255135251704e+05]])
+ # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
+ # matrix differs by 24 nulps. Discussion:
+ # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+ # Consensus is that this is probably some gcc quirk that affects
+ # rounding but not in any important way, so we just use a looser
+ # tolerance on this test:
+ np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
+
+ def test_poisson(self):
+ random.seed(self.seed)
+ actual = random.poisson(lam=.123456789, size=(3, 2))
+ desired = np.array([[0, 0],
+ [1, 0],
+ [0, 0]])
+ assert_array_equal(actual, desired)
+
+ def test_poisson_exceptions(self):
+ lambig = np.iinfo('l').max
+ lamneg = -1
+ assert_raises(ValueError, random.poisson, lamneg)
+ assert_raises(ValueError, random.poisson, [lamneg] * 10)
+ assert_raises(ValueError, random.poisson, lambig)
+ assert_raises(ValueError, random.poisson, [lambig] * 10)
+ with suppress_warnings() as sup:
+ sup.record(RuntimeWarning)
+ assert_raises(ValueError, random.poisson, np.nan)
+ assert_raises(ValueError, random.poisson, [np.nan] * 10)
+
+ def test_power(self):
+ random.seed(self.seed)
+ actual = random.power(a=.123456789, size=(3, 2))
+ desired = np.array([[0.02048932883240791, 0.01424192241128213],
+ [0.38446073748535298, 0.39499689943484395],
+ [0.00177699707563439, 0.13115505880863756]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rayleigh(self):
+ random.seed(self.seed)
+ actual = random.rayleigh(scale=10, size=(3, 2))
+ desired = np.array([[13.8882496494248393, 13.383318339044731],
+ [20.95413364294492098, 21.08285015800712614],
+ [11.06066537006854311, 17.35468505778271009]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_rayleigh_0(self):
+ assert_equal(random.rayleigh(scale=0), 0)
+ assert_raises(ValueError, random.rayleigh, scale=-0.)
+
+ def test_standard_cauchy(self):
+ random.seed(self.seed)
+ actual = random.standard_cauchy(size=(3, 2))
+ desired = np.array([[0.77127660196445336, -6.55601161955910605],
+ [0.93582023391158309, -2.07479293013759447],
+ [-4.74601644297011926, 0.18338989290760804]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential(self):
+ random.seed(self.seed)
+ actual = random.standard_exponential(size=(3, 2))
+ desired = np.array([[0.96441739162374596, 0.89556604882105506],
+ [2.1953785836319808, 2.22243285392490542],
+ [0.6116915921431676, 1.50592546727413201]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_gamma(self):
+ random.seed(self.seed)
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[5.50841531318455058, 6.62953470301903103],
+ [5.93988484943779227, 2.31044849402133989],
+ [7.54838614231317084, 8.012756093271868]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_standard_gamma_0(self):
+ assert_equal(random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, random.standard_gamma, shape=-0.)
+
+ def test_standard_normal(self):
+ random.seed(self.seed)
+ actual = random.standard_normal(size=(3, 2))
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn_singleton(self):
+ random.seed(self.seed)
+ actual = random.randn()
+ desired = np.array(1.34016345771863121)
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_t(self):
+ random.seed(self.seed)
+ actual = random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[0.97140611862659965, -0.08830486548450577],
+ [1.36311143689505321, -0.55317463909867071],
+ [-0.18473749069684214, 0.61181537341755321]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_triangular(self):
+ random.seed(self.seed)
+ actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
+ desired = np.array([[12.68117178949215784, 12.4129206149193152],
+ [16.20131377335158263, 16.25692138747600524],
+ [11.20400690911820263, 14.4978144835829923]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_uniform(self):
+ random.seed(self.seed)
+ actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+ desired = np.array([[6.99097932346268003, 6.73801597444323974],
+ [9.50364421400426274, 9.53130618907631089],
+ [5.48995325769805476, 8.47493103280052118]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_uniform_range_bounds(self):
+ fmin = np.finfo('float').min
+ fmax = np.finfo('float').max
+
+ func = random.uniform
+ assert_raises(OverflowError, func, -np.inf, 0)
+ assert_raises(OverflowError, func, 0, np.inf)
+ assert_raises(OverflowError, func, fmin, fmax)
+ assert_raises(OverflowError, func, [-np.inf], [0])
+ assert_raises(OverflowError, func, [0], [np.inf])
+
+ # (fmax / 1e17) - fmin is within range, so this should not throw
+ # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
+ # DBL_MAX by increasing fmin a bit
+ random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+
+ def test_scalar_exception_propagation(self):
+ # Tests that exceptions are correctly propagated in distributions
+ # when called with objects that throw exceptions when converted to
+ # scalars.
+ #
+ # Regression test for gh: 8865
+
+ class ThrowingFloat(np.ndarray):
+ def __float__(self):
+ raise TypeError
+
+ throwing_float = np.array(1.0).view(ThrowingFloat)
+ assert_raises(TypeError, random.uniform, throwing_float,
+ throwing_float)
+
+ class ThrowingInteger(np.ndarray):
+ def __int__(self):
+ raise TypeError
+
+ throwing_int = np.array(1).view(ThrowingInteger)
+ assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
+
+ def test_vonmises(self):
+ random.seed(self.seed)
+ actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+ desired = np.array([[2.28567572673902042, 2.89163838442285037],
+ [0.38198375564286025, 2.57638023113890746],
+ [1.19153771588353052, 1.83509849681825354]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_vonmises_small(self):
+ # check infinite loop, gh-4720
+ random.seed(self.seed)
+ r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
+ assert_(np.isfinite(r).all())
+
+ def test_vonmises_nan(self):
+ random.seed(self.seed)
+ r = random.vonmises(mu=0., kappa=np.nan)
+ assert_(np.isnan(r))
+
+ def test_wald(self):
+ random.seed(self.seed)
+ actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+ desired = np.array([[3.82935265715889983, 5.13125249184285526],
+ [0.35045403618358717, 1.50832396872003538],
+ [0.24124319895843183, 0.22031101461955038]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_weibull(self):
+ random.seed(self.seed)
+ actual = random.weibull(a=1.23, size=(3, 2))
+ desired = np.array([[0.97097342648766727, 0.91422896443565516],
+ [1.89517770034962929, 1.91414357960479564],
+ [0.67057783752390987, 1.39494046635066793]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_weibull_0(self):
+ random.seed(self.seed)
+ assert_equal(random.weibull(a=0, size=12), np.zeros(12))
+ assert_raises(ValueError, random.weibull, a=-0.)
+
+ def test_zipf(self):
+ random.seed(self.seed)
+ actual = random.zipf(a=1.23, size=(3, 2))
+ desired = np.array([[66, 29],
+ [1, 1],
+ [3, 13]])
+ assert_array_equal(actual, desired)
+
+
+class TestBroadcast(object):
+ # tests that functions that broadcast behave
+ # correctly when presented with non-scalar arguments
+ def setup(self):
+ self.seed = 123456789
+
+ def set_seed(self):
+ random.seed(self.seed)
+
+ def test_uniform(self):
+ low = [0]
+ high = [1]
+ uniform = random.uniform
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.set_seed()
+ actual = uniform(low * 3, high)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ self.set_seed()
+ actual = uniform(low, high * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ normal = random.normal
+ desired = np.array([2.2129019979039612,
+ 2.1283977976520019,
+ 1.8417114045748335])
+
+ self.set_seed()
+ actual = normal(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = normal(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc, bad_scale * 3)
+
+ def test_beta(self):
+ a = [1]
+ b = [2]
+ bad_a = [-1]
+ bad_b = [-2]
+ beta = random.beta
+ desired = np.array([0.19843558305989056,
+ 0.075230336409423643,
+ 0.24976865978980844])
+
+ self.set_seed()
+ actual = beta(a * 3, b)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a * 3, b)
+ assert_raises(ValueError, beta, a * 3, bad_b)
+
+ self.set_seed()
+ actual = beta(a, b * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a, b * 3)
+ assert_raises(ValueError, beta, a, bad_b * 3)
+
+ def test_exponential(self):
+ scale = [1]
+ bad_scale = [-1]
+ exponential = random.exponential
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.set_seed()
+ actual = exponential(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, exponential, bad_scale * 3)
+
+ def test_standard_gamma(self):
+ shape = [1]
+ bad_shape = [-1]
+ std_gamma = random.standard_gamma
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.set_seed()
+ actual = std_gamma(shape * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, std_gamma, bad_shape * 3)
+
+ def test_gamma(self):
+ shape = [1]
+ scale = [2]
+ bad_shape = [-1]
+ bad_scale = [-2]
+ gamma = random.gamma
+ desired = np.array([1.5221370731769048,
+ 1.5277256455738331,
+ 1.4248762625178359])
+
+ self.set_seed()
+ actual = gamma(shape * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape * 3, scale)
+ assert_raises(ValueError, gamma, shape * 3, bad_scale)
+
+ self.set_seed()
+ actual = gamma(shape, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape, scale * 3)
+ assert_raises(ValueError, gamma, shape, bad_scale * 3)
+
+ def test_f(self):
+ dfnum = [1]
+ dfden = [2]
+ bad_dfnum = [-1]
+ bad_dfden = [-2]
+ f = random.f
+ desired = np.array([0.80038951638264799,
+ 0.86768719635363512,
+ 2.7251095168386801])
+
+ self.set_seed()
+ actual = f(dfnum * 3, dfden)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum * 3, dfden)
+ assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+
+ self.set_seed()
+ actual = f(dfnum, dfden * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum, dfden * 3)
+ assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+
+ def test_noncentral_f(self):
+ dfnum = [2]
+ dfden = [3]
+ nonc = [4]
+ bad_dfnum = [0]
+ bad_dfden = [-1]
+ bad_nonc = [-2]
+ nonc_f = random.noncentral_f
+ desired = np.array([9.1393943263705211,
+ 13.025456344595602,
+ 8.8018098359100545])
+
+ self.set_seed()
+ actual = nonc_f(dfnum * 3, dfden, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert np.all(np.isnan(nonc_f(dfnum, dfden, [np.nan] * 3)))
+
+ assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+
+ self.set_seed()
+ actual = nonc_f(dfnum, dfden * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+
+ self.set_seed()
+ actual = nonc_f(dfnum, dfden, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+
+ def test_noncentral_f_small_df(self):
+ self.set_seed()
+ desired = np.array([6.869638627492048, 0.785880199263955])
+ actual = random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_chisquare(self):
+ df = [1]
+ bad_df = [-1]
+ chisquare = random.chisquare
+ desired = np.array([0.57022801133088286,
+ 0.51947702108840776,
+ 0.1320969254923558])
+
+ self.set_seed()
+ actual = chisquare(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, chisquare, bad_df * 3)
+
+ def test_noncentral_chisquare(self):
+ df = [1]
+ nonc = [2]
+ bad_df = [-1]
+ bad_nonc = [-2]
+ nonc_chi = random.noncentral_chisquare
+ desired = np.array([9.0015599467913763,
+ 4.5804135049718742,
+ 6.0872302432834564])
+
+ self.set_seed()
+ actual = nonc_chi(df * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
+ assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+
+ self.set_seed()
+ actual = nonc_chi(df, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
+ assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+
+ def test_standard_t(self):
+ df = [1]
+ bad_df = [-1]
+ t = random.standard_t
+ desired = np.array([3.0702872575217643,
+ 5.8560725167361607,
+ 1.0274791436474273])
+
+ self.set_seed()
+ actual = t(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, t, bad_df * 3)
+ assert_raises(ValueError, random.standard_t, bad_df * 3)
+
+ def test_vonmises(self):
+ mu = [2]
+ kappa = [1]
+ bad_kappa = [-1]
+ vonmises = random.vonmises
+ desired = np.array([2.9883443664201312,
+ -2.7064099483995943,
+ -1.8672476700665914])
+
+ self.set_seed()
+ actual = vonmises(mu * 3, kappa)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
+
+ self.set_seed()
+ actual = vonmises(mu, kappa * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
+
+ def test_pareto(self):
+ a = [1]
+ bad_a = [-1]
+ pareto = random.pareto
+ desired = np.array([1.1405622680198362,
+ 1.1465519762044529,
+ 1.0389564467453547])
+
+ self.set_seed()
+ actual = pareto(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, pareto, bad_a * 3)
+ assert_raises(ValueError, random.pareto, bad_a * 3)
+
+ def test_weibull(self):
+ a = [1]
+ bad_a = [-1]
+ weibull = random.weibull
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.set_seed()
+ actual = weibull(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, weibull, bad_a * 3)
+ assert_raises(ValueError, random.weibull, bad_a * 3)
+
+ def test_power(self):
+ a = [1]
+ bad_a = [-1]
+ power = random.power
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.set_seed()
+ actual = power(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, power, bad_a * 3)
+ assert_raises(ValueError, random.power, bad_a * 3)
+
+ def test_laplace(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ laplace = random.laplace
+ desired = np.array([0.067921356028507157,
+ 0.070715642226971326,
+ 0.019290950698972624])
+
+ self.set_seed()
+ actual = laplace(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = laplace(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc, bad_scale * 3)
+
+ def test_gumbel(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ gumbel = random.gumbel
+ desired = np.array([0.2730318639556768,
+ 0.26936705726291116,
+ 0.33906220393037939])
+
+ self.set_seed()
+ actual = gumbel(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = gumbel(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+
+ def test_logistic(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ logistic = random.logistic
+ desired = np.array([0.13152135837586171,
+ 0.13675915696285773,
+ 0.038216792802833396])
+
+ self.set_seed()
+ actual = logistic(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc * 3, bad_scale)
+
+ self.set_seed()
+ actual = logistic(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc, bad_scale * 3)
+ assert_equal(random.logistic(1.0, 0.0), 1.0)
+
+ def test_lognormal(self):
+ mean = [0]
+ sigma = [1]
+ bad_sigma = [-1]
+ lognormal = random.lognormal
+ desired = np.array([9.1422086044848427,
+ 8.4013952870126261,
+ 6.3073234116578671])
+
+ self.set_seed()
+ actual = lognormal(mean * 3, sigma)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+ assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
+
+ self.set_seed()
+ actual = lognormal(mean, sigma * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
+ assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
+
+ def test_rayleigh(self):
+ scale = [1]
+ bad_scale = [-1]
+ rayleigh = random.rayleigh
+ desired = np.array([1.2337491937897689,
+ 1.2360119924878694,
+ 1.1936818095781789])
+
+ self.set_seed()
+ actual = rayleigh(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, rayleigh, bad_scale * 3)
+
+ def test_wald(self):
+ mean = [0.5]
+ scale = [1]
+ bad_mean = [0]
+ bad_scale = [-2]
+ wald = random.wald
+ desired = np.array([0.11873681120271318,
+ 0.12450084820795027,
+ 0.9096122728408238])
+
+ self.set_seed()
+ actual = wald(mean * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean * 3, scale)
+ assert_raises(ValueError, wald, mean * 3, bad_scale)
+ assert_raises(ValueError, random.wald, bad_mean * 3, scale)
+ assert_raises(ValueError, random.wald, mean * 3, bad_scale)
+
+ self.set_seed()
+ actual = wald(mean, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean, scale * 3)
+ assert_raises(ValueError, wald, mean, bad_scale * 3)
+ assert_raises(ValueError, wald, 0.0, 1)
+ assert_raises(ValueError, wald, 0.5, 0.0)
+
+ def test_triangular(self):
+ left = [1]
+ right = [3]
+ mode = [2]
+ bad_left_one = [3]
+ bad_mode_one = [4]
+ bad_left_two, bad_mode_two = right * 2
+ triangular = random.triangular
+ desired = np.array([2.03339048710429,
+ 2.0347400359389356,
+ 2.0095991069536208])
+
+ self.set_seed()
+ actual = triangular(left * 3, mode, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+ assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
+
+ self.set_seed()
+ actual = triangular(left, mode * 3, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+ assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
+
+ self.set_seed()
+ actual = triangular(left, mode, right * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+ assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
+
+ assert_raises(ValueError, triangular, 10., 0., 20.)
+ assert_raises(ValueError, triangular, 10., 25., 20.)
+ assert_raises(ValueError, triangular, 10., 10., 10.)
+
+ def test_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ binom = random.binomial
+ desired = np.array([1, 1, 1])
+
+ self.set_seed()
+ actual = binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n * 3, p)
+ assert_raises(ValueError, binom, n * 3, bad_p_one)
+ assert_raises(ValueError, binom, n * 3, bad_p_two)
+
+ self.set_seed()
+ actual = binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n, p * 3)
+ assert_raises(ValueError, binom, n, bad_p_one * 3)
+ assert_raises(ValueError, binom, n, bad_p_two * 3)
+
+ def test_negative_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ neg_binom = random.negative_binomial
+ desired = np.array([1, 0, 1])
+
+ self.set_seed()
+ actual = neg_binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n * 3, p)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+
+ self.set_seed()
+ actual = neg_binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n, p * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+
+ def test_poisson(self):
+ max_lam = random.RandomState()._poisson_lam_max
+
+ lam = [1]
+ bad_lam_one = [-1]
+ bad_lam_two = [max_lam * 2]
+ poisson = random.poisson
+ desired = np.array([1, 1, 0])
+
+ self.set_seed()
+ actual = poisson(lam * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, poisson, bad_lam_one * 3)
+ assert_raises(ValueError, poisson, bad_lam_two * 3)
+
+ def test_zipf(self):
+ a = [2]
+ bad_a = [0]
+ zipf = random.zipf
+ desired = np.array([2, 2, 1])
+
+ self.set_seed()
+ actual = zipf(a * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, zipf, bad_a * 3)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, zipf, np.nan)
+ assert_raises(ValueError, zipf, [0, 0, np.nan])
+
+ def test_geometric(self):
+ p = [0.5]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ geom = random.geometric
+ desired = np.array([2, 2, 2])
+
+ self.set_seed()
+ actual = geom(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, geom, bad_p_one * 3)
+ assert_raises(ValueError, geom, bad_p_two * 3)
+
+ def test_hypergeometric(self):
+ ngood = [1]
+ nbad = [2]
+ nsample = [2]
+ bad_ngood = [-1]
+ bad_nbad = [-2]
+ bad_nsample_one = [0]
+ bad_nsample_two = [4]
+ hypergeom = random.hypergeometric
+ desired = np.array([1, 1, 1])
+
+ self.set_seed()
+ actual = hypergeom(ngood * 3, nbad, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
+
+ self.set_seed()
+ actual = hypergeom(ngood, nbad * 3, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
+
+ self.set_seed()
+ actual = hypergeom(ngood, nbad, nsample * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+
+ assert_raises(ValueError, hypergeom, -1, 10, 20)
+ assert_raises(ValueError, hypergeom, 10, -1, 20)
+ assert_raises(ValueError, hypergeom, 10, 10, 0)
+ assert_raises(ValueError, hypergeom, 10, 10, 25)
+
+ def test_logseries(self):
+ p = [0.5]
+ bad_p_one = [2]
+ bad_p_two = [-1]
+ logseries = random.logseries
+ desired = np.array([1, 1, 1])
+
+ self.set_seed()
+ actual = logseries(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, logseries, bad_p_one * 3)
+ assert_raises(ValueError, logseries, bad_p_two * 3)
+
+
+class TestThread(object):
+ # make sure each state produces the same sequence even in threads
+ def setup(self):
+ self.seeds = range(4)
+
+ def check_function(self, function, sz):
+ from threading import Thread
+
+ out1 = np.empty((len(self.seeds),) + sz)
+ out2 = np.empty((len(self.seeds),) + sz)
+
+ # threaded generation
+ t = [Thread(target=function, args=(random.RandomState(s), o))
+ for s, o in zip(self.seeds, out1)]
+ [x.start() for x in t]
+ [x.join() for x in t]
+
+ # the same serial
+ for s, o in zip(self.seeds, out2):
+ function(random.RandomState(s), o)
+
+ # these platforms change x87 fpu precision mode in threads
+ if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
+ assert_array_almost_equal(out1, out2)
+ else:
+ assert_array_equal(out1, out2)
+
+ def test_normal(self):
+ def gen_random(state, out):
+ out[...] = state.normal(size=10000)
+
+ self.check_function(gen_random, sz=(10000,))
+
+ def test_exp(self):
+ def gen_random(state, out):
+ out[...] = state.exponential(scale=np.ones((100, 1000)))
+
+ self.check_function(gen_random, sz=(100, 1000))
+
+ def test_multinomial(self):
+ def gen_random(state, out):
+ out[...] = state.multinomial(10, [1 / 6.] * 6, size=10000)
+
+ self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput(object):
+ def setup(self):
+ self.argOne = np.array([2])
+ self.argTwo = np.array([3])
+ self.argThree = np.array([4])
+ self.tgtShape = (1,)
+
+ def test_one_arg_funcs(self):
+ funcs = (random.exponential, random.standard_gamma,
+ random.chisquare, random.standard_t,
+ random.pareto, random.weibull,
+ random.power, random.rayleigh,
+ random.poisson, random.zipf,
+ random.geometric, random.logseries)
+
+ probfuncs = (random.geometric, random.logseries)
+
+ for func in funcs:
+ if func in probfuncs: # p < 1.0
+ out = func(np.array([0.5]))
+
+ else:
+ out = func(self.argOne)
+
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_two_arg_funcs(self):
+ funcs = (random.uniform, random.normal,
+ random.beta, random.gamma,
+ random.f, random.noncentral_chisquare,
+ random.vonmises, random.laplace,
+ random.gumbel, random.logistic,
+ random.lognormal, random.wald,
+ random.binomial, random.negative_binomial)
+
+ probfuncs = (random.binomial, random.negative_binomial)
+
+ for func in funcs:
+ if func in probfuncs: # p <= 1
+ argTwo = np.array([0.5])
+
+ else:
+ argTwo = self.argTwo
+
+ out = func(self.argOne, argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, argTwo[0])
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_three_arg_funcs(self):
+ funcs = [random.noncentral_f, random.triangular,
+ random.hypergeometric]
+
+ for func in funcs:
+ out = func(self.argOne, self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, self.argTwo[0], self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+
+# Ensure returned array dtype is correct for platform
+def test_integer_dtype(int_func):
+ random.seed(123456789)
+ fname, args, md5 = int_func
+ f = getattr(random, fname)
+ actual = f(*args, size=2)
+ assert_(actual.dtype == np.dtype('l'))
+
+
+def test_integer_repeat(int_func):
+ random.seed(123456789)
+ fname, args, md5 = int_func
+ f = getattr(random, fname)
+ val = f(*args, size=1000000)
+ if sys.byteorder != 'little':
+ val = val.byteswap()
+ res = hashlib.md5(val.view(np.int8)).hexdigest()
+ assert_(res == md5)
diff --git a/numpy/random/tests/test_randomstate_regression.py b/numpy/random/tests/test_randomstate_regression.py
new file mode 100644
index 000000000..edf32ea97
--- /dev/null
+++ b/numpy/random/tests/test_randomstate_regression.py
@@ -0,0 +1,210 @@
+import sys
+
+import pytest
+
+from numpy.testing import (
+ assert_, assert_array_equal, assert_raises,
+ )
+from numpy.compat import long
+import numpy as np
+
+from numpy.random import mtrand as random
+
+
+class TestRegression(object):
+
+ def test_VonMises_range(self):
+ # Make sure generated random variables are in [-pi, pi].
+ # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5):
+ r = random.vonmises(mu, 1, 50)
+ assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+ def test_hypergeometric_range(self):
+ # Test for ticket #921
+ assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
+ assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
+
+ # Test for ticket #5623
+ args = [
+ (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
+ ]
+ is_64bits = sys.maxsize > 2**32
+ if is_64bits and sys.platform != 'win32':
+ # Check for 64-bit systems
+ args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
+ for arg in args:
+ assert_(random.hypergeometric(*arg) > 0)
+
+ def test_logseries_convergence(self):
+ # Test for ticket #923
+ N = 1000
+ random.seed(0)
+ rvsn = random.logseries(0.8, size=N)
+ # these two frequency counts should be close to theoretical
+ # numbers with this large sample
+ # theoretical large N result is 0.49706795
+ freq = np.sum(rvsn == 1) / float(N)
+ msg = "Frequency was %f, should be > 0.45" % freq
+ assert_(freq > 0.45, msg)
+ # theoretical large N result is 0.19882718
+ freq = np.sum(rvsn == 2) / float(N)
+ msg = "Frequency was %f, should be < 0.23" % freq
+ assert_(freq < 0.23, msg)
+
+ def test_permutation_longs(self):
+ random.seed(1234)
+ a = random.permutation(12)
+ random.seed(1234)
+ b = random.permutation(long(12))
+ assert_array_equal(a, b)
+
+ def test_shuffle_mixed_dimension(self):
+ # Test for trac ticket #2074
+ for t in [[1, 2, 3, None],
+ [(1, 1), (2, 2), (3, 3), None],
+ [1, (2, 2), (3, 3), None],
+ [(1, 1), 2, 3, None]]:
+ random.seed(12345)
+ shuffled = list(t)
+ random.shuffle(shuffled)
+ assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
+
+ def test_call_within_randomstate(self):
+ # Check that custom RandomState does not call into global state
+ m = random.RandomState()
+ res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
+ for i in range(3):
+ random.seed(i)
+ m.seed(4321)
+ # If m.state is not honored, the result will change
+ assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
+
+ def test_multivariate_normal_size_types(self):
+ # Test for multivariate_normal issue with 'size' argument.
+ # Check that the multivariate_normal size argument can be a
+ # numpy integer.
+ random.multivariate_normal([0], [[0]], size=1)
+ random.multivariate_normal([0], [[0]], size=np.int_(1))
+ random.multivariate_normal([0], [[0]], size=np.int64(1))
+
+ def test_beta_small_parameters(self):
+ # Test that beta with small a and b parameters does not produce
+ # NaNs due to roundoff errors causing 0 / 0, gh-5851
+ random.seed(1234567890)
+ x = random.beta(0.0001, 0.0001, size=100)
+ assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
+
+ def test_choice_sum_of_probs_tolerance(self):
+ # The sum of probs should be 1.0 with some tolerance.
+ # For low precision dtypes the tolerance was too tight.
+ # See numpy github issue 6123.
+ random.seed(1234)
+ a = [1, 2, 3]
+ counts = [4, 4, 2]
+ for dt in np.float16, np.float32, np.float64:
+ probs = np.array(counts, dtype=dt) / sum(counts)
+ c = random.choice(a, p=probs)
+ assert_(c in a)
+ assert_raises(ValueError, random.choice, a, p=probs*0.9)
+
+ def test_shuffle_of_array_of_different_length_strings(self):
+ # Test that permuting an array of different length strings
+ # will not cause a segfault on garbage collection
+ # Tests gh-7710
+ random.seed(1234)
+
+ a = np.array(['a', 'a' * 1000])
+
+ for _ in range(100):
+ random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_shuffle_of_array_of_objects(self):
+ # Test that permuting an array of objects will not cause
+ # a segfault on garbage collection.
+ # See gh-7719
+ random.seed(1234)
+ a = np.array([np.arange(1), np.arange(4)])
+
+ for _ in range(1000):
+ random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_permutation_subclass(self):
+ class N(np.ndarray):
+ pass
+
+ random.seed(1)
+ orig = np.arange(3).view(N)
+ perm = random.permutation(orig)
+ assert_array_equal(perm, np.array([0, 2, 1]))
+ assert_array_equal(orig, np.arange(3).view(N))
+
+ class M(object):
+ a = np.arange(5)
+
+ def __array__(self):
+ return self.a
+
+ random.seed(1)
+ m = M()
+ perm = random.permutation(m)
+ assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
+ assert_array_equal(m.__array__(), np.arange(5))
+
+ def test_warns_byteorder(self):
+ # GH 13159
+ other_byteord_dt = '<i4' if sys.byteorder == 'big' else '>i4'
+ with pytest.deprecated_call(match='non-native byteorder is not'):
+ random.randint(0, 200, size=10, dtype=other_byteord_dt)
+
+ def test_named_argument_initialization(self):
+ # GH 13669
+ rs1 = np.random.RandomState(123456789)
+ rs2 = np.random.RandomState(seed=123456789)
+ assert rs1.randint(0, 100) == rs2.randint(0, 100)
+
+ def test_choice_retun_dtype(self):
+ # GH 9867
+ c = np.random.choice(10, p=[.1]*10, size=2)
+ assert c.dtype == np.dtype(int)
+ c = np.random.choice(10, p=[.1]*10, replace=False, size=2)
+ assert c.dtype == np.dtype(int)
+ c = np.random.choice(10, size=2)
+ assert c.dtype == np.dtype(int)
+ c = np.random.choice(10, replace=False, size=2)
+ assert c.dtype == np.dtype(int)
+
+ @pytest.mark.skipif(np.iinfo('l').max < 2**32,
+ reason='Cannot test with 32-bit C long')
+ def test_randint_117(self):
+ # GH 14189
+ random.seed(0)
+ expected = np.array([2357136044, 2546248239, 3071714933, 3626093760,
+ 2588848963, 3684848379, 2340255427, 3638918503,
+ 1819583497, 2678185683], dtype='int64')
+ actual = random.randint(2**32, size=10)
+ assert_array_equal(actual, expected)
+
+ def test_p_zero_stream(self):
+ # Regression test for gh-14522. Ensure that future versions
+ # generate the same variates as version 1.16.
+ np.random.seed(12345)
+ assert_array_equal(random.binomial(1, [0, 0.25, 0.5, 0.75, 1]),
+ [0, 0, 0, 1, 1])
+
+ def test_n_zero_stream(self):
+ # Regression test for gh-14522. Ensure that future versions
+ # generate the same variates as version 1.16.
+ np.random.seed(8675309)
+ expected = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
+ [3, 4, 2, 3, 3, 1, 5, 3, 1, 3]])
+ assert_array_equal(random.binomial([[0], [10]], 0.25, size=(2, 10)),
+ expected)
diff --git a/numpy/random/tests/test_regression.py b/numpy/random/tests/test_regression.py
index ca9bbbc71..509e2d57f 100644
--- a/numpy/random/tests/test_regression.py
+++ b/numpy/random/tests/test_regression.py
@@ -29,7 +29,8 @@ class TestRegression(object):
]
is_64bits = sys.maxsize > 2**32
if is_64bits and sys.platform != 'win32':
- args.append((2**40 - 2, 2**40 - 2, 2**40 - 2)) # Check for 64-bit systems
+ # Check for 64-bit systems
+ args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
for arg in args:
assert_(np.random.hypergeometric(*arg) > 0)
diff --git a/numpy/random/tests/test_seed_sequence.py b/numpy/random/tests/test_seed_sequence.py
new file mode 100644
index 000000000..8d6d604a2
--- /dev/null
+++ b/numpy/random/tests/test_seed_sequence.py
@@ -0,0 +1,54 @@
+import numpy as np
+from numpy.testing import assert_array_equal
+
+from numpy.random.bit_generator import SeedSequence
+
+
+def test_reference_data():
+ """ Check that SeedSequence generates data the same as the C++ reference.
+
+ https://gist.github.com/imneme/540829265469e673d045
+ """
+ inputs = [
+ [3735928559, 195939070, 229505742, 305419896],
+ [3668361503, 4165561550, 1661411377, 3634257570],
+ [164546577, 4166754639, 1765190214, 1303880213],
+ [446610472, 3941463886, 522937693, 1882353782],
+ [1864922766, 1719732118, 3882010307, 1776744564],
+ [4141682960, 3310988675, 553637289, 902896340],
+ [1134851934, 2352871630, 3699409824, 2648159817],
+ [1240956131, 3107113773, 1283198141, 1924506131],
+ [2669565031, 579818610, 3042504477, 2774880435],
+ [2766103236, 2883057919, 4029656435, 862374500],
+ ]
+ outputs = [
+ [3914649087, 576849849, 3593928901, 2229911004],
+ [2240804226, 3691353228, 1365957195, 2654016646],
+ [3562296087, 3191708229, 1147942216, 3726991905],
+ [1403443605, 3591372999, 1291086759, 441919183],
+ [1086200464, 2191331643, 560336446, 3658716651],
+ [3249937430, 2346751812, 847844327, 2996632307],
+ [2584285912, 4034195531, 3523502488, 169742686],
+ [959045797, 3875435559, 1886309314, 359682705],
+ [3978441347, 432478529, 3223635119, 138903045],
+ [296367413, 4262059219, 13109864, 3283683422],
+ ]
+ outputs64 = [
+ [2477551240072187391, 9577394838764454085],
+ [15854241394484835714, 11398914698975566411],
+ [13708282465491374871, 16007308345579681096],
+ [15424829579845884309, 1898028439751125927],
+ [9411697742461147792, 15714068361935982142],
+ [10079222287618677782, 12870437757549876199],
+ [17326737873898640088, 729039288628699544],
+ [16644868984619524261, 1544825456798124994],
+ [1857481142255628931, 596584038813451439],
+ [18305404959516669237, 14103312907920476776],
+ ]
+ for seed, expected, expected64 in zip(inputs, outputs, outputs64):
+ expected = np.array(expected, dtype=np.uint32)
+ ss = SeedSequence(seed)
+ state = ss.generate_state(len(expected))
+ assert_array_equal(state, expected)
+ state64 = ss.generate_state(len(expected64), dtype=np.uint64)
+ assert_array_equal(state64, expected64)
diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py
new file mode 100644
index 000000000..6e641b5f4
--- /dev/null
+++ b/numpy/random/tests/test_smoke.py
@@ -0,0 +1,808 @@
+import pickle
+import time
+from functools import partial
+
+import numpy as np
+import pytest
+from numpy.testing import assert_equal, assert_, assert_array_equal
+from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64)
+
+@pytest.fixture(scope='module',
+ params=(np.bool, np.int8, np.int16, np.int32, np.int64,
+ np.uint8, np.uint16, np.uint32, np.uint64))
+def dtype(request):
+ return request.param
+
+
+def params_0(f):
+ val = f()
+ assert_(np.isscalar(val))
+ val = f(10)
+ assert_(val.shape == (10,))
+ val = f((10, 10))
+ assert_(val.shape == (10, 10))
+ val = f((10, 10, 10))
+ assert_(val.shape == (10, 10, 10))
+ val = f(size=(5, 5))
+ assert_(val.shape == (5, 5))
+
+
+def params_1(f, bounded=False):
+ a = 5.0
+ b = np.arange(2.0, 12.0)
+ c = np.arange(2.0, 102.0).reshape((10, 10))
+ d = np.arange(2.0, 1002.0).reshape((10, 10, 10))
+ e = np.array([2.0, 3.0])
+ g = np.arange(2.0, 12.0).reshape((1, 10, 1))
+ if bounded:
+ a = 0.5
+ b = b / (1.5 * b.max())
+ c = c / (1.5 * c.max())
+ d = d / (1.5 * d.max())
+ e = e / (1.5 * e.max())
+ g = g / (1.5 * g.max())
+
+ # Scalar
+ f(a)
+ # Scalar - size
+ f(a, size=(10, 10))
+ # 1d
+ f(b)
+ # 2d
+ f(c)
+ # 3d
+ f(d)
+ # 1d size
+ f(b, size=10)
+ # 2d - size - broadcast
+ f(e, size=(10, 2))
+ # 3d - size
+ f(g, size=(10, 10, 10))
+
+
+def comp_state(state1, state2):
+ identical = True
+ if isinstance(state1, dict):
+ for key in state1:
+ identical &= comp_state(state1[key], state2[key])
+ elif type(state1) != type(state2):
+ identical &= type(state1) == type(state2)
+ else:
+ if (isinstance(state1, (list, tuple, np.ndarray)) and isinstance(
+ state2, (list, tuple, np.ndarray))):
+ for s1, s2 in zip(state1, state2):
+ identical &= comp_state(s1, s2)
+ else:
+ identical &= state1 == state2
+ return identical
+
+
+def warmup(rg, n=None):
+ if n is None:
+ n = 11 + np.random.randint(0, 20)
+ rg.standard_normal(n)
+ rg.standard_normal(n)
+ rg.standard_normal(n, dtype=np.float32)
+ rg.standard_normal(n, dtype=np.float32)
+ rg.integers(0, 2 ** 24, n, dtype=np.uint64)
+ rg.integers(0, 2 ** 48, n, dtype=np.uint64)
+ rg.standard_gamma(11.0, n)
+ rg.standard_gamma(11.0, n, dtype=np.float32)
+ rg.random(n, dtype=np.float64)
+ rg.random(n, dtype=np.float32)
+
+
+class RNG(object):
+ @classmethod
+ def setup_class(cls):
+ # Overridden in test classes. Place holder to silence IDE noise
+ cls.bit_generator = PCG64
+ cls.advance = None
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+ @classmethod
+ def _extra_setup(cls):
+ cls.vec_1d = np.arange(2.0, 102.0)
+ cls.vec_2d = np.arange(2.0, 102.0)[None, :]
+ cls.mat = np.arange(2.0, 102.0, 0.01).reshape((100, 100))
+ cls.seed_error = TypeError
+
+ def _reset_state(self):
+ self.rg.bit_generator.state = self.initial_state
+
+ def test_init(self):
+ rg = Generator(self.bit_generator())
+ state = rg.bit_generator.state
+ rg.standard_normal(1)
+ rg.standard_normal(1)
+ rg.bit_generator.state = state
+ new_state = rg.bit_generator.state
+ assert_(comp_state(state, new_state))
+
+ def test_advance(self):
+ state = self.rg.bit_generator.state
+ if hasattr(self.rg.bit_generator, 'advance'):
+ self.rg.bit_generator.advance(self.advance)
+ assert_(not comp_state(state, self.rg.bit_generator.state))
+ else:
+ bitgen_name = self.rg.bit_generator.__class__.__name__
+ pytest.skip('Advance is not supported by {0}'.format(bitgen_name))
+
+ def test_jump(self):
+ state = self.rg.bit_generator.state
+ if hasattr(self.rg.bit_generator, 'jumped'):
+ bit_gen2 = self.rg.bit_generator.jumped()
+ jumped_state = bit_gen2.state
+ assert_(not comp_state(state, jumped_state))
+ self.rg.random(2 * 3 * 5 * 7 * 11 * 13 * 17)
+ self.rg.bit_generator.state = state
+ bit_gen3 = self.rg.bit_generator.jumped()
+ rejumped_state = bit_gen3.state
+ assert_(comp_state(jumped_state, rejumped_state))
+ else:
+ bitgen_name = self.rg.bit_generator.__class__.__name__
+ if bitgen_name not in ('SFC64',):
+ raise AttributeError('no "jumped" in %s' % bitgen_name)
+ pytest.skip('Jump is not supported by {0}'.format(bitgen_name))
+
+ def test_uniform(self):
+ r = self.rg.uniform(-1.0, 0.0, size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+
+ def test_uniform_array(self):
+ r = self.rg.uniform(np.array([-1.0] * 10), 0.0, size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+ r = self.rg.uniform(np.array([-1.0] * 10),
+ np.array([0.0] * 10), size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+ r = self.rg.uniform(-1.0, np.array([0.0] * 10), size=10)
+ assert_(len(r) == 10)
+ assert_((r > -1).all())
+ assert_((r <= 0).all())
+
+ def test_random(self):
+ assert_(len(self.rg.random(10)) == 10)
+ params_0(self.rg.random)
+
+ def test_standard_normal_zig(self):
+ assert_(len(self.rg.standard_normal(10)) == 10)
+
+ def test_standard_normal(self):
+ assert_(len(self.rg.standard_normal(10)) == 10)
+ params_0(self.rg.standard_normal)
+
+ def test_standard_gamma(self):
+ assert_(len(self.rg.standard_gamma(10, 10)) == 10)
+ assert_(len(self.rg.standard_gamma(np.array([10] * 10), 10)) == 10)
+ params_1(self.rg.standard_gamma)
+
+ def test_standard_exponential(self):
+ assert_(len(self.rg.standard_exponential(10)) == 10)
+ params_0(self.rg.standard_exponential)
+
+ def test_standard_exponential_float(self):
+ randoms = self.rg.standard_exponential(10, dtype='float32')
+ assert_(len(randoms) == 10)
+ assert randoms.dtype == np.float32
+ params_0(partial(self.rg.standard_exponential, dtype='float32'))
+
+ def test_standard_exponential_float_log(self):
+ randoms = self.rg.standard_exponential(10, dtype='float32',
+ method='inv')
+ assert_(len(randoms) == 10)
+ assert randoms.dtype == np.float32
+ params_0(partial(self.rg.standard_exponential, dtype='float32',
+ method='inv'))
+
+ def test_standard_cauchy(self):
+ assert_(len(self.rg.standard_cauchy(10)) == 10)
+ params_0(self.rg.standard_cauchy)
+
+ def test_standard_t(self):
+ assert_(len(self.rg.standard_t(10, 10)) == 10)
+ params_1(self.rg.standard_t)
+
+ def test_binomial(self):
+ assert_(self.rg.binomial(10, .5) >= 0)
+ assert_(self.rg.binomial(1000, .5) >= 0)
+
+ def test_reset_state(self):
+ state = self.rg.bit_generator.state
+ int_1 = self.rg.integers(2**31)
+ self.rg.bit_generator.state = state
+ int_2 = self.rg.integers(2**31)
+ assert_(int_1 == int_2)
+
+ def test_entropy_init(self):
+ rg = Generator(self.bit_generator())
+ rg2 = Generator(self.bit_generator())
+ assert_(not comp_state(rg.bit_generator.state,
+ rg2.bit_generator.state))
+
+ def test_seed(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg2 = Generator(self.bit_generator(*self.seed))
+ rg.random()
+ rg2.random()
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_reset_state_gauss(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg.standard_normal()
+ state = rg.bit_generator.state
+ n1 = rg.standard_normal(size=10)
+ rg2 = Generator(self.bit_generator())
+ rg2.bit_generator.state = state
+ n2 = rg2.standard_normal(size=10)
+ assert_array_equal(n1, n2)
+
+ def test_reset_state_uint32(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg.integers(0, 2 ** 24, 120, dtype=np.uint32)
+ state = rg.bit_generator.state
+ n1 = rg.integers(0, 2 ** 24, 10, dtype=np.uint32)
+ rg2 = Generator(self.bit_generator())
+ rg2.bit_generator.state = state
+ n2 = rg2.integers(0, 2 ** 24, 10, dtype=np.uint32)
+ assert_array_equal(n1, n2)
+
+ def test_reset_state_float(self):
+ rg = Generator(self.bit_generator(*self.seed))
+ rg.random(dtype='float32')
+ state = rg.bit_generator.state
+ n1 = rg.random(size=10, dtype='float32')
+ rg2 = Generator(self.bit_generator())
+ rg2.bit_generator.state = state
+ n2 = rg2.random(size=10, dtype='float32')
+ assert_((n1 == n2).all())
+
+ def test_shuffle(self):
+ original = np.arange(200, 0, -1)
+ permuted = self.rg.permutation(original)
+ assert_((original != permuted).any())
+
+ def test_permutation(self):
+ original = np.arange(200, 0, -1)
+ permuted = self.rg.permutation(original)
+ assert_((original != permuted).any())
+
+ def test_beta(self):
+ vals = self.rg.beta(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(np.array([2.0] * 10), 2.0)
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(2.0, np.array([2.0] * 10))
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(np.array([2.0] * 10), np.array([2.0] * 10))
+ assert_(len(vals) == 10)
+ vals = self.rg.beta(np.array([2.0] * 10), np.array([[2.0]] * 10))
+ assert_(vals.shape == (10, 10))
+
+ def test_bytes(self):
+ vals = self.rg.bytes(10)
+ assert_(len(vals) == 10)
+
+ def test_chisquare(self):
+ vals = self.rg.chisquare(2.0, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.chisquare)
+
+ def test_exponential(self):
+ vals = self.rg.exponential(2.0, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.exponential)
+
+ def test_f(self):
+ vals = self.rg.f(3, 1000, 10)
+ assert_(len(vals) == 10)
+
+ def test_gamma(self):
+ vals = self.rg.gamma(3, 2, 10)
+ assert_(len(vals) == 10)
+
+ def test_geometric(self):
+ vals = self.rg.geometric(0.5, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.exponential, bounded=True)
+
+ def test_gumbel(self):
+ vals = self.rg.gumbel(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_laplace(self):
+ vals = self.rg.laplace(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_logitic(self):
+ vals = self.rg.logistic(2.0, 2.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_logseries(self):
+ vals = self.rg.logseries(0.5, 10)
+ assert_(len(vals) == 10)
+
+ def test_negative_binomial(self):
+ vals = self.rg.negative_binomial(10, 0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_noncentral_chisquare(self):
+ vals = self.rg.noncentral_chisquare(10, 2, 10)
+ assert_(len(vals) == 10)
+
+ def test_noncentral_f(self):
+ vals = self.rg.noncentral_f(3, 1000, 2, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.noncentral_f(np.array([3] * 10), 1000, 2)
+ assert_(len(vals) == 10)
+ vals = self.rg.noncentral_f(3, np.array([1000] * 10), 2)
+ assert_(len(vals) == 10)
+ vals = self.rg.noncentral_f(3, 1000, np.array([2] * 10))
+ assert_(len(vals) == 10)
+
+ def test_normal(self):
+ vals = self.rg.normal(10, 0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_pareto(self):
+ vals = self.rg.pareto(3.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_poisson(self):
+ vals = self.rg.poisson(10, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.poisson(np.array([10] * 10))
+ assert_(len(vals) == 10)
+ params_1(self.rg.poisson)
+
+ def test_power(self):
+ vals = self.rg.power(0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_integers(self):
+ vals = self.rg.integers(10, 20, 10)
+ assert_(len(vals) == 10)
+
+ def test_rayleigh(self):
+ vals = self.rg.rayleigh(0.2, 10)
+ assert_(len(vals) == 10)
+ params_1(self.rg.rayleigh, bounded=True)
+
+ def test_vonmises(self):
+ vals = self.rg.vonmises(10, 0.2, 10)
+ assert_(len(vals) == 10)
+
+ def test_wald(self):
+ vals = self.rg.wald(1.0, 1.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_weibull(self):
+ vals = self.rg.weibull(1.0, 10)
+ assert_(len(vals) == 10)
+
+ def test_zipf(self):
+ vals = self.rg.zipf(10, 10)
+ assert_(len(vals) == 10)
+ vals = self.rg.zipf(self.vec_1d)
+ assert_(len(vals) == 100)
+ vals = self.rg.zipf(self.vec_2d)
+ assert_(vals.shape == (1, 100))
+ vals = self.rg.zipf(self.mat)
+ assert_(vals.shape == (100, 100))
+
+ def test_hypergeometric(self):
+ vals = self.rg.hypergeometric(25, 25, 20)
+ assert_(np.isscalar(vals))
+ vals = self.rg.hypergeometric(np.array([25] * 10), 25, 20)
+ assert_(vals.shape == (10,))
+
+ def test_triangular(self):
+ vals = self.rg.triangular(-5, 0, 5)
+ assert_(np.isscalar(vals))
+ vals = self.rg.triangular(-5, np.array([0] * 10), 5)
+ assert_(vals.shape == (10,))
+
+ def test_multivariate_normal(self):
+ mean = [0, 0]
+ cov = [[1, 0], [0, 100]] # diagonal covariance
+ x = self.rg.multivariate_normal(mean, cov, 5000)
+ assert_(x.shape == (5000, 2))
+ x_zig = self.rg.multivariate_normal(mean, cov, 5000)
+ assert_(x.shape == (5000, 2))
+ x_inv = self.rg.multivariate_normal(mean, cov, 5000)
+ assert_(x.shape == (5000, 2))
+ assert_((x_zig != x_inv).any())
+
+ def test_multinomial(self):
+ vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3])
+ assert_(vals.shape == (2,))
+ vals = self.rg.multinomial(100, [1.0 / 3, 2.0 / 3], size=10)
+ assert_(vals.shape == (10, 2))
+
+ def test_dirichlet(self):
+ s = self.rg.dirichlet((10, 5, 3), 20)
+ assert_(s.shape == (20, 3))
+
+ def test_pickle(self):
+ pick = pickle.dumps(self.rg)
+ unpick = pickle.loads(pick)
+ assert_((type(self.rg) == type(unpick)))
+ assert_(comp_state(self.rg.bit_generator.state,
+ unpick.bit_generator.state))
+
+ pick = pickle.dumps(self.rg)
+ unpick = pickle.loads(pick)
+ assert_((type(self.rg) == type(unpick)))
+ assert_(comp_state(self.rg.bit_generator.state,
+ unpick.bit_generator.state))
+
+ def test_seed_array(self):
+ if self.seed_vector_bits is None:
+ bitgen_name = self.bit_generator.__name__
+ pytest.skip('Vector seeding is not supported by '
+ '{0}'.format(bitgen_name))
+
+ if self.seed_vector_bits == 32:
+ dtype = np.uint32
+ else:
+ dtype = np.uint64
+ seed = np.array([1], dtype=dtype)
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(1)
+ state2 = bg.state
+ assert_(comp_state(state1, state2))
+
+ seed = np.arange(4, dtype=dtype)
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(seed[0])
+ state2 = bg.state
+ assert_(not comp_state(state1, state2))
+
+ seed = np.arange(1500, dtype=dtype)
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(seed[0])
+ state2 = bg.state
+ assert_(not comp_state(state1, state2))
+
+ seed = 2 ** np.mod(np.arange(1500, dtype=dtype),
+ self.seed_vector_bits - 1) + 1
+ bg = self.bit_generator(seed)
+ state1 = bg.state
+ bg = self.bit_generator(seed[0])
+ state2 = bg.state
+ assert_(not comp_state(state1, state2))
+
+ def test_uniform_float(self):
+ rg = Generator(self.bit_generator(12345))
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.random(11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.random(11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_gamma_floats(self):
+ rg = Generator(self.bit_generator())
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.standard_gamma(4.0, 11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.standard_gamma(4.0, 11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_normal_floats(self):
+ rg = Generator(self.bit_generator())
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.standard_normal(11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.standard_normal(11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_normal_zig_floats(self):
+ rg = Generator(self.bit_generator())
+ warmup(rg)
+ state = rg.bit_generator.state
+ r1 = rg.standard_normal(11, dtype=np.float32)
+ rg2 = Generator(self.bit_generator())
+ warmup(rg2)
+ rg2.bit_generator.state = state
+ r2 = rg2.standard_normal(11, dtype=np.float32)
+ assert_array_equal(r1, r2)
+ assert_equal(r1.dtype, np.float32)
+ assert_(comp_state(rg.bit_generator.state, rg2.bit_generator.state))
+
+ def test_output_fill(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ rg.bit_generator.state = state
+ rg.standard_normal(out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_normal(size=size)
+ assert_equal(direct, existing)
+
+ sized = np.empty(size)
+ rg.bit_generator.state = state
+ rg.standard_normal(out=sized, size=sized.shape)
+
+ existing = np.empty(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_normal(out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_normal(size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_uniform(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ rg.bit_generator.state = state
+ rg.random(out=existing)
+ rg.bit_generator.state = state
+ direct = rg.random(size=size)
+ assert_equal(direct, existing)
+
+ existing = np.empty(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.random(out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.random(size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_exponential(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ rg.bit_generator.state = state
+ rg.standard_exponential(out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_exponential(size=size)
+ assert_equal(direct, existing)
+
+ existing = np.empty(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_exponential(out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_exponential(size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_gamma(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ existing = np.zeros(size)
+ rg.bit_generator.state = state
+ rg.standard_gamma(1.0, out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(1.0, size=size)
+ assert_equal(direct, existing)
+
+ existing = np.zeros(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_gamma(1.0, out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(1.0, size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_filling_gamma_broadcast(self):
+ rg = self.rg
+ state = rg.bit_generator.state
+ size = (31, 7, 97)
+ mu = np.arange(97.0) + 1.0
+ existing = np.zeros(size)
+ rg.bit_generator.state = state
+ rg.standard_gamma(mu, out=existing)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(mu, size=size)
+ assert_equal(direct, existing)
+
+ existing = np.zeros(size, dtype=np.float32)
+ rg.bit_generator.state = state
+ rg.standard_gamma(mu, out=existing, dtype=np.float32)
+ rg.bit_generator.state = state
+ direct = rg.standard_gamma(mu, size=size, dtype=np.float32)
+ assert_equal(direct, existing)
+
+ def test_output_fill_error(self):
+ rg = self.rg
+ size = (31, 7, 97)
+ existing = np.empty(size)
+ with pytest.raises(TypeError):
+ rg.standard_normal(out=existing, dtype=np.float32)
+ with pytest.raises(ValueError):
+ rg.standard_normal(out=existing[::3])
+ existing = np.empty(size, dtype=np.float32)
+ with pytest.raises(TypeError):
+ rg.standard_normal(out=existing, dtype=np.float64)
+
+ existing = np.zeros(size, dtype=np.float32)
+ with pytest.raises(TypeError):
+ rg.standard_gamma(1.0, out=existing, dtype=np.float64)
+ with pytest.raises(ValueError):
+ rg.standard_gamma(1.0, out=existing[::3], dtype=np.float32)
+ existing = np.zeros(size, dtype=np.float64)
+ with pytest.raises(TypeError):
+ rg.standard_gamma(1.0, out=existing, dtype=np.float32)
+ with pytest.raises(ValueError):
+ rg.standard_gamma(1.0, out=existing[::3])
+
+ def test_integers_broadcast(self, dtype):
+ if dtype == np.bool:
+ upper = 2
+ lower = 0
+ else:
+ info = np.iinfo(dtype)
+ upper = int(info.max) + 1
+ lower = info.min
+ self._reset_state()
+ a = self.rg.integers(lower, [upper] * 10, dtype=dtype)
+ self._reset_state()
+ b = self.rg.integers([lower] * 10, upper, dtype=dtype)
+ assert_equal(a, b)
+ self._reset_state()
+ c = self.rg.integers(lower, upper, size=10, dtype=dtype)
+ assert_equal(a, c)
+ self._reset_state()
+ d = self.rg.integers(np.array(
+ [lower] * 10), np.array([upper], dtype=np.object), size=10,
+ dtype=dtype)
+ assert_equal(a, d)
+ self._reset_state()
+ e = self.rg.integers(
+ np.array([lower] * 10), np.array([upper] * 10), size=10,
+ dtype=dtype)
+ assert_equal(a, e)
+
+ self._reset_state()
+ a = self.rg.integers(0, upper, size=10, dtype=dtype)
+ self._reset_state()
+ b = self.rg.integers([upper] * 10, dtype=dtype)
+ assert_equal(a, b)
+
+ def test_integers_numpy(self, dtype):
+ high = np.array([1])
+ low = np.array([0])
+
+ out = self.rg.integers(low, high, dtype=dtype)
+ assert out.shape == (1,)
+
+ out = self.rg.integers(low[0], high, dtype=dtype)
+ assert out.shape == (1,)
+
+ out = self.rg.integers(low, high[0], dtype=dtype)
+ assert out.shape == (1,)
+
+ def test_integers_broadcast_errors(self, dtype):
+ if dtype == np.bool:
+ upper = 2
+ lower = 0
+ else:
+ info = np.iinfo(dtype)
+ upper = int(info.max) + 1
+ lower = info.min
+ with pytest.raises(ValueError):
+ self.rg.integers(lower, [upper + 1] * 10, dtype=dtype)
+ with pytest.raises(ValueError):
+ self.rg.integers(lower - 1, [upper] * 10, dtype=dtype)
+ with pytest.raises(ValueError):
+ self.rg.integers([lower - 1], [upper] * 10, dtype=dtype)
+ with pytest.raises(ValueError):
+ self.rg.integers([0], [0], dtype=dtype)
+
+
+class TestMT19937(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = MT19937
+ cls.advance = None
+ cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 32
+ cls._extra_setup()
+ cls.seed_error = ValueError
+
+ def test_numpy_state(self):
+ nprg = np.random.RandomState()
+ nprg.standard_normal(99)
+ state = nprg.get_state()
+ self.rg.bit_generator.state = state
+ state2 = self.rg.bit_generator.state
+ assert_((state[1] == state2['state']['key']).all())
+ assert_((state[2] == state2['state']['pos']))
+
+
+class TestPhilox(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = Philox
+ cls.advance = 2**63 + 2**31 + 2**15 + 1
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+
+class TestSFC64(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = SFC64
+ cls.advance = None
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 192
+ cls._extra_setup()
+
+
+class TestPCG64(RNG):
+ @classmethod
+ def setup_class(cls):
+ cls.bit_generator = PCG64
+ cls.advance = 2**63 + 2**31 + 2**15 + 1
+ cls.seed = [12345]
+ cls.rg = Generator(cls.bit_generator(*cls.seed))
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+
+class TestDefaultRNG(RNG):
+ @classmethod
+ def setup_class(cls):
+ # This will duplicate some tests that directly instantiate a fresh
+ # Generator(), but that's okay.
+ cls.bit_generator = PCG64
+ cls.advance = 2**63 + 2**31 + 2**15 + 1
+ cls.seed = [12345]
+ cls.rg = np.random.default_rng(*cls.seed)
+ cls.initial_state = cls.rg.bit_generator.state
+ cls.seed_vector_bits = 64
+ cls._extra_setup()
+
+ def test_default_is_pcg64(self):
+ # In order to change the default BitGenerator, we'll go through
+ # a deprecation cycle to move to a different function.
+ assert_(isinstance(self.rg.bit_generator, PCG64))
+
+ def test_seed(self):
+ np.random.default_rng()
+ np.random.default_rng(None)
+ np.random.default_rng(12345)
+ np.random.default_rng(0)
+ np.random.default_rng(43660444402423911716352051725018508569)
+ np.random.default_rng([43660444402423911716352051725018508569,
+ 279705150948142787361475340226491943209])
+ with pytest.raises(ValueError):
+ np.random.default_rng(-1)
+ with pytest.raises(ValueError):
+ np.random.default_rng([12345, -1])
diff --git a/numpy/testing/_private/nosetester.py b/numpy/testing/_private/nosetester.py
index 1728d9d1f..19569a509 100644
--- a/numpy/testing/_private/nosetester.py
+++ b/numpy/testing/_private/nosetester.py
@@ -92,7 +92,7 @@ def run_module_suite(file_to_run=None, argv=None):
Alternatively, calling::
- >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py")
+ >>> run_module_suite(file_to_run="numpy/tests/test_matlib.py") # doctest: +SKIP
from an interpreter will run all the test routine in 'test_matlib.py'.
"""
diff --git a/numpy/testing/_private/parameterized.py b/numpy/testing/_private/parameterized.py
index 53e67517d..489d8e09a 100644
--- a/numpy/testing/_private/parameterized.py
+++ b/numpy/testing/_private/parameterized.py
@@ -45,11 +45,18 @@ except ImportError:
from unittest import TestCase
-PY3 = sys.version_info[0] == 3
PY2 = sys.version_info[0] == 2
-if PY3:
+if PY2:
+ from types import InstanceType
+ lzip = zip
+ text_type = unicode
+ bytes_type = str
+ string_types = basestring,
+ def make_method(func, instance, type):
+ return MethodType(func, instance, type)
+else:
# Python 3 doesn't have an InstanceType, so just use a dummy type.
class InstanceType():
pass
@@ -61,14 +68,6 @@ if PY3:
if instance is None:
return func
return MethodType(func, instance)
-else:
- from types import InstanceType
- lzip = zip
- text_type = unicode
- bytes_type = str
- string_types = basestring,
- def make_method(func, instance, type):
- return MethodType(func, instance, type)
_param = namedtuple("param", "args kwargs")
@@ -190,7 +189,7 @@ def parameterized_argument_value_pairs(func, p):
in zip(named_args, argspec.defaults or [])
])
- seen_arg_names = set([ n for (n, _) in result ])
+ seen_arg_names = {n for (n, _) in result}
keywords = QuietOrderedDict(sorted([
(name, p.kwargs[name])
for name in p.kwargs
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index a3832fcde..8a31fcf15 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -6,6 +6,7 @@ from __future__ import division, absolute_import, print_function
import os
import sys
+import platform
import re
import gc
import operator
@@ -19,8 +20,7 @@ from warnings import WarningMessage
import pprint
from numpy.core import(
- float32, empty, arange, array_repr, ndarray, isnat, array)
-from numpy.lib.utils import deprecate
+ intp, float32, empty, arange, array_repr, ndarray, isnat, array)
if sys.version_info[0] >= 3:
from io import StringIO
@@ -32,13 +32,14 @@ __all__ = [
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
+ 'break_cycles',
]
@@ -50,7 +51,7 @@ class KnownFailureException(Exception):
KnownFailureTest = KnownFailureException # backwards compat
verbose = 0
-IS_PYPY = '__pypy__' in sys.modules
+IS_PYPY = platform.python_implementation() == 'PyPy'
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
@@ -152,22 +153,6 @@ def gisinf(x):
return st
-@deprecate(message="numpy.testing.rand is deprecated in numpy 1.11. "
- "Use numpy.random.rand instead.")
-def rand(*args):
- """Returns an array of random numbers with the given shape.
-
- This only uses the standard library, so it is useful for testing purposes.
- """
- import random
- from numpy.core import zeros, float64
- results = zeros(args, float64)
- f = results.flat
- for i in range(len(f)):
- f[i] = random.random()
- return results
-
-
if os.name == 'nt':
# Code "stolen" from enthought/debug/memusage.py
def GetPerformanceAttributes(object, counter, instance=None,
@@ -299,6 +284,11 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
+ This function handles NaN comparisons as if NaN was a "normal" number.
+ That is, no assertion is raised if both objects have NaNs in the same
+ positions. This is in contrast to the IEEE standard on NaNs, which says
+ that NaN compared to anything must return False.
+
Parameters
----------
actual : array_like
@@ -318,13 +308,19 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
Examples
--------
>>> np.testing.assert_equal([4,5], [4,6])
- ...
- <type 'exceptions.AssertionError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Items are not equal:
item=1
ACTUAL: 5
DESIRED: 6
+ The following comparison does not raise an exception. There are NaNs
+ in the inputs, but they are in the same positions.
+
+ >>> np.testing.assert_equal(np.array([1.0, 2.0, np.nan]), [1, 2, np.nan])
+
"""
__tracebackhide__ = True # Hide traceback for py.test
if isinstance(desired, dict):
@@ -352,7 +348,7 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
# XXX: catch ValueError for subclasses of ndarray where iscomplex fail
try:
usecomplex = iscomplexobj(actual) or iscomplexobj(desired)
- except ValueError:
+ except (ValueError, TypeError):
usecomplex = False
if usecomplex:
@@ -510,21 +506,24 @@ def assert_almost_equal(actual,desired,decimal=7,err_msg='',verbose=True):
>>> import numpy.testing as npt
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334)
>>> npt.assert_almost_equal(2.3333333333333, 2.33333334, decimal=10)
- ...
- <type 'exceptions.AssertionError'>:
- Items are not equal:
- ACTUAL: 2.3333333333333002
- DESIRED: 2.3333333399999998
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 10 decimals
+ ACTUAL: 2.3333333333333
+ DESIRED: 2.33333334
>>> npt.assert_almost_equal(np.array([1.0,2.3333333333333]),
... np.array([1.0,2.33333334]), decimal=9)
- ...
- <type 'exceptions.AssertionError'>:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333333])
- y: array([ 1. , 2.33333334])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 9 decimals
+ Mismatch: 50%
+ Max absolute difference: 6.66669964e-09
+ Max relative difference: 2.85715698e-09
+ x: array([1. , 2.333333333])
+ y: array([1. , 2.33333334])
"""
__tracebackhide__ = True # Hide traceback for py.test
@@ -626,14 +625,15 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
--------
>>> np.testing.assert_approx_equal(0.12345677777777e-20, 0.1234567e-20)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345671e-20,
- significant=8)
+ ... significant=8)
>>> np.testing.assert_approx_equal(0.12345670e-20, 0.12345672e-20,
- significant=8)
- ...
- <type 'exceptions.AssertionError'>:
+ ... significant=8)
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Items are not equal to 8 significant digits:
- ACTUAL: 1.234567e-021
- DESIRED: 1.2345672000000001e-021
+ ACTUAL: 1.234567e-21
+ DESIRED: 1.2345672e-21
the evaluated condition that raises the exception is
@@ -660,10 +660,10 @@ def assert_approx_equal(actual,desired,significant=7,err_msg='',verbose=True):
sc_actual = actual/scale
except ZeroDivisionError:
sc_actual = 0.0
- msg = build_err_msg([actual, desired], err_msg,
- header='Items are not equal to %d significant digits:' %
- significant,
- verbose=verbose)
+ msg = build_err_msg(
+ [actual, desired], err_msg,
+ header='Items are not equal to %d significant digits:' % significant,
+ verbose=verbose)
try:
# If one of desired/actual is not finite, handle it specially here:
# check that both are nan if any is a nan, and test for equality
@@ -686,12 +686,14 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
header='', precision=6, equal_nan=True,
equal_inf=True):
__tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import array, isnan, inf, bool_
- from numpy.core.fromnumeric import all as npall
+ from numpy.core import array, array2string, isnan, inf, bool_, errstate, all, max, object_
x = array(x, copy=False, subok=True)
y = array(y, copy=False, subok=True)
+ # original array for output formatting
+ ox, oy = x, y
+
def isnumber(x):
return x.dtype.char in '?bhilqpBHILQPefdgFDG'
@@ -705,15 +707,20 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
at the same locations.
"""
- # Both the != True comparison here and the cast to bool_ at the end are
- # done to deal with `masked`, which cannot be compared usefully, and
- # for which np.all yields masked. The use of the function np.all is
- # for back compatibility with ndarray subclasses that changed the
- # return values of the all method. We are not committed to supporting
- # such subclasses, but some used to work.
x_id = func(x)
y_id = func(y)
- if npall(x_id == y_id) != True:
+ # We include work-arounds here to handle three types of slightly
+ # pathological ndarray subclasses:
+ # (1) all() on `masked` array scalars can return masked arrays, so we
+ # use != True
+ # (2) __eq__ on some ndarray subclasses returns Python booleans
+ # instead of element-wise comparisons, so we cast to bool_() and
+ # use isinstance(..., bool) checks
+ # (3) subclasses with bare-bones __array_function__ implementations may
+ # not implement np.all(), so favor using the .all() method
+ # We are not committed to supporting such subclasses, but it's nice to
+ # support them if possible.
+ if bool_(x_id == y_id).all() != True:
msg = build_err_msg([x, y],
err_msg + '\nx and y %s location mismatch:'
% (hasval), verbose=verbose, header=header,
@@ -721,9 +728,9 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
raise AssertionError(msg)
# If there is a scalar, then here we know the array has the same
# flag as it everywhere, so we should return the scalar flag.
- if x_id.ndim == 0:
+ if isinstance(x_id, bool) or x_id.ndim == 0:
return bool_(x_id)
- elif y_id.ndim == 0:
+ elif isinstance(x_id, bool) or y_id.ndim == 0:
return bool_(y_id)
else:
return y_id
@@ -770,20 +777,52 @@ def assert_array_compare(comparison, x, y, err_msg='', verbose=True,
if isinstance(val, bool):
cond = val
- reduced = [0]
+ reduced = array([val])
else:
reduced = val.ravel()
cond = reduced.all()
- reduced = reduced.tolist()
+
# The below comparison is a hack to ensure that fully masked
# results, for which val.ravel().all() returns np.ma.masked,
# do not trigger a failure (np.ma.masked != True evaluates as
# np.ma.masked, which is falsy).
if cond != True:
- match = 100-100.0*reduced.count(1)/len(reduced)
- msg = build_err_msg([x, y],
- err_msg
- + '\n(mismatch %s%%)' % (match,),
+ n_mismatch = reduced.size - reduced.sum(dtype=intp)
+ n_elements = flagged.size if flagged.ndim != 0 else reduced.size
+ percent_mismatch = 100 * n_mismatch / n_elements
+ remarks = [
+ 'Mismatched elements: {} / {} ({:.3g}%)'.format(
+ n_mismatch, n_elements, percent_mismatch)]
+
+ with errstate(invalid='ignore', divide='ignore'):
+ # ignore errors for non-numeric types
+ with contextlib.suppress(TypeError):
+ error = abs(x - y)
+ max_abs_error = max(error)
+ if getattr(error, 'dtype', object_) == object_:
+ remarks.append('Max absolute difference: '
+ + str(max_abs_error))
+ else:
+ remarks.append('Max absolute difference: '
+ + array2string(max_abs_error))
+
+ # note: this definition of relative error matches that one
+ # used by assert_allclose (found in np.isclose)
+ # Filter values where the divisor would be zero
+ nonzero = bool_(y != 0)
+ if all(~nonzero):
+ max_rel_error = array(inf)
+ else:
+ max_rel_error = max(error[nonzero] / abs(y[nonzero]))
+ if getattr(error, 'dtype', object_) == object_:
+ remarks.append('Max relative difference: '
+ + str(max_rel_error))
+ else:
+ remarks.append('Max relative difference: '
+ + array2string(max_rel_error))
+
+ err_msg += '\n' + '\n'.join(remarks)
+ msg = build_err_msg([ox, oy], err_msg,
verbose=verbose, header=header,
names=('x', 'y'), precision=precision)
raise AssertionError(msg)
@@ -843,14 +882,15 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
AssertionError:
Arrays are not equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 3.14159265, NaN])
- y: array([ 1. , 3.14159265, NaN])
+ Mismatch: 33.3%
+ Max absolute difference: 4.4408921e-16
+ Max relative difference: 1.41357986e-16
+ x: array([1. , 3.141593, nan])
+ y: array([1. , 3.141593, nan])
Use `assert_allclose` or one of the nulp (number of floating point values)
functions for these cases instead:
@@ -915,30 +955,33 @@ def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
the first assert does not raise an exception
>>> np.testing.assert_array_almost_equal([1.0,2.333,np.nan],
- [1.0,2.333,np.nan])
+ ... [1.0,2.333,np.nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33339,np.nan], decimal=5)
- ...
- <type 'exceptions.AssertionError'>:
+ Traceback (most recent call last):
+ ...
AssertionError:
- Arrays are not almost equal
- <BLANKLINE>
- (mismatch 50.0%)
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33339, NaN])
+ Arrays are not almost equal to 5 decimals
+ Mismatch: 33.3%
+ Max absolute difference: 6.e-05
+ Max relative difference: 2.57136612e-05
+ x: array([1. , 2.33333, nan])
+ y: array([1. , 2.33339, nan])
>>> np.testing.assert_array_almost_equal([1.0,2.33333,np.nan],
... [1.0,2.33333, 5], decimal=5)
- <type 'exceptions.ValueError'>:
- ValueError:
- Arrays are not almost equal
- x: array([ 1. , 2.33333, NaN])
- y: array([ 1. , 2.33333, 5. ])
+ Traceback (most recent call last):
+ ...
+ AssertionError:
+ Arrays are not almost equal to 5 decimals
+ x and y nan location mismatch:
+ x: array([1. , 2.33333, nan])
+ y: array([1. , 2.33333, 5. ])
"""
__tracebackhide__ = True # Hide traceback for py.test
- from numpy.core import around, number, float_, result_type, array
+ from numpy.core import number, float_, result_type, array
from numpy.core.numerictypes import issubdtype
from numpy.core.fromnumeric import any as npany
@@ -1015,27 +1058,34 @@ def assert_array_less(x, y, err_msg='', verbose=True):
--------
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1.1, 2.0, np.nan])
>>> np.testing.assert_array_less([1.0, 1.0, np.nan], [1, 2.0, np.nan])
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 1., NaN])
- y: array([ 1., 2., NaN])
+ Mismatch: 33.3%
+ Max absolute difference: 1.
+ Max relative difference: 0.5
+ x: array([ 1., 1., nan])
+ y: array([ 1., 2., nan])
>>> np.testing.assert_array_less([1.0, 4.0], 3)
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Arrays are not less-ordered
- (mismatch 50.0%)
- x: array([ 1., 4.])
+ Mismatch: 50%
+ Max absolute difference: 2.
+ Max relative difference: 0.66666667
+ x: array([1., 4.])
y: array(3)
>>> np.testing.assert_array_less([1.0, 2.0, 3.0], [4])
- ...
- <type 'exceptions.ValueError'>:
+ Traceback (most recent call last):
+ ...
+ AssertionError:
Arrays are not less-ordered
(shapes (3,), (1,) mismatch)
- x: array([ 1., 2., 3.])
+ x: array([1., 2., 3.])
y: array([4])
"""
@@ -1087,7 +1137,7 @@ def assert_string_equal(actual, desired):
if desired == actual:
return
- diff = list(difflib.Differ().compare(actual.splitlines(1), desired.splitlines(1)))
+ diff = list(difflib.Differ().compare(actual.splitlines(True), desired.splitlines(True)))
diff_list = []
while diff:
d1 = diff.pop(0)
@@ -1140,7 +1190,7 @@ def rundocs(filename=None, raise_on_error=True):
argument to the ``test()`` call. For example, to run all tests (including
doctests) for `numpy.lib`:
- >>> np.lib.test(doctests=True) #doctest: +SKIP
+ >>> np.lib.test(doctests=True) # doctest: +SKIP
"""
from numpy.compat import npy_load_module
import doctest
@@ -1322,7 +1372,7 @@ def decorate_methods(cls, decorator, testmatch=None):
return
-def measure(code_str,times=1,label=None):
+def measure(code_str, times=1, label=None):
"""
Return elapsed time for executing code in the namespace of the caller.
@@ -1349,9 +1399,9 @@ def measure(code_str,times=1,label=None):
Examples
--------
- >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)',
- ... times=times)
- >>> print("Time for a single execution : ", etime / times, "s")
+ >>> times = 10
+ >>> etime = np.testing.measure('for i in range(1000): np.sqrt(i**2)', times=times)
+ >>> print("Time for a single execution : ", etime / times, "s") # doctest: +SKIP
Time for a single execution : 0.005 s
"""
@@ -1400,9 +1450,9 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
Raises an AssertionError if two objects are not equal up to desired
tolerance.
- The test is equivalent to ``allclose(actual, desired, rtol, atol)``.
- It compares the difference between `actual` and `desired` to
- ``atol + rtol * abs(desired)``.
+ The test is equivalent to ``allclose(actual, desired, rtol, atol)`` (note
+ that ``allclose`` has different default values). It compares the difference
+ between `actual` and `desired` to ``atol + rtol * abs(desired)``.
.. versionadded:: 1.5.0
@@ -1436,7 +1486,7 @@ def assert_allclose(actual, desired, rtol=1e-7, atol=0, equal_nan=True,
--------
>>> x = [1e-5, 1e-3, 1e-1]
>>> y = np.arccos(np.cos(x))
- >>> assert_allclose(x, y, rtol=1e-5, atol=0)
+ >>> np.testing.assert_allclose(x, y, rtol=1e-5, atol=0)
"""
__tracebackhide__ = True # Hide traceback for py.test
@@ -1890,7 +1940,8 @@ class clear_and_catch_warnings(warnings.catch_warnings):
Examples
--------
>>> import warnings
- >>> with clear_and_catch_warnings(modules=[np.core.fromnumeric]):
+ >>> with np.testing.clear_and_catch_warnings(
+ ... modules=[np.core.fromnumeric]):
... warnings.simplefilter('always')
... warnings.filterwarnings('ignore', module='np.core.fromnumeric')
... # do something that raises a warning but ignore those in
@@ -1971,25 +2022,28 @@ class suppress_warnings(object):
Examples
--------
- >>> with suppress_warnings() as sup:
- ... sup.filter(DeprecationWarning, "Some text")
- ... sup.filter(module=np.ma.core)
- ... log = sup.record(FutureWarning, "Does this occur?")
- ... command_giving_warnings()
- ... # The FutureWarning was given once, the filtered warnings were
- ... # ignored. All other warnings abide outside settings (may be
- ... # printed/error)
- ... assert_(len(log) == 1)
- ... assert_(len(sup.log) == 1) # also stored in log attribute
-
- Or as a decorator:
-
- >>> sup = suppress_warnings()
- >>> sup.filter(module=np.ma.core) # module must match exact
- >>> @sup
- >>> def some_function():
- ... # do something which causes a warning in np.ma.core
- ... pass
+
+ With a context manager::
+
+ with np.testing.suppress_warnings() as sup:
+ sup.filter(DeprecationWarning, "Some text")
+ sup.filter(module=np.ma.core)
+ log = sup.record(FutureWarning, "Does this occur?")
+ command_giving_warnings()
+ # The FutureWarning was given once, the filtered warnings were
+ # ignored. All other warnings abide outside settings (may be
+ # printed/error)
+ assert_(len(log) == 1)
+ assert_(len(sup.log) == 1) # also stored in log attribute
+
+ Or as a decorator::
+
+ sup = np.testing.suppress_warnings()
+ sup.filter(module=np.ma.core) # module must match exactly
+ @sup
+ def some_function():
+ # do something which causes a warning in np.ma.core
+ pass
"""
def __init__(self, forwarding_rule="always"):
self._entered = False
@@ -2202,6 +2256,7 @@ def _assert_no_gc_cycles_context(name=None):
# not meaningful to test if there is no refcounting
if not HAS_REFCOUNT:
+ yield
return
assert_(gc.isenabled())
@@ -2280,3 +2335,19 @@ def assert_no_gc_cycles(*args, **kwargs):
args = args[1:]
with _assert_no_gc_cycles_context(name=func.__name__):
func(*args, **kwargs)
+
+def break_cycles():
+ """
+ Break reference cycles by calling gc.collect
+ Objects can call other objects' methods (for instance, another object's
+ __del__) inside their own __del__. On PyPy, the interpreter only runs
+ between calls to gc.collect, so multiple calls are needed to completely
+ release all cycles.
+ """
+
+ gc.collect()
+ if IS_PYPY:
+ # interpreter runs now, to call deleted objects' __del__ methods
+ gc.collect()
+ # one more, just to make sure
+ gc.collect()
diff --git a/numpy/testing/decorators.py b/numpy/testing/decorators.py
deleted file mode 100644
index bf78be500..000000000
--- a/numpy/testing/decorators.py
+++ /dev/null
@@ -1,15 +0,0 @@
-"""
-Back compatibility decorators module. It will import the appropriate
-set of tools
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.decorators is deprecated "
- "since numpy 1.15.0, import from numpy.testing instead.",
- DeprecationWarning, stacklevel=2)
-
-from ._private.decorators import *
diff --git a/numpy/testing/noseclasses.py b/numpy/testing/noseclasses.py
deleted file mode 100644
index 5748a9a0f..000000000
--- a/numpy/testing/noseclasses.py
+++ /dev/null
@@ -1,14 +0,0 @@
-"""
-Back compatibility noseclasses module. It will import the appropriate
-set of tools
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.noseclasses is deprecated "
- "since 1.15.0, import from numpy.testing instead",
- DeprecationWarning, stacklevel=2)
-
-from ._private.noseclasses import *
diff --git a/numpy/testing/nosetester.py b/numpy/testing/nosetester.py
deleted file mode 100644
index 2ac212eee..000000000
--- a/numpy/testing/nosetester.py
+++ /dev/null
@@ -1,19 +0,0 @@
-"""
-Back compatibility nosetester module. It will import the appropriate
-set of tools
-
-"""
-from __future__ import division, absolute_import, print_function
-
-import warnings
-
-# 2018-04-04, numpy 1.15.0
-warnings.warn("Importing from numpy.testing.nosetester is deprecated "
- "since 1.15.0, import from numpy.testing instead.",
- DeprecationWarning, stacklevel=2)
-
-from ._private.nosetester import *
-
-__all__ = ['get_package_name', 'run_module_suite', 'NoseTester',
- '_numpy_tester', 'get_package_name', 'import_nose',
- 'suppress_warnings']
diff --git a/numpy/testing/print_coercion_tables.py b/numpy/testing/print_coercion_tables.py
index 3a359f472..72b22cee1 100755
--- a/numpy/testing/print_coercion_tables.py
+++ b/numpy/testing/print_coercion_tables.py
@@ -70,22 +70,24 @@ def print_coercion_table(ntypes, inputfirstvalue, inputsecondvalue, firstarray,
print(char, end=' ')
print()
-print("can cast")
-print_cancast_table(np.typecodes['All'])
-print()
-print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
-print()
-print("scalar + scalar")
-print_coercion_table(np.typecodes['All'], 0, 0, False)
-print()
-print("scalar + neg scalar")
-print_coercion_table(np.typecodes['All'], 0, -1, False)
-print()
-print("array + scalar")
-print_coercion_table(np.typecodes['All'], 0, 0, True)
-print()
-print("array + neg scalar")
-print_coercion_table(np.typecodes['All'], 0, -1, True)
-print()
-print("promote_types")
-print_coercion_table(np.typecodes['All'], 0, 0, False, True)
+
+if __name__ == '__main__':
+ print("can cast")
+ print_cancast_table(np.typecodes['All'])
+ print()
+ print("In these tables, ValueError is '!', OverflowError is '@', TypeError is '#'")
+ print()
+ print("scalar + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, False)
+ print()
+ print("scalar + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, False)
+ print()
+ print("array + scalar")
+ print_coercion_table(np.typecodes['All'], 0, 0, True)
+ print()
+ print("array + neg scalar")
+ print_coercion_table(np.typecodes['All'], 0, -1, True)
+ print()
+ print("promote_types")
+ print_coercion_table(np.typecodes['All'], 0, 0, False, True)
diff --git a/numpy/testing/tests/test_decorators.py b/numpy/testing/tests/test_decorators.py
index b8283d9de..c029bf90c 100644
--- a/numpy/testing/tests/test_decorators.py
+++ b/numpy/testing/tests/test_decorators.py
@@ -13,7 +13,9 @@ from numpy.testing import (
try:
- import nose
+ with warnings.catch_warnings():
+ warnings.simplefilter("always")
+ import nose # noqa: F401
except ImportError:
HAVE_NOSE = False
else:
diff --git a/numpy/testing/tests/test_utils.py b/numpy/testing/tests/test_utils.py
index e0d3414f7..44f93a693 100644
--- a/numpy/testing/tests/test_utils.py
+++ b/numpy/testing/tests/test_utils.py
@@ -17,6 +17,7 @@ from numpy.testing import (
clear_and_catch_warnings, suppress_warnings, assert_string_equal, assert_,
tempdir, temppath, assert_no_gc_cycles, HAS_REFCOUNT
)
+from numpy.core.overrides import ARRAY_FUNCTION_ENABLED
class _GenericTest(object):
@@ -158,6 +159,42 @@ class TestArrayEqual(_GenericTest):
self._test_equal(a, b)
self._test_equal(b, a)
+ def test_subclass_that_overrides_eq(self):
+ # While we cannot guarantee testing functions will always work for
+ # subclasses, the tests should ideally rely only on subclasses having
+ # comparison operators, not on them being able to store booleans
+ # (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
+ class MyArray(np.ndarray):
+ def __eq__(self, other):
+ return bool(np.equal(self, other).all())
+
+ def __ne__(self, other):
+ return not self == other
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+ assert_(type(a == a), bool)
+ assert_(a == a)
+ assert_(a != b)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
+ @pytest.mark.skipif(
+ not ARRAY_FUNCTION_ENABLED, reason='requires __array_function__')
+ def test_subclass_that_does_not_implement_npall(self):
+ class MyArray(np.ndarray):
+ def __array_function__(self, *args, **kwargs):
+ return NotImplemented
+
+ a = np.array([1., 2.]).view(MyArray)
+ b = np.array([2., 3.]).view(MyArray)
+ with assert_raises(TypeError):
+ np.all(a)
+ self._test_equal(a, a)
+ self._test_not_equal(a, b)
+ self._test_not_equal(b, a)
+
class TestBuildErrorMessage(object):
@@ -292,24 +329,29 @@ class TestEqual(TestArrayEqual):
self._test_not_equal(x, y)
def test_error_message(self):
- try:
+ with pytest.raises(AssertionError) as exc_info:
self._assert_func(np.array([1, 2]), np.array([[1, 2]]))
- except AssertionError as e:
- msg = str(e)
- msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
- msg_reference = textwrap.dedent("""\
+ msg = str(exc_info.value)
+ msg2 = msg.replace("shapes (2L,), (1L, 2L)", "shapes (2,), (1, 2)")
+ msg_reference = textwrap.dedent("""\
- Arrays are not equal
+ Arrays are not equal
- (shapes (2,), (1, 2) mismatch)
- x: array([1, 2])
- y: array([[1, 2]])""")
- try:
- assert_equal(msg, msg_reference)
- except AssertionError:
- assert_equal(msg2, msg_reference)
- else:
- raise AssertionError("Did not raise")
+ (shapes (2,), (1, 2) mismatch)
+ x: array([1, 2])
+ y: array([[1, 2]])""")
+
+ try:
+ assert_equal(msg, msg_reference)
+ except AssertionError:
+ assert_equal(msg2, msg_reference)
+
+ def test_object(self):
+ #gh-12942
+ import datetime
+ a = np.array([datetime.datetime(2000, 1, 1),
+ datetime.datetime(2000, 1, 2)])
+ self._test_not_equal(a, a[::-1])
class TestArrayAlmostEqual(_GenericTest):
@@ -469,29 +511,78 @@ class TestAlmostEqual(_GenericTest):
self._test_not_equal(x, z)
def test_error_message(self):
- """Check the message is formatted correctly for the decimal value"""
+ """Check the message is formatted correctly for the decimal value.
+ Also check the message when input includes inf or nan (gh12200)"""
x = np.array([1.00000000001, 2.00000000002, 3.00003])
y = np.array([1.00000000002, 2.00000000003, 3.00004])
- # test with a different amount of decimal digits
- # note that we only check for the formatting of the arrays themselves
- b = ('x: array([1.00000000001, 2.00000000002, 3.00003 '
- ' ])\n y: array([1.00000000002, 2.00000000003, 3.00004 ])')
- try:
+ # Test with a different amount of decimal digits
+ with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y, decimal=12)
- except AssertionError as e:
- # remove anything that's not the array string
- assert_equal(str(e).split('%)\n ')[1], b)
-
- # with the default value of decimal digits, only the 3rd element differs
- # note that we only check for the formatting of the arrays themselves
- b = ('x: array([1. , 2. , 3.00003])\n y: array([1. , '
- '2. , 3.00004])')
- try:
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 3 / 3 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+ assert_equal(
+ msgs[6],
+ ' x: array([1.00000000001, 2.00000000002, 3.00003 ])')
+ assert_equal(
+ msgs[7],
+ ' y: array([1.00000000002, 2.00000000003, 3.00004 ])')
+
+ # With the default value of decimal digits, only the 3rd element
+ # differs. Note that we only check for the formatting of the arrays
+ # themselves.
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 3 (33.3%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.e-05')
+ assert_equal(msgs[5], 'Max relative difference: 3.33328889e-06')
+ assert_equal(msgs[6], ' x: array([1. , 2. , 3.00003])')
+ assert_equal(msgs[7], ' y: array([1. , 2. , 3.00004])')
+
+ # Check the error message when input includes inf
+ x = np.array([np.inf, 0])
+ y = np.array([np.inf, 1])
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 1 / 2 (50%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 1.')
+ assert_equal(msgs[6], ' x: array([inf, 0.])')
+ assert_equal(msgs[7], ' y: array([inf, 1.])')
+
+ # Check the error message when dividing by zero
+ x = np.array([1, 2])
+ y = np.array([0, 0])
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 2 / 2 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 2')
+ assert_equal(msgs[5], 'Max relative difference: inf')
+
+ def test_error_message_2(self):
+ """Check the message is formatted correctly when either x or y is a scalar."""
+ x = 2
+ y = np.ones(20)
+ with pytest.raises(AssertionError) as exc_info:
+ self._assert_func(x, y)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 1.')
+
+ y = 2
+ x = np.ones(20)
+ with pytest.raises(AssertionError) as exc_info:
self._assert_func(x, y)
- except AssertionError as e:
- # remove anything that's not the array string
- assert_equal(str(e).split('%)\n ')[1], b)
+ msgs = str(exc_info.value).split('\n')
+ assert_equal(msgs[3], 'Mismatched elements: 20 / 20 (100%)')
+ assert_equal(msgs[4], 'Max absolute difference: 1.')
+ assert_equal(msgs[5], 'Max relative difference: 0.5')
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
@@ -780,12 +871,13 @@ class TestAssertAllclose(object):
def test_report_fail_percentage(self):
a = np.array([1, 1, 1, 1])
b = np.array([1, 1, 1, 2])
- try:
+
+ with pytest.raises(AssertionError) as exc_info:
assert_allclose(a, b)
- msg = ''
- except AssertionError as exc:
- msg = exc.args[0]
- assert_("mismatch 25.0%" in msg)
+ msg = str(exc_info.value)
+ assert_('Mismatched elements: 1 / 4 (25%)\n'
+ 'Max absolute difference: 1\n'
+ 'Max relative difference: 0.5' in msg)
def test_equal_nan(self):
a = np.array([np.nan])
@@ -809,6 +901,15 @@ class TestAssertAllclose(object):
assert_array_less(a, b)
assert_allclose(a, b)
+ def test_report_max_relative_error(self):
+ a = np.array([0, 1])
+ b = np.array([0, 2])
+
+ with pytest.raises(AssertionError) as exc_info:
+ assert_allclose(a, b)
+ msg = str(exc_info.value)
+ assert_('Max relative difference: 0.5' in msg)
+
class TestArrayAlmostEqualNulp(object):
@@ -1068,16 +1169,14 @@ class TestStringEqual(object):
assert_string_equal("hello", "hello")
assert_string_equal("hello\nmultiline", "hello\nmultiline")
- try:
+ with pytest.raises(AssertionError) as exc_info:
assert_string_equal("foo\nbar", "hello\nbar")
- except AssertionError as exc:
- assert_equal(str(exc), "Differences in strings:\n- foo\n+ hello")
- else:
- raise AssertionError("exception not raised")
+ msg = str(exc_info.value)
+ assert_equal(msg, "Differences in strings:\n- foo\n+ hello")
assert_raises(AssertionError,
lambda: assert_string_equal("foo", "hello"))
-
+
def test_regex(self):
assert_string_equal("a+*b", "a+*b")
@@ -1383,7 +1482,7 @@ def test_tempdir():
def test_temppath():
with temppath() as fpath:
- with open(fpath, 'w') as f:
+ with open(fpath, 'w'):
pass
assert_(not os.path.isfile(fpath))
@@ -1439,6 +1538,7 @@ class TestAssertNoGcCycles(object):
with assert_raises(AssertionError):
assert_no_gc_cycles(make_cycle)
+ @pytest.mark.slow
def test_fails(self):
"""
Test that in cases where the garbage cannot be collected, we raise an
diff --git a/numpy/testing/utils.py b/numpy/testing/utils.py
index 98f19e348..975f6ad5d 100644
--- a/numpy/testing/utils.py
+++ b/numpy/testing/utils.py
@@ -7,10 +7,11 @@ from __future__ import division, absolute_import, print_function
import warnings
-# 2018-04-04, numpy 1.15.0
+# 2018-04-04, numpy 1.15.0 ImportWarning
+# 2019-09-18, numpy 1.18.0 DeprecationWarning (changed)
warnings.warn("Importing from numpy.testing.utils is deprecated "
"since 1.15.0, import from numpy.testing instead.",
- ImportWarning, stacklevel=2)
+ DeprecationWarning, stacklevel=2)
from ._private.utils import *
@@ -19,7 +20,7 @@ __all__ = [
'assert_array_equal', 'assert_array_less', 'assert_string_equal',
'assert_array_almost_equal', 'assert_raises', 'build_err_msg',
'decorate_methods', 'jiffies', 'memusage', 'print_assert_equal',
- 'raises', 'rand', 'rundocs', 'runstring', 'verbose', 'measure',
+ 'raises', 'rundocs', 'runstring', 'verbose', 'measure',
'assert_', 'assert_array_almost_equal_nulp', 'assert_raises_regex',
'assert_array_max_ulp', 'assert_warns', 'assert_no_warnings',
'assert_allclose', 'IgnoreException', 'clear_and_catch_warnings',
diff --git a/numpy/tests/test_ctypeslib.py b/numpy/tests/test_ctypeslib.py
index 675f8d242..521208c36 100644
--- a/numpy/tests/test_ctypeslib.py
+++ b/numpy/tests/test_ctypeslib.py
@@ -2,6 +2,7 @@ from __future__ import division, absolute_import, print_function
import sys
import pytest
+import weakref
import numpy as np
from numpy.ctypeslib import ndpointer, load_library, as_array
@@ -9,20 +10,30 @@ from numpy.distutils.misc_util import get_shared_lib_extension
from numpy.testing import assert_, assert_array_equal, assert_raises, assert_equal
try:
+ import ctypes
+except ImportError:
+ ctypes = None
+else:
cdll = None
+ test_cdll = None
if hasattr(sys, 'gettotalrefcount'):
try:
cdll = load_library('_multiarray_umath_d', np.core._multiarray_umath.__file__)
except OSError:
pass
+ try:
+ test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+ except OSError:
+ pass
if cdll is None:
cdll = load_library('_multiarray_umath', np.core._multiarray_umath.__file__)
- _HAS_CTYPE = True
-except ImportError:
- _HAS_CTYPE = False
+ if test_cdll is None:
+ test_cdll = load_library('_multiarray_tests', np.core._multiarray_tests.__file__)
+ c_forward_pointer = test_cdll.forward_pointer
-@pytest.mark.skipif(not _HAS_CTYPE,
+
+@pytest.mark.skipif(ctypes is None,
reason="ctypes not available in this python")
@pytest.mark.skipif(sys.platform == 'cygwin',
reason="Known to fail on cygwin")
@@ -108,12 +119,72 @@ class TestNdpointer(object):
assert_raises(TypeError, p.from_param, np.array([[1, 2], [3, 4]]))
def test_cache(self):
- a1 = ndpointer(dtype=np.float64)
- a2 = ndpointer(dtype=np.float64)
- assert_(a1 == a2)
+ assert_(ndpointer(dtype=np.float64) is ndpointer(dtype=np.float64))
+
+ # shapes are normalized
+ assert_(ndpointer(shape=2) is ndpointer(shape=(2,)))
+
+ # 1.12 <= v < 1.16 had a bug that made these fail
+ assert_(ndpointer(shape=2) is not ndpointer(ndim=2))
+ assert_(ndpointer(ndim=2) is not ndpointer(shape=2))
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available on this python installation")
+class TestNdpointerCFunc(object):
+ def test_arguments(self):
+ """ Test that arguments are coerced from arrays """
+ c_forward_pointer.restype = ctypes.c_void_p
+ c_forward_pointer.argtypes = (ndpointer(ndim=2),)
+
+ c_forward_pointer(np.zeros((2, 3)))
+ # too many dimensions
+ assert_raises(
+ ctypes.ArgumentError, c_forward_pointer, np.zeros((2, 3, 4)))
+
+ @pytest.mark.parametrize(
+ 'dt', [
+ float,
+ np.dtype(dict(
+ formats=['<i4', '<i4'],
+ names=['a', 'b'],
+ offsets=[0, 2],
+ itemsize=6
+ ))
+ ], ids=[
+ 'float',
+ 'overlapping-fields'
+ ]
+ )
+ def test_return(self, dt):
+ """ Test that return values are coerced to arrays """
+ arr = np.zeros((2, 3), dt)
+ ptr_type = ndpointer(shape=arr.shape, dtype=arr.dtype)
+
+ c_forward_pointer.restype = ptr_type
+ c_forward_pointer.argtypes = (ptr_type,)
+
+ # check that the arrays are equivalent views on the same data
+ arr2 = c_forward_pointer(arr)
+ assert_equal(arr2.dtype, arr.dtype)
+ assert_equal(arr2.shape, arr.shape)
+ assert_equal(
+ arr2.__array_interface__['data'],
+ arr.__array_interface__['data']
+ )
+
+ def test_vague_return_value(self):
+ """ Test that vague ndpointer return values do not promote to arrays """
+ arr = np.zeros((2, 3))
+ ptr_type = ndpointer(dtype=arr.dtype)
+
+ c_forward_pointer.restype = ptr_type
+ c_forward_pointer.argtypes = (ptr_type,)
+
+ ret = c_forward_pointer(arr)
+ assert_(isinstance(ret, ptr_type))
-@pytest.mark.skipif(not _HAS_CTYPE,
+@pytest.mark.skipif(ctypes is None,
reason="ctypes not available on this python installation")
class TestAsArray(object):
def test_array(self):
@@ -190,3 +261,107 @@ class TestAsArray(object):
b = np.ctypeslib.as_array(newpnt, (N,))
# now delete both, which should cleanup both objects
del newpnt, b
+
+ def test_segmentation_fault(self):
+ arr = np.zeros((224, 224, 3))
+ c_arr = np.ctypeslib.as_ctypes(arr)
+ arr_ref = weakref.ref(arr)
+ del arr
+
+ # check the reference wasn't cleaned up
+ assert_(arr_ref() is not None)
+
+ # check we avoid the segfault
+ c_arr[0][0][0]
+
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available on this python installation")
+class TestAsCtypesType(object):
+ """ Test conversion from dtypes to ctypes types """
+ def test_scalar(self):
+ dt = np.dtype('<u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16.__ctype_le__)
+
+ dt = np.dtype('>u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16.__ctype_be__)
+
+ dt = np.dtype('u2')
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, ctypes.c_uint16)
+
+ def test_subarray(self):
+ dt = np.dtype((np.int32, (2, 3)))
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_equal(ct, 2 * (3 * ctypes.c_int32))
+
+ def test_structure(self):
+ dt = np.dtype([
+ ('a', np.uint16),
+ ('b', np.uint32),
+ ])
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Structure))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_structure_aligned(self):
+ dt = np.dtype([
+ ('a', np.uint16),
+ ('b', np.uint32),
+ ], align=True)
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Structure))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('', ctypes.c_char * 2), # padding
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_union(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.uint16, np.uint32]
+ ))
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Union))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ])
+
+ def test_padded_union(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 0],
+ formats=[np.uint16, np.uint32],
+ itemsize=5,
+ ))
+
+ ct = np.ctypeslib.as_ctypes_type(dt)
+ assert_(issubclass(ct, ctypes.Union))
+ assert_equal(ctypes.sizeof(ct), dt.itemsize)
+ assert_equal(ct._fields_, [
+ ('a', ctypes.c_uint16),
+ ('b', ctypes.c_uint32),
+ ('', ctypes.c_char * 5), # padding
+ ])
+
+ def test_overlapping(self):
+ dt = np.dtype(dict(
+ names=['a', 'b'],
+ offsets=[0, 2],
+ formats=[np.uint32, np.uint32]
+ ))
+ assert_raises(NotImplementedError, np.ctypeslib.as_ctypes_type, dt)
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
new file mode 100644
index 000000000..e3621c0fd
--- /dev/null
+++ b/numpy/tests/test_public_api.py
@@ -0,0 +1,498 @@
+from __future__ import division, absolute_import, print_function
+
+import sys
+import subprocess
+import pkgutil
+import types
+import importlib
+import warnings
+
+import numpy as np
+import numpy
+import pytest
+
+try:
+ import ctypes
+except ImportError:
+ ctypes = None
+
+
+def check_dir(module, module_name=None):
+ """Returns a mapping of all objects with the wrong __module__ attribute."""
+ if module_name is None:
+ module_name = module.__name__
+ results = {}
+ for name in dir(module):
+ item = getattr(module, name)
+ if (hasattr(item, '__module__') and hasattr(item, '__name__')
+ and item.__module__ != module_name):
+ results[name] = item.__module__ + '.' + item.__name__
+ return results
+
+
+@pytest.mark.skipif(
+ sys.version_info[0] < 3,
+ reason="NumPy exposes slightly different functions on Python 2")
+def test_numpy_namespace():
+ # None of these objects are publicly documented to be part of the main
+ # NumPy namespace (some are useful though, others need to be cleaned up)
+ undocumented = {
+ 'Tester': 'numpy.testing._private.nosetester.NoseTester',
+ '_add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+ 'add_docstring': 'numpy.core._multiarray_umath.add_docstring',
+ 'add_newdoc': 'numpy.core.function_base.add_newdoc',
+ 'add_newdoc_ufunc': 'numpy.core._multiarray_umath._add_newdoc_ufunc',
+ 'byte_bounds': 'numpy.lib.utils.byte_bounds',
+ 'compare_chararrays': 'numpy.core._multiarray_umath.compare_chararrays',
+ 'deprecate': 'numpy.lib.utils.deprecate',
+ 'deprecate_with_doc': 'numpy.lib.utils.<lambda>',
+ 'disp': 'numpy.lib.function_base.disp',
+ 'fastCopyAndTranspose': 'numpy.core._multiarray_umath._fastCopyAndTranspose',
+ 'get_array_wrap': 'numpy.lib.shape_base.get_array_wrap',
+ 'get_include': 'numpy.lib.utils.get_include',
+ 'int_asbuffer': 'numpy.core._multiarray_umath.int_asbuffer',
+ 'mafromtxt': 'numpy.lib.npyio.mafromtxt',
+ 'ndfromtxt': 'numpy.lib.npyio.ndfromtxt',
+ 'recfromcsv': 'numpy.lib.npyio.recfromcsv',
+ 'recfromtxt': 'numpy.lib.npyio.recfromtxt',
+ 'safe_eval': 'numpy.lib.utils.safe_eval',
+ 'set_string_function': 'numpy.core.arrayprint.set_string_function',
+ 'show_config': 'numpy.__config__.show',
+ 'who': 'numpy.lib.utils.who',
+ }
+ # These built-in types are re-exported by numpy.
+ builtins = {
+ 'bool': 'builtins.bool',
+ 'complex': 'builtins.complex',
+ 'float': 'builtins.float',
+ 'int': 'builtins.int',
+ 'long': 'builtins.int',
+ 'object': 'builtins.object',
+ 'str': 'builtins.str',
+ 'unicode': 'builtins.str',
+ }
+ whitelist = dict(undocumented, **builtins)
+ bad_results = check_dir(np)
+ # pytest gives better error messages with the builtin assert than with
+ # assert_equal
+ assert bad_results == whitelist
+
+
+@pytest.mark.parametrize('name', ['testing', 'Tester'])
+def test_import_lazy_import(name):
+ """Make sure we can actually use the modules we lazy load.
+
+ While not exported as part of the public API, it was accessible. With the
+ use of __getattr__ and __dir__, this isn't always true. It can happen that
+ infinite recursion occurs.
+
+ This is the only way I found that would force the failure to appear on the
+ badly implemented code.
+
+ We also test for the presence of the lazily imported modules in dir
+
+ """
+ exe = (sys.executable, '-c', "import numpy; numpy." + name)
+ result = subprocess.check_output(exe)
+ assert not result
+
+ # Make sure they are still in the __dir__
+ assert name in dir(np)
+
+
+def test_numpy_linalg():
+ bad_results = check_dir(np.linalg)
+ assert bad_results == {}
+
+
+def test_numpy_fft():
+ bad_results = check_dir(np.fft)
+ assert bad_results == {}
+
+
+@pytest.mark.skipif(ctypes is None,
+ reason="ctypes not available in this python")
+def test_NPY_NO_EXPORT():
+ cdll = ctypes.CDLL(np.core._multiarray_tests.__file__)
+ # Make sure an arbitrary NPY_NO_EXPORT function is actually hidden
+ f = getattr(cdll, 'test_not_exported', None)
+ assert f is None, ("'test_not_exported' is mistakenly exported, "
+ "NPY_NO_EXPORT does not work")
+
+
+# Historically NumPy has not used leading underscores for private submodules
+# much. This has resulted in lots of things that look like public modules
+# (i.e. things that can be imported as `import numpy.somesubmodule.somefile`),
+# but were never intended to be public. The PUBLIC_MODULES list contains
+# modules that are either public because they were meant to be, or because they
+# contain public functions/objects that aren't present in any other namespace
+# for whatever reason and therefore should be treated as public.
+#
+# The PRIVATE_BUT_PRESENT_MODULES list contains modules that look public (lack
+# of underscores) but should not be used. For many of those modules the
+# current status is fine. For others it may make sense to work on making them
+# private, to clean up our public API and avoid confusion.
+PUBLIC_MODULES = ['numpy.' + s for s in [
+ "ctypeslib",
+ "distutils",
+ "distutils.cpuinfo",
+ "distutils.exec_command",
+ "distutils.misc_util",
+ "distutils.log",
+ "distutils.system_info",
+ "doc",
+ "doc.basics",
+ "doc.broadcasting",
+ "doc.byteswapping",
+ "doc.constants",
+ "doc.creation",
+ "doc.dispatch",
+ "doc.glossary",
+ "doc.indexing",
+ "doc.internals",
+ "doc.misc",
+ "doc.structured_arrays",
+ "doc.subclassing",
+ "doc.ufuncs",
+ "dual",
+ "f2py",
+ "fft",
+ "lib",
+ "lib.format", # was this meant to be public?
+ "lib.mixins",
+ "lib.recfunctions",
+ "lib.scimath",
+ "linalg",
+ "ma",
+ "ma.extras",
+ "ma.mrecords",
+ "matlib",
+ "polynomial",
+ "polynomial.chebyshev",
+ "polynomial.hermite",
+ "polynomial.hermite_e",
+ "polynomial.laguerre",
+ "polynomial.legendre",
+ "polynomial.polynomial",
+ "polynomial.polyutils",
+ "random",
+ "testing",
+ "version",
+]]
+
+
+PUBLIC_ALIASED_MODULES = [
+ "numpy.char",
+ "numpy.emath",
+ "numpy.rec",
+]
+
+
+PRIVATE_BUT_PRESENT_MODULES = ['numpy.' + s for s in [
+ "compat",
+ "compat.py3k",
+ "conftest",
+ "core",
+ "core.arrayprint",
+ "core.defchararray",
+ "core.einsumfunc",
+ "core.fromnumeric",
+ "core.function_base",
+ "core.getlimits",
+ "core.machar",
+ "core.memmap",
+ "core.multiarray",
+ "core.numeric",
+ "core.numerictypes",
+ "core.overrides",
+ "core.records",
+ "core.shape_base",
+ "core.umath",
+ "core.umath_tests",
+ "distutils.ccompiler",
+ "distutils.command",
+ "distutils.command.autodist",
+ "distutils.command.bdist_rpm",
+ "distutils.command.build",
+ "distutils.command.build_clib",
+ "distutils.command.build_ext",
+ "distutils.command.build_py",
+ "distutils.command.build_scripts",
+ "distutils.command.build_src",
+ "distutils.command.config",
+ "distutils.command.config_compiler",
+ "distutils.command.develop",
+ "distutils.command.egg_info",
+ "distutils.command.install",
+ "distutils.command.install_clib",
+ "distutils.command.install_data",
+ "distutils.command.install_headers",
+ "distutils.command.sdist",
+ "distutils.compat",
+ "distutils.conv_template",
+ "distutils.core",
+ "distutils.extension",
+ "distutils.fcompiler",
+ "distutils.fcompiler.absoft",
+ "distutils.fcompiler.compaq",
+ "distutils.fcompiler.environment",
+ "distutils.fcompiler.g95",
+ "distutils.fcompiler.gnu",
+ "distutils.fcompiler.hpux",
+ "distutils.fcompiler.ibm",
+ "distutils.fcompiler.intel",
+ "distutils.fcompiler.lahey",
+ "distutils.fcompiler.mips",
+ "distutils.fcompiler.nag",
+ "distutils.fcompiler.none",
+ "distutils.fcompiler.pathf95",
+ "distutils.fcompiler.pg",
+ "distutils.fcompiler.sun",
+ "distutils.fcompiler.vast",
+ "distutils.from_template",
+ "distutils.intelccompiler",
+ "distutils.lib2def",
+ "distutils.line_endings",
+ "distutils.mingw32ccompiler",
+ "distutils.msvccompiler",
+ "distutils.npy_pkg_config",
+ "distutils.numpy_distribution",
+ "distutils.pathccompiler",
+ "distutils.unixccompiler",
+ "f2py.auxfuncs",
+ "f2py.capi_maps",
+ "f2py.cb_rules",
+ "f2py.cfuncs",
+ "f2py.common_rules",
+ "f2py.crackfortran",
+ "f2py.diagnose",
+ "f2py.f2py2e",
+ "f2py.f2py_testing",
+ "f2py.f90mod_rules",
+ "f2py.func2subr",
+ "f2py.rules",
+ "f2py.use_rules",
+ "fft.helper",
+ "lib.arraypad",
+ "lib.arraysetops",
+ "lib.arrayterator",
+ "lib.financial",
+ "lib.function_base",
+ "lib.histograms",
+ "lib.index_tricks",
+ "lib.nanfunctions",
+ "lib.npyio",
+ "lib.polynomial",
+ "lib.shape_base",
+ "lib.stride_tricks",
+ "lib.twodim_base",
+ "lib.type_check",
+ "lib.ufunclike",
+ "lib.user_array", # note: not in np.lib, but probably should just be deleted
+ "lib.utils",
+ "linalg.lapack_lite",
+ "linalg.linalg",
+ "ma.bench",
+ "ma.core",
+ "ma.testutils",
+ "ma.timer_comparison",
+ "matrixlib",
+ "matrixlib.defmatrix",
+ "random.bit_generator",
+ "random.bounded_integers",
+ "random.common",
+ "random.generator",
+ "random.mt19937",
+ "random.mtrand",
+ "random.pcg64",
+ "random.philox",
+ "random.sfc64",
+ "testing.print_coercion_tables",
+ "testing.utils",
+]]
+
+
+def is_unexpected(name):
+ """Check if this needs to be considered."""
+ if '._' in name or '.tests' in name or '.setup' in name:
+ return False
+
+ if name in PUBLIC_MODULES:
+ return False
+
+ if name in PUBLIC_ALIASED_MODULES:
+ return False
+
+ if name in PRIVATE_BUT_PRESENT_MODULES:
+ return False
+
+ return True
+
+
+# These are present in a directory with an __init__.py but cannot be imported
+# code_generators/ isn't installed, but present for an inplace build
+SKIP_LIST = [
+ "numpy.core.code_generators",
+ "numpy.core.code_generators.genapi",
+ "numpy.core.code_generators.generate_umath",
+ "numpy.core.code_generators.ufunc_docstrings",
+ "numpy.core.code_generators.generate_numpy_api",
+ "numpy.core.code_generators.generate_ufunc_api",
+ "numpy.core.code_generators.numpy_api",
+ "numpy.core.cversions",
+ "numpy.core.generate_numpy_api",
+ "numpy.distutils.msvc9compiler",
+]
+
+
+def test_all_modules_are_expected():
+ """
+ Test that we don't add anything that looks like a new public module by
+ accident. Check is based on filenames.
+ """
+
+ modnames = []
+ for _, modname, ispkg in pkgutil.walk_packages(path=np.__path__,
+ prefix=np.__name__ + '.',
+ onerror=None):
+ if is_unexpected(modname) and modname not in SKIP_LIST:
+ # We have a name that is new. If that's on purpose, add it to
+ # PUBLIC_MODULES. We don't expect to have to add anything to
+ # PRIVATE_BUT_PRESENT_MODULES. Use an underscore in the name!
+ modnames.append(modname)
+
+ if modnames:
+ raise AssertionError("Found unexpected modules: {}".format(modnames))
+
+
+# Stuff that clearly shouldn't be in the API and is detected by the next test
+# below
+SKIP_LIST_2 = [
+ 'numpy.math',
+ 'numpy.distutils.log.sys',
+ 'numpy.distutils.system_info.copy',
+ 'numpy.distutils.system_info.distutils',
+ 'numpy.distutils.system_info.log',
+ 'numpy.distutils.system_info.os',
+ 'numpy.distutils.system_info.platform',
+ 'numpy.distutils.system_info.re',
+ 'numpy.distutils.system_info.shutil',
+ 'numpy.distutils.system_info.subprocess',
+ 'numpy.distutils.system_info.sys',
+ 'numpy.distutils.system_info.tempfile',
+ 'numpy.distutils.system_info.textwrap',
+ 'numpy.distutils.system_info.warnings',
+ 'numpy.doc.constants.re',
+ 'numpy.doc.constants.textwrap',
+ 'numpy.lib.emath',
+ 'numpy.lib.math',
+ 'numpy.matlib.char',
+ 'numpy.matlib.rec',
+ 'numpy.matlib.emath',
+ 'numpy.matlib.math',
+ 'numpy.matlib.linalg',
+ 'numpy.matlib.fft',
+ 'numpy.matlib.random',
+ 'numpy.matlib.ctypeslib',
+ 'numpy.matlib.ma'
+]
+
+
+def test_all_modules_are_expected_2():
+ """
+ Method checking all objects. The pkgutil-based method in
+ `test_all_modules_are_expected` does not catch imports into a namespace,
+ only filenames. So this test is more thorough, and checks this like:
+
+ import .lib.scimath as emath
+
+ To check if something in a module is (effectively) public, one can check if
+ there's anything in that namespace that's a public function/object but is
+ not exposed in a higher-level namespace. For example for a `numpy.lib`
+ submodule::
+
+ mod = np.lib.mixins
+ for obj in mod.__all__:
+ if obj in np.__all__:
+ continue
+ elif obj in np.lib.__all__:
+ continue
+
+ else:
+ print(obj)
+
+ """
+
+ def find_unexpected_members(mod_name):
+ members = []
+ module = importlib.import_module(mod_name)
+ if hasattr(module, '__all__'):
+ objnames = module.__all__
+ else:
+ objnames = dir(module)
+
+ for objname in objnames:
+ if not objname.startswith('_'):
+ fullobjname = mod_name + '.' + objname
+ if isinstance(getattr(module, objname), types.ModuleType):
+ if is_unexpected(fullobjname):
+ if fullobjname not in SKIP_LIST_2:
+ members.append(fullobjname)
+
+ return members
+
+ unexpected_members = find_unexpected_members("numpy")
+ for modname in PUBLIC_MODULES:
+ unexpected_members.extend(find_unexpected_members(modname))
+
+ if unexpected_members:
+ raise AssertionError("Found unexpected object(s) that look like "
+ "modules: {}".format(unexpected_members))
+
+
+def test_api_importable():
+ """
+ Check that all submodules listed higher up in this file can be imported
+
+ Note that if a PRIVATE_BUT_PRESENT_MODULES entry goes missing, it may
+ simply need to be removed from the list (deprecation may or may not be
+ needed - apply common sense).
+ """
+ def check_importable(module_name):
+ try:
+ importlib.import_module(module_name)
+ except (ImportError, AttributeError):
+ return False
+
+ return True
+
+ module_names = []
+ for module_name in PUBLIC_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that cannot be "
+ "imported: {}".format(module_names))
+
+ for module_name in PUBLIC_ALIASED_MODULES:
+ try:
+ eval(module_name)
+ except AttributeError:
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules in the public API that were not "
+ "found: {}".format(module_names))
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.filterwarnings('always', category=DeprecationWarning)
+ warnings.filterwarnings('always', category=ImportWarning)
+ for module_name in PRIVATE_BUT_PRESENT_MODULES:
+ if not check_importable(module_name):
+ module_names.append(module_name)
+
+ if module_names:
+ raise AssertionError("Modules that are not really public but looked "
+ "public and can not be imported: "
+ "{}".format(module_names))
diff --git a/numpy/tests/test_reloading.py b/numpy/tests/test_reloading.py
index cd42252e3..e378d1463 100644
--- a/numpy/tests/test_reloading.py
+++ b/numpy/tests/test_reloading.py
@@ -1,9 +1,9 @@
from __future__ import division, absolute_import, print_function
import sys
-import pickle
from numpy.testing import assert_raises, assert_, assert_equal
+from numpy.compat import pickle
if sys.version_info[:2] >= (3, 4):
from importlib import reload
@@ -32,5 +32,7 @@ def test_numpy_reloading():
def test_novalue():
import numpy as np
- assert_equal(repr(np._NoValue), '<no value>')
- assert_(pickle.loads(pickle.dumps(np._NoValue)) is np._NoValue)
+ for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
+ assert_equal(repr(np._NoValue), '<no value>')
+ assert_(pickle.loads(pickle.dumps(np._NoValue,
+ protocol=proto)) is np._NoValue)
diff --git a/numpy/tests/test_scripts.py b/numpy/tests/test_scripts.py
index 33210cc42..e42dc25f9 100644
--- a/numpy/tests/test_scripts.py
+++ b/numpy/tests/test_scripts.py
@@ -7,8 +7,8 @@ from __future__ import division, print_function, absolute_import
import sys
import os
import pytest
-from os.path import join as pathjoin, isfile, dirname, basename
-from subprocess import Popen, PIPE
+from os.path import join as pathjoin, isfile, dirname
+import subprocess
import numpy as np
from numpy.compat.py3k import basestring
@@ -17,74 +17,13 @@ from numpy.testing import assert_, assert_equal
is_inplace = isfile(pathjoin(dirname(np.__file__), '..', 'setup.py'))
-def run_command(cmd, check_code=True):
- """ Run command sequence `cmd` returning exit code, stdout, stderr
-
- Parameters
- ----------
- cmd : str or sequence
- string with command name or sequence of strings defining command
- check_code : {True, False}, optional
- If True, raise error for non-zero return code
-
- Returns
- -------
- returncode : int
- return code from execution of `cmd`
- stdout : bytes (python 3) or str (python 2)
- stdout from `cmd`
- stderr : bytes (python 3) or str (python 2)
- stderr from `cmd`
-
- Raises
- ------
- RuntimeError
- If `check_code` is True, and return code !=0
- """
- cmd = [cmd] if isinstance(cmd, basestring) else list(cmd)
- if os.name == 'nt':
- # Quote any arguments with spaces. The quotes delimit the arguments
- # on Windows, and the arguments might be file paths with spaces.
- # On Unix the list elements are each separate arguments.
- cmd = ['"{0}"'.format(c) if ' ' in c else c for c in cmd]
- proc = Popen(cmd, stdout=PIPE, stderr=PIPE)
- stdout, stderr = proc.communicate()
- if proc.poll() is None:
- proc.terminate()
- if check_code and proc.returncode != 0:
- raise RuntimeError('\n'.join(
- ['Command "{0}" failed with',
- 'stdout', '------', '{1}', '',
- 'stderr', '------', '{2}']).format(cmd, stdout, stderr))
- return proc.returncode, stdout, stderr
-
-
-@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
-@pytest.mark.xfail(reason="Test is unreliable")
-def test_f2py():
- # test that we can run f2py script
-
- def try_f2py_commands(cmds):
- success = 0
- for f2py_cmd in cmds:
- try:
- code, stdout, stderr = run_command([f2py_cmd, '-v'])
- assert_equal(stdout.strip(), b'2')
- success += 1
- except Exception:
- pass
- return success
-
+def find_f2py_commands():
if sys.platform == 'win32':
- # Only the single 'f2py' script is installed in windows.
exe_dir = dirname(sys.executable)
if exe_dir.endswith('Scripts'): # virtualenv
- f2py_cmds = [os.path.join(exe_dir, 'f2py')]
+ return [os.path.join(exe_dir, 'f2py')]
else:
- f2py_cmds = [os.path.join(exe_dir, "Scripts", 'f2py')]
- success = try_f2py_commands(f2py_cmds)
- msg = "Warning: f2py not found in path"
- assert_(success == 1, msg)
+ return [os.path.join(exe_dir, "Scripts", 'f2py')]
else:
# Three scripts are installed in Unix-like systems:
# 'f2py', 'f2py{major}', and 'f2py{major.minor}'. For example,
@@ -93,7 +32,18 @@ def test_f2py():
version = sys.version_info
major = str(version.major)
minor = str(version.minor)
- f2py_cmds = ('f2py', 'f2py' + major, 'f2py' + major + '.' + minor)
- success = try_f2py_commands(f2py_cmds)
- msg = "Warning: not all of %s, %s, and %s are found in path" % f2py_cmds
- assert_(success == 3, msg)
+ return ['f2py', 'f2py' + major, 'f2py' + major + '.' + minor]
+
+
+@pytest.mark.skipif(is_inplace, reason="Cannot test f2py command inplace")
+@pytest.mark.xfail(reason="Test is unreliable")
+@pytest.mark.parametrize('f2py_cmd', find_f2py_commands())
+def test_f2py(f2py_cmd):
+ # test that we can run f2py script
+ stdout = subprocess.check_output([f2py_cmd, '-v'])
+ assert_equal(stdout.strip(), b'2')
+
+
+def test_pep338():
+ stdout = subprocess.check_output([sys.executable, '-mnumpy.f2py', '-v'])
+ assert_equal(stdout.strip(), b'2')
diff --git a/numpy/tests/test_warnings.py b/numpy/tests/test_warnings.py
index aa6f69f7e..f5560a099 100644
--- a/numpy/tests/test_warnings.py
+++ b/numpy/tests/test_warnings.py
@@ -44,7 +44,7 @@ if sys.version_info >= (3, 4):
if p.ls[-1] == 'warn' and (
len(p.ls) == 1 or p.ls[-2] == 'warnings'):
- if "testing/tests/test_warnings.py" is self.__filename:
+ if "testing/tests/test_warnings.py" == self.__filename:
# This file
return
diff --git a/pavement.py b/pavement.py
index 41acc5624..3637bc66d 100644
--- a/pavement.py
+++ b/pavement.py
@@ -4,37 +4,6 @@ possible. It relies on virtualenv to generate 'bootstrap' environments as
independent from the user system as possible (e.g. to make sure the sphinx doc
is built against the built numpy, not an installed one).
-Building a fancy dmg from scratch
-=================================
-
-Clone the numpy-macosx-installer git repo from on github into the source tree
-(numpy-macosx-installer should be in the same directory as setup.py). Then, do
-as follows::
-
- git clone git://github.com/cournape/macosx-numpy-installer
- # remove build dir, and everything generated by previous paver calls
- # (included generated installers). Use with care !
- paver nuke
- paver bootstrap && source bootstrap/bin/activate
- # Installing numpy is necessary to build the correct documentation (because
- # of autodoc)
- python setup.py install
- paver dmg
-
-Building a simple (no-superpack) windows installer from wine
-============================================================
-
-It assumes that blas/lapack are in c:\local\lib inside drive_c.
-
- paver bdist_wininst_simple
-
-You will have to configure your wine python locations (WINE_PYS).
-
-The superpack requires all the atlas libraries for every arch to be installed
-(see SITECFG), and can then be built as follows::
-
- paver bdist_superpack
-
Building changelog + notes
==========================
@@ -43,8 +12,7 @@ Assumes you have git and the binaries/tarballs in installers/::
paver write_release
paver write_note
-This automatically put the checksum into NOTES.txt, and write the Changelog
-which can be uploaded to sourceforge.
+This automatically puts the checksum into README.rst, and writes the Changelog.
TODO
====
@@ -56,10 +24,6 @@ TODO
"""
from __future__ import division, print_function
-# What need to be installed to build everything on mac os x:
-# - wine: python 2.6 and 2.5 + makensis + cpuid plugin + mingw, all in the PATH
-# - paver + virtualenv
-# - full texlive
import os
import sys
import shutil
@@ -67,9 +31,31 @@ import subprocess
import re
import hashlib
+# The paver package needs to be installed to run tasks
import paver
-from paver.easy import \
- options, Bunch, task, call_task, sh, needs, cmdopts, dry
+from paver.easy import Bunch, options, task, sh
+
+
+#-----------------------------------
+# Things to be changed for a release
+#-----------------------------------
+
+# Path to the release notes
+RELEASE_NOTES = 'doc/release/1.18.0-notes.rst'
+
+
+#-------------------------------------------------------
+# Hardcoded build/install dirs, virtualenv options, etc.
+#-------------------------------------------------------
+
+# Where to put the release installers
+options(installers=Bunch(releasedir="release",
+ installersdir=os.path.join("release", "installers")),)
+
+
+#-----------------------------
+# Generate the release version
+#-----------------------------
sys.path.insert(0, os.path.dirname(__file__))
try:
@@ -90,452 +76,35 @@ finally:
sys.path.pop(0)
-#-----------------------------------
-# Things to be changed for a release
-#-----------------------------------
-
-# Source of the release notes
-RELEASE_NOTES = 'doc/release/1.16.0-notes.rst'
-
-# Start/end of the log (from git)
-LOG_START = 'maintenance/1.15.x'
-LOG_END = 'master'
-
-
-#-------------------------------------------------------
-# Hardcoded build/install dirs, virtualenv options, etc.
-#-------------------------------------------------------
-DEFAULT_PYTHON = "2.7"
-
-# Where to put the final installers, as put on sourceforge
-SUPERPACK_BUILD = 'build-superpack'
-SUPERPACK_BINDIR = os.path.join(SUPERPACK_BUILD, 'binaries')
-
-options(bootstrap=Bunch(bootstrap_dir="bootstrap"),
- virtualenv=Bunch(packages_to_install=["sphinx==1.1.3", "numpydoc"],
- no_site_packages=False),
- sphinx=Bunch(builddir="build", sourcedir="source", docroot='doc'),
- superpack=Bunch(builddir="build-superpack"),
- installers=Bunch(releasedir="release",
- installersdir=os.path.join("release", "installers")),
- doc=Bunch(doc_root="doc",
- sdir=os.path.join("doc", "source"),
- bdir=os.path.join("doc", "build"),
- bdir_latex=os.path.join("doc", "build", "latex"),
- destdir_pdf=os.path.join("build_doc", "pdf")
- ),
- html=Bunch(builddir=os.path.join("build", "html")),
- dmg=Bunch(python_version=DEFAULT_PYTHON),
- bdist_wininst_simple=Bunch(python_version=DEFAULT_PYTHON),
-)
-
-MPKG_PYTHON = {
- "2.6": ["/Library/Frameworks/Python.framework/Versions/2.6/bin/python"],
- "2.7": ["/Library/Frameworks/Python.framework/Versions/2.7/bin/python"],
- "3.2": ["/Library/Frameworks/Python.framework/Versions/3.2/bin/python3"],
- "3.3": ["/Library/Frameworks/Python.framework/Versions/3.3/bin/python3"],
- "3.4": ["/Library/Frameworks/Python.framework/Versions/3.4/bin/python3"],
-}
-
-SSE3_CFG = {'ATLAS': r'C:\local\lib\atlas\sse3'}
-SSE2_CFG = {'ATLAS': r'C:\local\lib\atlas\sse2'}
-NOSSE_CFG = {'BLAS': r'C:\local\lib\atlas\nosse', 'LAPACK': r'C:\local\lib\atlas\nosse'}
-
-SITECFG = {"sse2" : SSE2_CFG, "sse3" : SSE3_CFG, "nosse" : NOSSE_CFG}
-
-if sys.platform =="darwin":
- WINDOWS_PYTHON = {
- "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
- "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
- }
- WINDOWS_ENV = os.environ
- WINDOWS_ENV["DYLD_FALLBACK_LIBRARY_PATH"] = "/usr/X11/lib:/usr/lib"
- MAKENSIS = ["wine", "makensis"]
-elif sys.platform == "win32":
- WINDOWS_PYTHON = {
- "3.4": [r"C:\Python34\python.exe"],
- "2.7": [r"C:\Python27\python.exe"],
- }
- # XXX: find out which env variable is necessary to avoid the pb with python
- # 2.6 and random module when importing tempfile
- WINDOWS_ENV = os.environ
- MAKENSIS = ["makensis"]
-else:
- WINDOWS_PYTHON = {
- "3.4": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python34/python.exe"],
- "2.7": ["wine", os.environ['HOME'] + "/.wine/drive_c/Python27/python.exe"],
- }
- WINDOWS_ENV = os.environ
- MAKENSIS = ["wine", "makensis"]
-
-
-#-------------------
-# Windows installers
-#-------------------
-def superpack_name(pyver, numver):
- """Return the filename of the superpack installer."""
- return 'numpy-%s-win32-superpack-python%s.exe' % (numver, pyver)
-
-def internal_wininst_name(arch):
- """Return the name of the wininst as it will be inside the superpack (i.e.
- with the arch encoded."""
- ext = '.exe'
- return "numpy-%s-%s%s" % (FULLVERSION, arch, ext)
-
-def wininst_name(pyver):
- """Return the name of the installer built by wininst command."""
- ext = '.exe'
- return "numpy-%s.win32-py%s%s" % (FULLVERSION, pyver, ext)
-
-def prepare_nsis_script(pyver, numver):
- if not os.path.exists(SUPERPACK_BUILD):
- os.makedirs(SUPERPACK_BUILD)
-
- tpl = os.path.join('tools/win32build/nsis_scripts', 'numpy-superinstaller.nsi.in')
- source = open(tpl, 'r')
- target = open(os.path.join(SUPERPACK_BUILD, 'numpy-superinstaller.nsi'), 'w')
-
- installer_name = superpack_name(pyver, numver)
- cnt = "".join(source.readlines())
- cnt = cnt.replace('@NUMPY_INSTALLER_NAME@', installer_name)
- for arch in ['nosse', 'sse2', 'sse3']:
- cnt = cnt.replace('@%s_BINARY@' % arch.upper(),
- internal_wininst_name(arch))
-
- target.write(cnt)
-
-def bdist_wininst_arch(pyver, arch):
- """Arch specific wininst build."""
- if os.path.exists("build"):
- shutil.rmtree("build")
-
- _bdist_wininst(pyver, SITECFG[arch])
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_superpack(options):
- """Build all arch specific wininst installers."""
- pyver = options.python_version
- def copy_bdist(arch):
- # Copy the wininst in dist into the release directory
- source = os.path.join('dist', wininst_name(pyver))
- target = os.path.join(SUPERPACK_BINDIR, internal_wininst_name(arch))
- if os.path.exists(target):
- os.remove(target)
- if not os.path.exists(os.path.dirname(target)):
- os.makedirs(os.path.dirname(target))
- try:
- os.rename(source, target)
- except OSError:
- # When git is installed on OS X but not under Wine, the name of the
- # .exe has "-Unknown" in it instead of the correct git revision.
- # Try to fix this here:
- revidx = source.index(".dev-") + 5
- gitrev = source[revidx:revidx+7]
- os.rename(source.replace(gitrev, "Unknown"), target)
-
- bdist_wininst_arch(pyver, 'nosse')
- copy_bdist("nosse")
- bdist_wininst_arch(pyver, 'sse2')
- copy_bdist("sse2")
- bdist_wininst_arch(pyver, 'sse3')
- copy_bdist("sse3")
-
- idirs = options.installers.installersdir
- pyver = options.python_version
- prepare_nsis_script(pyver, FULLVERSION)
- subprocess.check_call(MAKENSIS + ['numpy-superinstaller.nsi'],
- cwd=SUPERPACK_BUILD)
-
- # Copy the superpack into installers dir
- if not os.path.exists(idirs):
- os.makedirs(idirs)
-
- source = os.path.join(SUPERPACK_BUILD, superpack_name(pyver, FULLVERSION))
- target = os.path.join(idirs, superpack_name(pyver, FULLVERSION))
- shutil.copy(source, target)
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_nosse(options):
- """Build the nosse wininst installer."""
- bdist_wininst_arch(options.python_version, 'nosse')
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_sse2(options):
- """Build the sse2 wininst installer."""
- bdist_wininst_arch(options.python_version, 'sse2')
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_sse3(options):
- """Build the sse3 wininst installer."""
- bdist_wininst_arch(options.python_version, 'sse3')
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def bdist_wininst_simple():
- """Simple wininst-based installer."""
- pyver = options.bdist_wininst_simple.python_version
- _bdist_wininst(pyver)
-
-def _bdist_wininst(pyver, cfg_env=None):
- cmd = WINDOWS_PYTHON[pyver] + ['setup.py', 'build', '-c', 'mingw32', 'bdist_wininst']
- if cfg_env:
- for k, v in WINDOWS_ENV.items():
- cfg_env[k] = v
- else:
- cfg_env = WINDOWS_ENV
- subprocess.check_call(cmd, env=cfg_env)
-
-#----------------
-# Bootstrap stuff
-#----------------
-@task
-def bootstrap(options):
- """create virtualenv in ./bootstrap"""
- try:
- import virtualenv
- except ImportError as e:
- raise RuntimeError("virtualenv is needed for bootstrap")
-
- bdir = options.bootstrap_dir
- if not os.path.exists(bdir):
- os.makedirs(bdir)
- bscript = "boostrap.py"
-
- options.virtualenv.script_name = os.path.join(options.bootstrap_dir,
- bscript)
- options.virtualenv.no_site_packages = False
- options.bootstrap.no_site_packages = False
- call_task('paver.virtual.bootstrap')
- sh('cd %s; %s %s' % (bdir, sys.executable, bscript))
-
-@task
-def clean():
- """Remove build, dist, egg-info garbage."""
- d = ['build', 'dist', 'numpy.egg-info']
- for i in d:
- if os.path.exists(i):
- shutil.rmtree(i)
-
- bdir = os.path.join('doc', options.sphinx.builddir)
- if os.path.exists(bdir):
- shutil.rmtree(bdir)
-
-@task
-def clean_bootstrap():
- bdir = os.path.join(options.bootstrap.bootstrap_dir)
- if os.path.exists(bdir):
- shutil.rmtree(bdir)
-
-@task
-@needs('clean', 'clean_bootstrap')
-def nuke(options):
- """Remove everything: build dir, installers, bootstrap dirs, etc..."""
- for d in [options.superpack.builddir, options.installers.releasedir]:
- if os.path.exists(d):
- shutil.rmtree(d)
-
-#---------------------
-# Documentation tasks
-#---------------------
-@task
-def html(options):
- """Build numpy documentation and put it into build/docs"""
- # Don't use paver html target because of numpy bootstrapping problems
- bdir = os.path.join("doc", options.sphinx.builddir, "html")
- if os.path.exists(bdir):
- shutil.rmtree(bdir)
- subprocess.check_call(["make", "html"], cwd="doc")
- html_destdir = options.html.builddir
- if os.path.exists(html_destdir):
- shutil.rmtree(html_destdir)
- shutil.copytree(bdir, html_destdir)
-
-@task
-def latex():
- """Build numpy documentation in latex format."""
- subprocess.check_call(["make", "latex"], cwd="doc")
-
-@task
-@needs('latex')
-def pdf():
- sdir = options.doc.sdir
- bdir = options.doc.bdir
- bdir_latex = options.doc.bdir_latex
- destdir_pdf = options.doc.destdir_pdf
-
- def build_pdf():
- subprocess.check_call(["make", "all-pdf"], cwd=str(bdir_latex))
- dry("Build pdf doc", build_pdf)
-
- if os.path.exists(destdir_pdf):
- shutil.rmtree(destdir_pdf)
- os.makedirs(destdir_pdf)
-
- user = os.path.join(bdir_latex, "numpy-user.pdf")
- shutil.copy(user, os.path.join(destdir_pdf, "userguide.pdf"))
- ref = os.path.join(bdir_latex, "numpy-ref.pdf")
- shutil.copy(ref, os.path.join(destdir_pdf, "reference.pdf"))
-
-#------------------
-# Mac OS X targets
-#------------------
-def dmg_name(fullversion, pyver, osxver=None):
- """Return name for dmg installer.
-
- Notes
- -----
- Python 2.7 has two binaries, one for 10.3 (ppc, i386) and one for 10.6
- (i386, x86_64). All other Python versions at python.org at the moment
- have binaries for 10.3 only. The "macosx%s" part of the dmg name should
- correspond to the python.org naming scheme.
- """
- # assume that for the py2.7/osx10.6 build the deployment target is set
- # (should be done in the release script).
- if not osxver:
- osxver = os.environ.get('MACOSX_DEPLOYMENT_TARGET', '10.3')
- return "numpy-%s-py%s-python.org-macosx%s.dmg" % (fullversion, pyver,
- osxver)
-
-def macosx_version():
- if not sys.platform == 'darwin':
- raise ValueError("Not darwin ??")
- st = subprocess.Popen(["sw_vers"], stdout=subprocess.PIPE)
- out = st.stdout.readlines()
- ver = re.compile(r"ProductVersion:\s+([0-9]+)\.([0-9]+)\.([0-9]+)")
- for i in out:
- m = ver.match(i)
- if m:
- return m.groups()
-
-def mpkg_name(pyver):
- maj, min = macosx_version()[:2]
- # Note that bdist_mpkg breaks this if building a dev version with a git
- # commit string attached. make_fullplatcomponents() in
- # bdist_mpkg/cmd_bdist_mpkg.py replaces '-' with '_', comment this out if
- # needed.
- return "numpy-%s-py%s-macosx%s.%s.mpkg" % (FULLVERSION, pyver, maj, min)
-
-def _build_mpkg(pyver):
- # account for differences between Python 2.7.1 versions from python.org
- if os.environ.get('MACOSX_DEPLOYMENT_TARGET', None) == "10.6":
- ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch x86_64 -Wl,-search_paths_first"
- else:
- ldflags = "-undefined dynamic_lookup -bundle -arch i386 -arch ppc -Wl,-search_paths_first"
-
- ldflags += " -L%s" % os.path.join(os.path.dirname(__file__), "build")
- sh("LDFLAGS='%s' %s setup.py bdist_mpkg" % (ldflags, " ".join(MPKG_PYTHON[pyver])))
-
-@task
-def simple_dmg():
- pyver = "2.6"
- src_dir = "dmg-source"
-
- # Clean the source dir
- if os.path.exists(src_dir):
- shutil.rmtree(src_dir)
- os.makedirs(src_dir)
-
- # Build the mpkg
- clean()
- _build_mpkg(pyver)
-
- # Build the dmg
- shutil.copytree(os.path.join("dist", mpkg_name(pyver)),
- os.path.join(src_dir, mpkg_name(pyver)))
- _create_dmg(pyver, src_dir, "NumPy Universal %s" % FULLVERSION)
-
-@task
-def bdist_mpkg(options):
- call_task("clean")
- try:
- pyver = options.bdist_mpkg.python_version
- except AttributeError:
- pyver = options.python_version
-
- _build_mpkg(pyver)
-
-def _create_dmg(pyver, src_dir, volname=None):
- # Build the dmg
- image_name = dmg_name(FULLVERSION, pyver)
- if os.path.exists(image_name):
- os.remove(image_name)
- cmd = ["hdiutil", "create", image_name, "-srcdir", src_dir]
- if volname:
- cmd.extend(["-volname", "'%s'" % volname])
- sh(" ".join(cmd))
-
-@task
-@cmdopts([("python-version=", "p", "python version")])
-def dmg(options):
- try:
- pyver = options.dmg.python_version
- except Exception:
- pyver = DEFAULT_PYTHON
- idirs = options.installers.installersdir
-
- # Check if docs exist. If not, say so and quit.
- ref = os.path.join(options.doc.destdir_pdf, "reference.pdf")
- user = os.path.join(options.doc.destdir_pdf, "userguide.pdf")
- if (not os.path.exists(ref)) or (not os.path.exists(user)):
- import warnings
- warnings.warn("Docs need to be built first! Can't find them.", stacklevel=2)
-
- # Build the mpkg package
- call_task("clean")
- _build_mpkg(pyver)
-
- macosx_installer_dir = "tools/numpy-macosx-installer"
- dmg = os.path.join(macosx_installer_dir, dmg_name(FULLVERSION, pyver))
- if os.path.exists(dmg):
- os.remove(dmg)
-
- # Clean the image source
- content = os.path.join(macosx_installer_dir, 'content')
- if os.path.exists(content):
- shutil.rmtree(content)
- os.makedirs(content)
-
- # Copy mpkg into image source
- mpkg_source = os.path.join("dist", mpkg_name(pyver))
- mpkg_target = os.path.join(content, "numpy-%s-py%s.mpkg" % (FULLVERSION, pyver))
- shutil.copytree(mpkg_source, mpkg_target)
-
- # Copy docs into image source
- pdf_docs = os.path.join(content, "Documentation")
- if os.path.exists(pdf_docs):
- shutil.rmtree(pdf_docs)
- os.makedirs(pdf_docs)
- shutil.copy(user, os.path.join(pdf_docs, "userguide.pdf"))
- shutil.copy(ref, os.path.join(pdf_docs, "reference.pdf"))
-
- # Build the dmg
- cmd = ["./new-create-dmg", "--pkgname", os.path.basename(mpkg_target),
- "--volname", "numpy", os.path.basename(dmg), "./content"]
- st = subprocess.check_call(cmd, cwd=macosx_installer_dir)
-
- source = dmg
- target = os.path.join(idirs, os.path.basename(dmg))
- if not os.path.exists(os.path.dirname(target)):
- os.makedirs(os.path.dirname(target))
- shutil.copy(source, target)
-
#--------------------------
# Source distribution stuff
#--------------------------
-def tarball_name(type='gztar'):
+def tarball_name(ftype='gztar'):
+ """Generate source distribution name
+
+ Parameters
+ ----------
+ ftype : {'zip', 'gztar'}
+ Type of archive, default is 'gztar'.
+
+ """
root = 'numpy-%s' % FULLVERSION
- if type == 'gztar':
+ if ftype == 'gztar':
return root + '.tar.gz'
- elif type == 'zip':
+ elif ftype == 'zip':
return root + '.zip'
raise ValueError("Unknown type %s" % type)
@task
def sdist(options):
+ """Make source distributions.
+
+ Parameters
+ ----------
+ options :
+ Set by ``task`` decorator.
+
+ """
# First clean the repo and update submodules (for up-to-date doc html theme
# and Sphinx extensions)
sh('git clean -xdf')
@@ -546,36 +115,82 @@ def sdist(options):
# do not play well together.
# Cython is run over all Cython files in setup.py, so generated C files
# will be included.
- sh('python setup.py sdist --formats=gztar,zip')
+ sh('python3 setup.py sdist --formats=gztar,zip')
# Copy the superpack into installers dir
idirs = options.installers.installersdir
if not os.path.exists(idirs):
os.makedirs(idirs)
- for t in ['gztar', 'zip']:
- source = os.path.join('dist', tarball_name(t))
- target = os.path.join(idirs, tarball_name(t))
+ for ftype in ['gztar', 'zip']:
+ source = os.path.join('dist', tarball_name(ftype))
+ target = os.path.join(idirs, tarball_name(ftype))
shutil.copy(source, target)
-def _compute_hash(idirs, algo):
+
+#-------------
+# README stuff
+#-------------
+
+def _compute_hash(idirs, hashfunc):
+ """Hash files using given hashfunc.
+
+ Parameters
+ ----------
+ idirs : directory path
+ Directory containing files to be hashed.
+ hashfunc : hash function
+ Function to be used to hash the files.
+
+ """
released = paver.path.path(idirs).listdir()
checksums = []
- for f in sorted(released):
- with open(f, 'r') as _file:
- m = algo(_file.read())
- checksums.append('%s %s' % (m.hexdigest(), os.path.basename(f)))
+ for fpath in sorted(released):
+ with open(fpath, 'rb') as fin:
+ fhash = hashfunc(fin.read())
+ checksums.append(
+ '%s %s' % (fhash.hexdigest(), os.path.basename(fpath)))
return checksums
+
def compute_md5(idirs):
+ """Compute md5 hash of files in idirs.
+
+ Parameters
+ ----------
+ idirs : directory path
+ Directory containing files to be hashed.
+
+ """
return _compute_hash(idirs, hashlib.md5)
+
def compute_sha256(idirs):
+ """Compute sha256 hash of files in idirs.
+
+ Parameters
+ ----------
+ idirs : directory path
+ Directory containing files to be hashed.
+
+ """
# better checksum so gpg signed README.rst containing the sums can be used
# to verify the binaries instead of signing all binaries
return _compute_hash(idirs, hashlib.sha256)
+
def write_release_task(options, filename='README'):
+ """Append hashes of release files to release notes.
+
+ Parameters
+ ----------
+ options :
+ Set by ``task`` decorator.
+ filename : string
+ Filename of the modified notes. The file is written
+ in the release directory.
+
+ """
idirs = options.installers.installersdir
source = paver.path.path(RELEASE_NOTES)
target = paver.path.path(filename + '.rst')
@@ -619,30 +234,19 @@ SHA256
ftarget.write(mdtext)
-def write_log_task(options, filename='Changelog'):
- st = subprocess.Popen(
- ['git', 'log', '--no-merges', '--use-mailmap',
- '%s..%s' % (LOG_START, LOG_END)],
- stdout=subprocess.PIPE)
-
- out = st.communicate()[0]
- a = open(filename, 'w')
- a.writelines(out)
- a.close()
-
-
@task
def write_release(options):
- write_release_task(options)
-
+ """Write the README files.
-@task
-def write_log(options):
- write_log_task(options)
+ Two README files are generated from the release notes, one in ``rst``
+ markup for the general release, the other in ``md`` markup for the github
+ release notes.
+ Parameters
+ ----------
+ options :
+ Set by ``task`` decorator.
-@task
-def write_release_and_log(options):
+ """
rdir = options.installers.releasedir
write_release_task(options, os.path.join(rdir, 'README'))
- write_log_task(options, os.path.join(rdir, 'Changelog'))
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 000000000..918cbb278
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,71 @@
+[build-system]
+# Minimum requirements for the build system to execute.
+requires = [
+ "setuptools",
+ "wheel",
+ "Cython>=0.29.13", # Note: keep in sync with tools/cythonize.py
+]
+
+
+[tool.towncrier]
+ # Do no set this since it is hard to import numpy inside the source directory
+ # the name is hardcoded. Use "--version 1.18.0" to set the version
+ single_file = true
+ filename = "doc/source/release/{version}-notes.rst"
+ directory = "doc/release/upcoming_changes/"
+ issue_format = "`gh-{issue} <https://github.com/numpy/numpy/pull/{issue}>`__"
+ template = "doc/release/upcoming_changes/template.rst"
+ underlines = "~="
+ all_bullets = false
+
+
+ [[tool.towncrier.type]]
+ directory = "highlight"
+ name = "Highlights"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "new_function"
+ name = "New functions"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "deprecation"
+ name = "Deprecations"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "future"
+ name = "Future Changes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "expired"
+ name = "Expired deprecations"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "compatibility"
+ name = "Compatibility notes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "c_api"
+ name = "C API changes"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "new_feature"
+ name = "New Features"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "improvement"
+ name = "Improvements"
+ showcontent = true
+
+ [[tool.towncrier.type]]
+ directory = "change"
+ name = "Changes"
+ showcontent = true
+
diff --git a/pytest.ini b/pytest.ini
index 1a49e5dea..4748e3575 100644
--- a/pytest.ini
+++ b/pytest.ini
@@ -1,7 +1,7 @@
[pytest]
addopts = -l
norecursedirs = doc tools numpy/linalg/lapack_lite numpy/core/code_generators
-doctest_optionflags = NORMALIZE_WHITESPACE
+doctest_optionflags = NORMALIZE_WHITESPACE ELLIPSIS ALLOW_UNICODE ALLOW_BYTES
filterwarnings =
error
diff --git a/runtests.py b/runtests.py
index 81c7c103f..a38054f86 100755
--- a/runtests.py
+++ b/runtests.py
@@ -18,6 +18,10 @@ Run a debugger:
$ gdb --args python runtests.py [...other args...]
+Disable pytest capturing of output by using its '-s' option:
+
+ $ python runtests.py -- -s
+
Generate C code coverage listing under build/lcov/:
(requires http://ltp.sourceforge.net/coverage/lcov.php)
@@ -67,14 +71,18 @@ def main(argv):
parser = ArgumentParser(usage=__doc__.lstrip())
parser.add_argument("--verbose", "-v", action="count", default=1,
help="more verbosity")
+ parser.add_argument("--debug-info", action="store_true",
+ help=("add --verbose-cfg to build_src to show compiler "
+ "configuration output while creating "
+ "_numpyconfig.h and config.h"))
parser.add_argument("--no-build", "-n", action="store_true", default=False,
help="do not build the project (use system installed version)")
parser.add_argument("--build-only", "-b", action="store_true", default=False,
help="just build, do not run any tests")
parser.add_argument("--doctests", action="store_true", default=False,
help="Run doctests in module")
- #parser.add_argument("--refguide-check", action="store_true", default=False,
- #help="Run refguide check (do not run regular tests.)")
+ parser.add_argument("--refguide-check", action="store_true", default=False,
+ help="Run refguide (doctest) check (do not run regular tests.)")
parser.add_argument("--coverage", action="store_true", default=False,
help=("report coverage of project code. HTML output goes "
"under build/coverage"))
@@ -106,6 +114,8 @@ def main(argv):
help="Debug build")
parser.add_argument("--parallel", "-j", type=int, default=0,
help="Number of parallel jobs during build")
+ parser.add_argument("--warn-error", action="store_true",
+ help="Set -Werror to convert all compiler warnings to errors")
parser.add_argument("--show-build-log", action="store_true",
help="Show build output rather than using a log file")
parser.add_argument("--bench", action="store_true",
@@ -202,6 +212,14 @@ def main(argv):
shutil.rmtree(dst_dir)
extra_argv += ['--cov-report=html:' + dst_dir]
+ if args.refguide_check:
+ cmd = [os.path.join(ROOT_DIR, 'tools', 'refguide_check.py'),
+ '--doctests']
+ if args.submodule:
+ cmd += [args.submodule]
+ os.execv(sys.executable, [sys.executable] + cmd)
+ sys.exit(0)
+
if args.bench:
# Run ASV
items = extra_argv
@@ -251,8 +269,6 @@ def main(argv):
ret = subprocess.call(cmd, cwd=os.path.join(ROOT_DIR, 'benchmarks'))
sys.exit(ret)
- test_dir = os.path.join(ROOT_DIR, 'build', 'test')
-
if args.build_only:
sys.exit(0)
else:
@@ -335,7 +351,6 @@ def build_project(args):
# add flags used as werrors
warnings_as_errors = ' '.join([
# from tools/travis-test.sh
- '-Werror=declaration-after-statement',
'-Werror=vla',
'-Werror=nonnull',
'-Werror=pointer-arith',
@@ -361,6 +376,10 @@ def build_project(args):
cmd += ["build"]
if args.parallel > 1:
cmd += ["-j", str(args.parallel)]
+ if args.debug_info:
+ cmd += ["build_src", "--verbose-cfg"]
+ if args.warn_error:
+ cmd += ["--warn-error"]
# Install; avoid producing eggs so numpy can be imported from dst_dir.
cmd += ['install', '--prefix=' + dst_dir,
'--single-version-externally-managed',
diff --git a/setup.py b/setup.py
index cc20fa61d..068f0f405 100755
--- a/setup.py
+++ b/setup.py
@@ -27,13 +27,10 @@ import subprocess
import textwrap
-if sys.version_info[:2] < (2, 7) or (3, 0) <= sys.version_info[:2] < (3, 4):
- raise RuntimeError("Python version 2.7 or >= 3.4 required.")
+if sys.version_info[:2] < (3, 5):
+ raise RuntimeError("Python version >= 3.5 required.")
-if sys.version_info[0] >= 3:
- import builtins
-else:
- import __builtin__ as builtins
+import builtins
CLASSIFIERS = """\
@@ -43,13 +40,11 @@ Intended Audience :: Developers
License :: OSI Approved
Programming Language :: C
Programming Language :: Python
-Programming Language :: Python :: 2
-Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
-Programming Language :: Python :: 3.4
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
+Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: Implementation :: CPython
Topic :: Software Development
Topic :: Scientific/Engineering
@@ -60,7 +55,7 @@ Operating System :: MacOS
"""
MAJOR = 1
-MINOR = 16
+MINOR = 18
MICRO = 0
ISRELEASED = False
VERSION = '%d.%d.%d' % (MAJOR, MINOR, MICRO)
@@ -79,13 +74,17 @@ def git_version():
env['LANGUAGE'] = 'C'
env['LANG'] = 'C'
env['LC_ALL'] = 'C'
- out = subprocess.Popen(cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
+ out = subprocess.check_output(cmd, stderr=subprocess.STDOUT, env=env)
return out
try:
out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
GIT_REVISION = out.strip().decode('ascii')
- except OSError:
+ except (subprocess.SubprocessError, OSError):
+ GIT_REVISION = "Unknown"
+
+ if not GIT_REVISION:
+ # this shouldn't happen but apparently can (see gh-8512)
GIT_REVISION = "Unknown"
return GIT_REVISION
@@ -114,8 +113,8 @@ def get_version_info():
try:
from numpy.version import git_revision as GIT_REVISION
except ImportError:
- raise ImportError("Unable to import git_revision. Try removing " \
- "numpy/version.py and the build directory " \
+ raise ImportError("Unable to import git_revision. Try removing "
+ "numpy/version.py and the build directory "
"before building.")
else:
GIT_REVISION = "Unknown"
@@ -192,23 +191,52 @@ def check_submodules():
raise ValueError('Submodule not clean: %s' % line)
+class concat_license_files():
+ """Merge LICENSE.txt and LICENSES_bundled.txt for sdist creation
+
+ Done this way to keep LICENSE.txt in repo as exact BSD 3-clause (see
+ gh-13447). This makes GitHub state correctly how NumPy is licensed.
+ """
+ def __init__(self):
+ self.f1 = 'LICENSE.txt'
+ self.f2 = 'LICENSES_bundled.txt'
+
+ def __enter__(self):
+ """Concatenate files and remove LICENSES_bundled.txt"""
+ with open(self.f1, 'r') as f1:
+ self.bsd_text = f1.read()
+
+ with open(self.f1, 'a') as f1:
+ with open(self.f2, 'r') as f2:
+ self.bundled_text = f2.read()
+ f1.write('\n\n')
+ f1.write(self.bundled_text)
+
+ def __exit__(self, exception_type, exception_value, traceback):
+ """Restore content of both files"""
+ with open(self.f1, 'w') as f:
+ f.write(self.bsd_text)
+
+
from distutils.command.sdist import sdist
class sdist_checked(sdist):
""" check submodules on sdist to prevent incomplete tarballs """
def run(self):
check_submodules()
- sdist.run(self)
+ with concat_license_files():
+ sdist.run(self)
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
- p = subprocess.call([sys.executable,
- os.path.join(cwd, 'tools', 'cythonize.py'),
- 'numpy/random'],
- cwd=cwd)
- if p != 0:
- raise RuntimeError("Running cythonize failed!")
+ for d in ('random',):
+ p = subprocess.call([sys.executable,
+ os.path.join(cwd, 'tools', 'cythonize.py'),
+ 'numpy/{0}'.format(d)],
+ cwd=cwd)
+ if p != 0:
+ raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
@@ -239,7 +267,7 @@ def parse_setuppy_commands():
# below and not standalone. Hence they're not added to good_commands.
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
- 'bdist_wininst', 'bdist_msi', 'bdist_mpkg')
+ 'bdist_wininst', 'bdist_msi', 'bdist_mpkg', 'build_src')
for command in good_commands:
if command in args:
@@ -341,7 +369,7 @@ def parse_setuppy_commands():
def setup_package():
- src_path = os.path.dirname(os.path.abspath(sys.argv[0]))
+ src_path = os.path.dirname(os.path.abspath(__file__))
old_path = os.getcwd()
os.chdir(src_path)
sys.path.insert(0, src_path)
@@ -370,12 +398,18 @@ def setup_package():
url = "https://www.numpy.org",
author = "Travis E. Oliphant et al.",
download_url = "https://pypi.python.org/pypi/numpy",
+ project_urls={
+ "Bug Tracker": "https://github.com/numpy/numpy/issues",
+ "Documentation": "https://docs.scipy.org/doc/numpy/",
+ "Source Code": "https://github.com/numpy/numpy",
+ },
license = 'BSD',
classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"],
test_suite='nose.collector',
- cmdclass={"sdist": sdist_checked},
- python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*',
+ cmdclass={"sdist": sdist_checked,
+ },
+ python_requires='>=3.5',
zip_safe=False,
entry_points={
'console_scripts': f2py_cmds
@@ -393,8 +427,8 @@ def setup_package():
if run_build:
from numpy.distutils.core import setup
cwd = os.path.abspath(os.path.dirname(__file__))
- if not os.path.exists(os.path.join(cwd, 'PKG-INFO')):
- # Generate Cython sources, unless building from source release
+ if not 'sdist' in sys.argv:
+ # Generate Cython sources, unless we're generating an sdist
generate_cython()
metadata['configuration'] = configuration
diff --git a/shippable.yml b/shippable.yml
index 6a92c0f34..af3cfaa04 100644
--- a/shippable.yml
+++ b/shippable.yml
@@ -1,17 +1,17 @@
branches:
only:
- master
+ - maintenance/*
language: python
python:
# use versions available for job image
- # aarch64_u16pytall:v6.7.4
+ # aarch64_u16pytall:v6.7.4
# (what we currently have access to by default)
# this is a bit restrictive in terms
# of version availability / control,
# but it is convenient
- - 2.7
- 3.7
runtime:
@@ -22,16 +22,16 @@ runtime:
build:
ci:
# install dependencies
- - sudo apt-get install gcc gfortran libblas-dev liblapack-dev
- # add pathlib for Python 2, otherwise many tests are skipped
+ - sudo apt-get install gcc gfortran
+ - target=$(python tools/openblas_support.py)
+ - sudo cp -r "${target}"/64/lib/* /usr/lib
+ - sudo cp "${target}"/64/include/* /usr/include
- pip install --upgrade pip
+
# we will pay the ~13 minute cost of compiling Cython only when a new
# version is scraped in by pip; otherwise, use the cached
# wheel shippable places on Amazon S3 after we build it once
- - pip install cython --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION
- - pip install pathlib
- # install pytz for datetime testing
- - pip install pytz
+ - pip install -r test_requirements.txt --cache-dir=/root/.cache/pip/wheels/$SHIPPABLE_PYTHON_VERSION
# install pytest-xdist to leverage a second core
# for unit tests
- pip install pytest-xdist
@@ -41,18 +41,20 @@ build:
# build first and adjust PATH so f2py is found in scripts dir
# use > 1 core for build sometimes slows down a fair bit,
# other times modestly speeds up, so avoid for now
- - python setup.py install
+ - pip install .
- extra_directories=($SHIPPABLE_REPO_DIR/build/*scripts*)
- extra_path=$(printf "%s:" "${extra_directories[@]}")
- export PATH="${extra_path}${PATH}"
+ # check OpenBLAS version
+ - python tools/openblas_support.py --check_version 0.3.7
# run the test suite
- - python runtests.py -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
+ - python runtests.py --debug-info --show-build-log -- -rsx --junit-xml=$SHIPPABLE_REPO_DIR/shippable/testresults/tests.xml -n 2 --durations=10
cache: true
cache_dir_list:
# the NumPy project uses a single Amazon S3 cache
# so upload the parent path of the Python-specific
- # version paths to avoid i.e., 2.7 overwriting
+ # version paths to avoid i.e., 3.6 overwriting
# 3.7 pip cache (seems to be an issue)
- /root/.cache/pip/wheels
diff --git a/site.cfg.example b/site.cfg.example
index 48b17fbdf..b6b0175d6 100644
--- a/site.cfg.example
+++ b/site.cfg.example
@@ -4,11 +4,11 @@
# packages will use all sections so you should leave out sections that your
# package does not use.
-# To assist automatic installation like easy_install, the user's home directory
+# To assist automatic installation like pip, the user's home directory
# will also be checked for the file ~/.numpy-site.cfg .
# The format of the file is that of the standard library's ConfigParser module.
-# No interpolation is allowed, RawConfigParser class being used to load it.
+# No interpolation is allowed; the RawConfigParser class is being used to load it.
#
# https://docs.python.org/library/configparser.html
#
@@ -21,6 +21,7 @@
# with. Note that these should be just the names, not the filenames. For
# example, the file "libfoo.so" would become simply "foo".
# libraries = lapack,f77blas,cblas,atlas
+# This setting is available for *all* sections.
#
# library_dirs
# List of directories to add to the library search path when compiling
@@ -46,7 +47,7 @@
# LAPACK libraries are one example. However, most dependencies are more
# complicated and require actual installation that you need to do
# yourself.
-# src_dirs = /home/rkern/src/BLAS_SRC:/home/rkern/src/LAPACK_SRC
+# src_dirs = /home/username/src/BLAS_SRC:/home/username/src/LAPACK_SRC
#
# search_static_first
# Boolean (one of (0, false, no, off) for False or (1, true, yes, on) for
@@ -64,14 +65,14 @@
#
# extra_compile_args
# Add additional arguments to the compilation of sources.
-# Simple variable with no parsing done.
+# Split into arguments in a platform-appropriate way.
# Provide a single line with all complete flags.
# extra_compile_args = -g -ftree-vectorize
#
# extra_link_args
# Add additional arguments when libraries/executables
# are linked.
-# Simple variable with no parsing done.
+# Split into arguments in a platform-appropriate way.
# Provide a single line with all complete flags.
# extra_link_args = -lgfortran
#
@@ -87,13 +88,13 @@
#include_dirs = /usr/local/include
#
-# Atlas
+# ATLAS
# -----
-# Atlas is an open source optimized implementation of the BLAS and Lapack
-# routines. NumPy will try to build against Atlas by default when available in
-# the system library dirs. To build numpy against a custom installation of
-# Atlas you can add an explicit section such as the following. Here we assume
-# that Atlas was configured with ``prefix=/opt/atlas``.
+# ATLAS is an open source optimized implementation of the BLAS and LAPACK
+# routines. NumPy will try to build against ATLAS by default when available in
+# the system library dirs. To build NumPy against a custom installation of
+# ATLAS you can add an explicit section such as the following. Here we assume
+# that ATLAS was configured with ``prefix=/opt/atlas``.
#
# [atlas]
# library_dirs = /opt/atlas/lib
@@ -101,9 +102,9 @@
# OpenBLAS
# --------
-# OpenBLAS is another open source optimized implementation of BLAS and Lapack
-# and can be seen as an alternative to Atlas. To build numpy against OpenBLAS
-# instead of Atlas, use this section instead of the above, adjusting as needed
+# OpenBLAS is another open source optimized implementation of BLAS and LAPACK
+# and can be seen as an alternative to ATLAS. To build NumPy against OpenBLAS
+# instead of ATLAS, use this section instead of the above, adjusting as needed
# for your configuration (in the following example we installed OpenBLAS with
# ``make install PREFIX=/opt/OpenBLAS``.
# OpenBLAS is generically installed as a shared library, to force the OpenBLAS
@@ -114,7 +115,7 @@
# way Python's multiprocessing is implemented, a multithreaded OpenBLAS can
# cause programs using both to hang as soon as a worker process is forked on
# POSIX systems (Linux, Mac).
-# This is fixed in Openblas 0.2.9 for the pthread build, the OpenMP build using
+# This is fixed in OpenBLAS 0.2.9 for the pthread build, the OpenMP build using
# GNU openmp is as of gcc-4.9 not fixed yet.
# Python 3.4 will introduce a new feature in multiprocessing, called the
# "forkserver", which solves this problem. For older versions, make sure
@@ -135,7 +136,7 @@
# ----
# BLIS (https://github.com/flame/blis) also provides a BLAS interface. It's a
# relatively new library, its performance in some cases seems to match that of
-# MKL and OpenBLAS, but it hasn't been benchmarked with NumPy or Scipy yet.
+# MKL and OpenBLAS, but it hasn't been benchmarked with NumPy or SciPy yet.
#
# Notes on compiling BLIS itself:
# - the CBLAS interface (needed by NumPy) isn't built by default; define
@@ -152,37 +153,50 @@
# include_dirs = /home/username/blis/include/blis
# runtime_library_dirs = /home/username/blis/lib
+# libFLAME
+# --------
+# libFLAME (https://www.cs.utexas.edu/~flame/web/libFLAME.html) provides a
+# LAPACK interface. It's a relatively new library, its performance in some
+# cases seems to match that of MKL and OpenBLAS.
+# It hasn't been benchmarked with NumPy or SciPy yet.
+#
+# Notes on compiling libFLAME itself:
+# - the LAPACK interface (needed by NumPy) isn't built by default; please
+# configure with ``./configure --enable-lapack2flame``.
+#
+# [flame]
+# libraries = flame
+# library_dirs = /home/username/flame/lib
+# runtime_library_dirs = /home/username/flame/lib
+
# MKL
#----
# Intel MKL is Intel's very optimized yet proprietary implementation of BLAS and
-# Lapack. Find the latest info on building numpy with Intel MKL in this article:
+# LAPACK. Find the latest info on building NumPy with Intel MKL in this article:
# https://software.intel.com/en-us/articles/numpyscipy-with-intel-mkl
# Assuming you installed the mkl in /opt/intel/compilers_and_libraries_2018/linux/mkl,
# for 64 bits code at Linux:
# [mkl]
# library_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/lib/intel64
# include_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/include
-# mkl_libs = mkl_rt
-# lapack_libs = 
+# libraries = mkl_rt
#
# For 32 bit code at Linux:
# [mkl]
# library_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/lib/ia32
# include_dirs = /opt/intel/compilers_and_libraries_2018/linux/mkl/include
-# mkl_libs = mkl_rt
-# lapack_libs = 
+# libraries = mkl_rt
#
-# On win-64, the following options compiles numpy with the MKL library
+# On win-64, the following options compile NumPy with the MKL library
# dynamically linked.
# [mkl]
# include_dirs = C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\mkl\include
# library_dirs = C:\Program Files (x86)\IntelSWTools\compilers_and_libraries_2018\windows\mkl\lib\intel64
-# mkl_libs = mkl_rt
-# lapack_libs =
+# libraries = mkl_rt
-# ACCELERATE
+# Accelerate
# ----------
-# Accelerate/vecLib is an OSX framework providing a BLAS and LAPACK implementations.
+# Accelerate/vecLib is an OSX framework providing a BLAS and LAPACK implementation.
#
# [accelerate]
# libraries = Accelerate, vecLib
@@ -195,22 +209,22 @@
# better performance. Note that the AMD library has nothing to do with AMD
# (Advanced Micro Devices), the CPU company.
#
-# UMFPACK is not used by numpy.
+# UMFPACK is not used by NumPy.
#
# https://www.cise.ufl.edu/research/sparse/umfpack/
# https://www.cise.ufl.edu/research/sparse/amd/
# https://scikit-umfpack.github.io/scikit-umfpack/
#
#[amd]
-#amd_libs = amd
+#libraries = amd
#
#[umfpack]
-#umfpack_libs = umfpack
+#libraries = umfpack
# FFT libraries
# -------------
# There are two FFT libraries that we can configure here: FFTW (2 and 3) and djbfft.
-# Note that these libraries are not used by for numpy or scipy.
+# Note that these libraries are not used by NumPy or SciPy.
#
# http://fftw.org/
# https://cr.yp.to/djbfft.html
diff --git a/test_requirements.txt b/test_requirements.txt
new file mode 100644
index 000000000..ea2a4bfbf
--- /dev/null
+++ b/test_requirements.txt
@@ -0,0 +1,7 @@
+cython==0.29.13
+pytest==5.2.1
+pytz==2019.3
+pytest-cov==2.8.1
+pickle5; python_version == '3.7'
+pickle5; python_version == '3.6' and platform_python_implementation != 'PyPy'
+nose
diff --git a/tools/changelog.py b/tools/changelog.py
index 84e046c5f..b135b14e5 100755
--- a/tools/changelog.py
+++ b/tools/changelog.py
@@ -42,8 +42,10 @@ import codecs
from git import Repo
from github import Github
-UTF8Writer = codecs.getwriter('utf8')
-sys.stdout = UTF8Writer(sys.stdout)
+if sys.version_info.major < 3:
+ UTF8Writer = codecs.getwriter('utf8')
+ sys.stdout = UTF8Writer(sys.stdout)
+
this_repo = Repo(os.path.join(os.path.dirname(__file__), ".."))
author_msg =\
diff --git a/tools/ci/appveyor/requirements.txt b/tools/ci/appveyor/requirements.txt
deleted file mode 100644
index fba8260da..000000000
--- a/tools/ci/appveyor/requirements.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-cython
-nose
-pytest-timeout
-pytest-xdist
-pytest-env
-pytest-faulthandler
diff --git a/tools/ci/test_all_newsfragments_used.py b/tools/ci/test_all_newsfragments_used.py
new file mode 100755
index 000000000..6c4591fd8
--- /dev/null
+++ b/tools/ci/test_all_newsfragments_used.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+import sys
+import toml
+import os
+
+path = toml.load("pyproject.toml")["tool"]["towncrier"]["directory"]
+
+fragments = os.listdir(path)
+fragments.remove("README.rst")
+fragments.remove("template.rst")
+
+if fragments:
+ print("The following files were not found by towncrier:")
+ print(" " + " \n".join(fragments))
+ sys.exit(1)
diff --git a/tools/cythonize.py b/tools/cythonize.py
index 9e2af840d..5bea2d4ec 100755
--- a/tools/cythonize.py
+++ b/tools/cythonize.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python
+#!/usr/bin/env python3
""" cythonize
Cythonize pyx files into C files as needed.
@@ -52,32 +52,27 @@ except NameError:
# Rules
#
def process_pyx(fromfile, tofile):
- flags = ['--fast-fail']
+ flags = ['-3', '--fast-fail']
if tofile.endswith('.cxx'):
- flags += ['--cplus']
+ flags.append('--cplus')
try:
# try the cython in the installed python first (somewhat related to scipy/scipy#2397)
from Cython.Compiler.Version import version as cython_version
except ImportError:
- # if that fails, use the one on the path, which might be the wrong version
- try:
- # Try the one on the path as a last resort
- subprocess.check_call(
- ['cython'] + flags + ["-o", tofile, fromfile])
- except OSError:
- raise OSError('Cython needs to be installed')
+ # The `cython` command need not point to the version installed in the
+ # Python running this script, so raise an error to avoid the chance of
+ # using the wrong version of Cython.
+ raise OSError('Cython needs to be installed in Python as a module')
else:
# check the version, and invoke through python
from distutils.version import LooseVersion
- # requiring the newest version on all pythons doesn't work, since
- # we're relying on the version of the distribution cython. Add new
- # versions as they become required for new python versions.
- if sys.version_info[:2] < (3, 7):
- required_version = LooseVersion('0.19')
- else:
- required_version = LooseVersion('0.28')
+ # Cython 0.29.13 is required for Python 3.8 and there are
+ # other fixes in the 0.29 series that are needed even for earlier
+ # Python versions.
+ # Note: keep in sync with that in pyproject.toml
+ required_version = LooseVersion('0.29.13')
if LooseVersion(cython_version) < required_version:
raise RuntimeError('Building {} requires Cython >= {}'.format(
@@ -99,6 +94,17 @@ def process_tempita_pyx(fromfile, tofile):
process_pyx(pyxfile, tofile)
+def process_tempita_pyd(fromfile, tofile):
+ import npy_tempita as tempita
+
+ assert fromfile.endswith('.pxd.in')
+ assert tofile.endswith('.pxd')
+ with open(fromfile, "r") as f:
+ tmpl = f.read()
+ pyxcontent = tempita.sub(tmpl)
+ with open(tofile, "w") as f:
+ f.write(pyxcontent)
+
def process_tempita_pxi(fromfile, tofile):
import npy_tempita as tempita
@@ -110,10 +116,24 @@ def process_tempita_pxi(fromfile, tofile):
with open(tofile, "w") as f:
f.write(pyxcontent)
+def process_tempita_pxd(fromfile, tofile):
+ import npy_tempita as tempita
+
+ assert fromfile.endswith('.pxd.in')
+ assert tofile.endswith('.pxd')
+ with open(fromfile, "r") as f:
+ tmpl = f.read()
+ pyxcontent = tempita.sub(tmpl)
+ with open(tofile, "w") as f:
+ f.write(pyxcontent)
+
rules = {
- # fromext : function
- '.pyx' : process_pyx,
- '.pyx.in' : process_tempita_pyx
+ # fromext : function, toext
+ '.pyx' : (process_pyx, '.c'),
+ '.pyx.in' : (process_tempita_pyx, '.c'),
+ '.pxi.in' : (process_tempita_pxi, '.pxi'),
+ '.pxd.in' : (process_tempita_pxd, '.pxd'),
+ '.pyd.in' : (process_tempita_pyd, '.pyd'),
}
#
# Hash db
@@ -179,38 +199,32 @@ def process(path, fromfile, tofile, processor_function, hash_db):
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
- for cur_dir, dirs, files in os.walk(root_dir):
- # .pxi or .pxi.in files are most likely dependencies for
- # .pyx files, so we need to process them first
- files.sort(key=lambda name: (name.endswith('.pxi') or
- name.endswith('.pxi.in')),
- reverse=True)
-
- for filename in files:
- in_file = os.path.join(cur_dir, filename + ".in")
- if filename.endswith('.pyx') and os.path.isfile(in_file):
- continue
- elif filename.endswith('.pxi.in'):
- toext = '.pxi'
- fromext = '.pxi.in'
+ files = [x for x in os.listdir(root_dir) if not os.path.isdir(x)]
+ # .pxi or .pxi.in files are most likely dependencies for
+ # .pyx files, so we need to process them first
+ files.sort(key=lambda name: (name.endswith('.pxi') or
+ name.endswith('.pxi.in') or
+ name.endswith('.pxd.in')),
+ reverse=True)
+
+ for filename in files:
+ in_file = os.path.join(root_dir, filename + ".in")
+ for fromext, value in rules.items():
+ if filename.endswith(fromext):
+ if not value:
+ break
+ function, toext = value
+ if toext == '.c':
+ with open(os.path.join(root_dir, filename), 'rb') as f:
+ data = f.read()
+ m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
+ if m:
+ toext = ".cxx"
fromfile = filename
- function = process_tempita_pxi
tofile = filename[:-len(fromext)] + toext
- process(cur_dir, fromfile, tofile, function, hash_db)
+ process(root_dir, fromfile, tofile, function, hash_db)
save_hashes(hash_db, HASH_FILE)
- else:
- for fromext, function in rules.items():
- if filename.endswith(fromext):
- toext = ".c"
- with open(os.path.join(cur_dir, filename), 'rb') as f:
- data = f.read()
- m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
- if m:
- toext = ".cxx"
- fromfile = filename
- tofile = filename[:-len(fromext)] + toext
- process(cur_dir, fromfile, tofile, function, hash_db)
- save_hashes(hash_db, HASH_FILE)
+ break
def main():
try:
diff --git a/tools/npy_tempita/__init__.py b/tools/npy_tempita/__init__.py
index dfb40e965..f75f23a21 100644
--- a/tools/npy_tempita/__init__.py
+++ b/tools/npy_tempita/__init__.py
@@ -105,21 +105,21 @@ class Template(object):
def __init__(self, content, name=None, namespace=None, stacklevel=None,
get_template=None, default_inherit=None, line_offset=0,
- delimeters=None):
+ delimiters=None):
self.content = content
- # set delimeters
- if delimeters is None:
- delimeters = (self.default_namespace['start_braces'],
+ # set delimiters
+ if delimiters is None:
+ delimiters = (self.default_namespace['start_braces'],
self.default_namespace['end_braces'])
else:
- assert len(delimeters) == 2 and all(
- [isinstance(delimeter, basestring_)
- for delimeter in delimeters])
+ assert len(delimiters) == 2 and all(
+ [isinstance(delimiter, basestring_)
+ for delimiter in delimiters])
self.default_namespace = self.__class__.default_namespace.copy()
- self.default_namespace['start_braces'] = delimeters[0]
- self.default_namespace['end_braces'] = delimeters[1]
- self.delimeters = delimeters
+ self.default_namespace['start_braces'] = delimiters[0]
+ self.default_namespace['end_braces'] = delimiters[1]
+ self.delimiters = delimiters
self._unicode = is_unicode(content)
if name is None and stacklevel is not None:
@@ -143,7 +143,7 @@ class Template(object):
self.name = name
self._parsed = parse(
content, name=name, line_offset=line_offset,
- delimeters=self.delimeters)
+ delimiters=self.delimiters)
if namespace is None:
namespace = {}
self.namespace = namespace
@@ -392,9 +392,9 @@ class Template(object):
return msg
-def sub(content, delimeters=None, **kw):
+def sub(content, delimiters=None, **kw):
name = kw.get('__name')
- tmpl = Template(content, name=name, delimeters=delimeters)
+ tmpl = Template(content, name=name, delimiters=delimiters)
return tmpl.substitute(kw)
@@ -652,28 +652,28 @@ del _Empty
############################################################
-def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
- if delimeters is None:
- delimeters = (Template.default_namespace['start_braces'],
+def lex(s, name=None, trim_whitespace=True, line_offset=0, delimiters=None):
+ if delimiters is None:
+ delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
in_expr = False
chunks = []
last = 0
last_pos = (line_offset + 1, 1)
- token_re = re.compile(r'%s|%s' % (re.escape(delimeters[0]),
- re.escape(delimeters[1])))
+ token_re = re.compile(r'%s|%s' % (re.escape(delimiters[0]),
+ re.escape(delimiters[1])))
for match in token_re.finditer(s):
expr = match.group(0)
pos = find_position(s, match.end(), last, last_pos)
- if expr == delimeters[0] and in_expr:
- raise TemplateError('%s inside expression' % delimeters[0],
+ if expr == delimiters[0] and in_expr:
+ raise TemplateError('%s inside expression' % delimiters[0],
position=pos,
name=name)
- elif expr == delimeters[1] and not in_expr:
- raise TemplateError('%s outside expression' % delimeters[1],
+ elif expr == delimiters[1] and not in_expr:
+ raise TemplateError('%s outside expression' % delimiters[1],
position=pos,
name=name)
- if expr == delimeters[0]:
+ if expr == delimiters[0]:
part = s[last:match.start()]
if part:
chunks.append(part)
@@ -684,7 +684,7 @@ def lex(s, name=None, trim_whitespace=True, line_offset=0, delimeters=None):
last = match.end()
last_pos = pos
if in_expr:
- raise TemplateError('No %s to finish last expression' % delimeters[1],
+ raise TemplateError('No %s to finish last expression' % delimiters[1],
name=name, position=last_pos)
part = s[last:]
if part:
@@ -822,12 +822,12 @@ def find_position(string, index, last_index, last_pos):
return (last_pos[0] + lines, column)
-def parse(s, name=None, line_offset=0, delimeters=None):
+def parse(s, name=None, line_offset=0, delimiters=None):
- if delimeters is None:
- delimeters = (Template.default_namespace['start_braces'],
+ if delimiters is None:
+ delimiters = (Template.default_namespace['start_braces'],
Template.default_namespace['end_braces'])
- tokens = lex(s, name=name, line_offset=line_offset, delimeters=delimeters)
+ tokens = lex(s, name=name, line_offset=line_offset, delimiters=delimiters)
result = []
while tokens:
next_chunk, tokens = parse_expr(tokens, name)
diff --git a/tools/npy_tempita/_looper.py b/tools/npy_tempita/_looper.py
index dcb206642..047bf5292 100644
--- a/tools/npy_tempita/_looper.py
+++ b/tools/npy_tempita/_looper.py
@@ -77,53 +77,53 @@ class loop_pos(object):
return '<loop pos=%r at %r>' % (
self.seq[self.pos], self.pos)
+ @property
def index(self):
return self.pos
- index = property(index)
+ @property
def number(self):
return self.pos + 1
- number = property(number)
+ @property
def item(self):
return self.seq[self.pos]
- item = property(item)
+ @property
def __next__(self):
try:
return self.seq[self.pos + 1]
except IndexError:
return None
- __next__ = property(__next__)
if sys.version < "3":
next = __next__
+ @property
def previous(self):
if self.pos == 0:
return None
return self.seq[self.pos - 1]
- previous = property(previous)
+ @property
def odd(self):
return not self.pos % 2
- odd = property(odd)
+ @property
def even(self):
return self.pos % 2
- even = property(even)
+ @property
def first(self):
return self.pos == 0
- first = property(first)
+ @property
def last(self):
return self.pos == len(self.seq) - 1
- last = property(last)
+ @property
def length(self):
return len(self.seq)
- length = property(length)
def first_group(self, getter=None):
"""
diff --git a/tools/npy_tempita/compat3.py b/tools/npy_tempita/compat3.py
index eb890ca14..01d771345 100644
--- a/tools/npy_tempita/compat3.py
+++ b/tools/npy_tempita/compat3.py
@@ -5,7 +5,7 @@ import sys
__all__ = ['PY3', 'b', 'basestring_', 'bytes', 'next', 'is_unicode',
'iteritems']
-PY3 = True if sys.version_info[0] == 3 else False
+PY3 = True if sys.version_info[0] >= 3 else False
if sys.version_info[0] < 3:
diff --git a/tools/openblas_support.py b/tools/openblas_support.py
new file mode 100644
index 000000000..964adce6e
--- /dev/null
+++ b/tools/openblas_support.py
@@ -0,0 +1,229 @@
+from __future__ import division, absolute_import, print_function
+import os
+import sys
+import textwrap
+import platform
+try:
+ from urllib.request import urlopen
+ from urllib.error import HTTPError
+except:
+ #Python2
+ from urllib2 import urlopen, HTTPError
+
+from tempfile import mkstemp, gettempdir
+import zipfile
+import tarfile
+
+OPENBLAS_V = 'v0.3.7'
+OPENBLAS_LONG = 'v0.3.7'
+BASE_LOC = ''
+RACKSPACE = 'https://3f23b170c54c2533c070-1c8a9b3114517dc5fe17b7c3f8c63a43.ssl.cf2.rackcdn.com'
+ARCHITECTURES = ['', 'windows', 'darwin', 'arm', 'x86', 'ppc64']
+
+IS_32BIT = sys.maxsize < 2**32
+def get_arch():
+ if platform.system() == 'Windows':
+ ret = 'windows'
+ elif platform.system() == 'Darwin':
+ ret = 'darwin'
+ # Python3 returns a named tuple, but Python2 does not, so we are stuck
+ elif 'arm' in os.uname()[-1]:
+ ret = 'arm';
+ elif 'aarch64' in os.uname()[-1]:
+ ret = 'arm';
+ elif 'x86' in os.uname()[-1]:
+ ret = 'x86'
+ elif 'ppc64' in os.uname()[-1]:
+ ret = 'ppc64'
+ else:
+ ret = ''
+ assert ret in ARCHITECTURES
+ return ret
+
+def download_openblas(target, arch):
+ filename = ''
+ if arch == 'arm':
+ # ARMv8 OpenBLAS built using script available here:
+ # https://github.com/tylerjereddy/openblas-static-gcc/tree/master/ARMv8
+ # build done on GCC compile farm machine named gcc115
+ # tarball uploaded manually to an unshared Dropbox location
+ filename = ('https://www.dropbox.com/s/vdeckao4omss187/'
+ 'openblas-{}-armv8.tar.gz?dl=1'.format(OPENBLAS_V))
+ typ = 'tar.gz'
+ elif arch == 'ppc64':
+ # build script for POWER8 OpenBLAS available here:
+ # https://github.com/tylerjereddy/openblas-static-gcc/blob/master/power8
+ # built on GCC compile farm machine named gcc112
+ # manually uploaded tarball to an unshared Dropbox location
+ filename = ('https://www.dropbox.com/s/yt0d2j86x1j8nh1/'
+ 'openblas-{}-ppc64le-power8.tar.gz?dl=1'.format(OPENBLAS_V))
+ typ = 'tar.gz'
+ elif arch == 'darwin':
+ filename = '{0}/openblas-{1}-macosx_10_9_x86_64-gf_1becaaa.tar.gz'.format(
+ RACKSPACE, OPENBLAS_LONG)
+ typ = 'tar.gz'
+ elif arch == 'windows':
+ if IS_32BIT:
+ suffix = 'win32-gcc_7_1_0.zip'
+ else:
+ suffix = 'win_amd64-gcc_7_1_0.zip'
+ filename = '{0}/openblas-{1}-{2}'.format(RACKSPACE, OPENBLAS_LONG, suffix)
+ typ = 'zip'
+ elif arch == 'x86':
+ if IS_32BIT:
+ suffix = 'manylinux1_i686.tar.gz'
+ else:
+ suffix = 'manylinux1_x86_64.tar.gz'
+ filename = '{0}/openblas-{1}-{2}'.format(RACKSPACE, OPENBLAS_LONG, suffix)
+ typ = 'tar.gz'
+ if not filename:
+ return None
+ try:
+ with open(target, 'wb') as fid:
+ fid.write(urlopen(filename).read())
+ except HTTPError:
+ print('Could not download "%s"' % filename)
+ return None
+ return typ
+
+def setup_openblas(arch=get_arch()):
+ '''
+ Download and setup an openblas library for building. If successful,
+ the configuration script will find it automatically.
+
+ Returns
+ -------
+ msg : str
+ path to extracted files on success, otherwise indicates what went wrong
+ To determine success, do ``os.path.exists(msg)``
+ '''
+ _, tmp = mkstemp()
+ if not arch:
+ raise ValueError('unknown architecture')
+ typ = download_openblas(tmp, arch)
+ if not typ:
+ return ''
+ if arch == 'windows':
+ if not typ == 'zip':
+ return 'expecting to download zipfile on windows, not %s' % str(typ)
+ return unpack_windows_zip(tmp)
+ else:
+ if not typ == 'tar.gz':
+ return 'expecting to download tar.gz, not %s' % str(typ)
+ return unpack_targz(tmp)
+
+def unpack_windows_zip(fname):
+ import sysconfig
+ with zipfile.ZipFile(fname, 'r') as zf:
+ # Get the openblas.a file, but not openblas.dll.a nor openblas.dev.a
+ lib = [x for x in zf.namelist() if OPENBLAS_LONG in x and
+ x.endswith('a') and not x.endswith('dll.a') and
+ not x.endswith('dev.a')]
+ if not lib:
+ return 'could not find libopenblas_%s*.a ' \
+ 'in downloaded zipfile' % OPENBLAS_LONG
+ target = os.path.join(gettempdir(), 'openblas.a')
+ with open(target, 'wb') as fid:
+ fid.write(zf.read(lib[0]))
+ return target
+
+def unpack_targz(fname):
+ target = os.path.join(gettempdir(), 'openblas')
+ if not os.path.exists(target):
+ os.mkdir(target)
+ with tarfile.open(fname, 'r') as zf:
+ # TODO: check that all the zf.getnames() files do not escape the
+ # extract directory (no leading '../', '/')
+ zf.extractall(target)
+ return target
+
+def make_init(dirname):
+ '''
+    Create a _distributor_init.py file for OpenBLAS
+ '''
+ with open(os.path.join(dirname, '_distributor_init.py'), 'wt') as fid:
+ fid.write(textwrap.dedent("""
+ '''
+ Helper to preload windows dlls to prevent dll not found errors.
+ Once a DLL is preloaded, its namespace is made available to any
+ subsequent DLL. This file originated in the numpy-wheels repo,
+ and is created as part of the scripts that build the wheel.
+ '''
+ import os
+ from ctypes import WinDLL
+ import glob
+ if os.name == 'nt':
+ # convention for storing / loading the DLL from
+ # numpy/.libs/, if present
+ try:
+ basedir = os.path.dirname(__file__)
+ except:
+ pass
+ else:
+ libs_dir = os.path.abspath(os.path.join(basedir, '.libs'))
+ DLL_filenames = []
+ if os.path.isdir(libs_dir):
+ for filename in glob.glob(os.path.join(libs_dir,
+ '*openblas*dll')):
+ # NOTE: would it change behavior to load ALL
+ # DLLs at this path vs. the name restriction?
+ WinDLL(os.path.abspath(filename))
+ DLL_filenames.append(filename)
+ if len(DLL_filenames) > 1:
+ import warnings
+ warnings.warn("loaded more than 1 DLL from .libs:\\n%s" %
+ "\\n".join(DLL_filenames),
+ stacklevel=1)
+ """))
+
+def test_setup(arches):
+ '''
+ Make sure all the downloadable files exist and can be opened
+ '''
+ for arch in arches:
+ if arch == '':
+ continue
+ try:
+ target = setup_openblas(arch)
+ except:
+ print('Could not setup %s' % arch)
+ raise
+ if not target:
+ raise RuntimeError('Could not setup %s' % arch)
+ print(target)
+
+def test_version(expected_version):
+ """
+    Assert that the expected OpenBLAS version is
+ actually available via NumPy
+ """
+ import numpy
+ import ctypes
+
+ dll = ctypes.CDLL(numpy.core._multiarray_umath.__file__)
+ get_config = dll.openblas_get_config
+ get_config.restype=ctypes.c_char_p
+ res = get_config()
+ print('OpenBLAS get_config returned', str(res))
+ check_str = b'OpenBLAS %s' % expected_version[0].encode()
+ assert check_str in res
+
+if __name__ == '__main__':
+ import argparse
+ parser = argparse.ArgumentParser(
+ description='Download and expand an OpenBLAS archive for this ' \
+ 'architecture')
+ parser.add_argument('--test', nargs='*', default=None,
+ help='Test different architectures. "all", or any of %s' % ARCHITECTURES)
+ parser.add_argument('--check_version', nargs=1, default=None,
+ help='Check provided OpenBLAS version string against available OpenBLAS')
+ args = parser.parse_args()
+ if args.check_version is not None:
+ test_version(args.check_version)
+ elif args.test is None:
+ print(setup_openblas())
+ else:
+ if len(args.test) == 0 or 'all' in args.test:
+ test_setup(ARCHITECTURES)
+ else:
+ test_setup(args.test)
diff --git a/tools/pypy-test.sh b/tools/pypy-test.sh
new file mode 100755
index 000000000..f4d56ba1a
--- /dev/null
+++ b/tools/pypy-test.sh
@@ -0,0 +1,48 @@
+#!/usr/bin/env bash
+
+# Exit if a command fails
+set -e
+set -o pipefail
+# Print expanded commands
+set -x
+
+sudo apt-get -yq update
+sudo apt-get -yq install libatlas-base-dev liblapack-dev gfortran-5
+F77=gfortran-5 F90=gfortran-5 \
+
+# Download the proper OpenBLAS x64 precompiled library
+target=$(python tools/openblas_support.py)
+echo getting OpenBLAS into $target
+export LD_LIBRARY_PATH=$target/usr/local/lib
+export LIB=$target/usr/local/lib
+export INCLUDE=$target/usr/local/include
+
+# Use a site.cfg to build with local openblas
+cat << EOF > site.cfg
+[openblas]
+libraries = openblas
+library_dirs = $target/usr/local/lib:$LIB
+include_dirs = $target/usr/local/lib:$LIB
+runtime_library_dirs = $target/usr/local/lib
+EOF
+
+echo getting PyPy 3.6 nightly
+wget -q http://buildbot.pypy.org/nightly/py3.6/pypy-c-jit-latest-linux64.tar.bz2 -O pypy.tar.bz2
+mkdir -p pypy3
+(cd pypy3; tar --strip-components=1 -xf ../pypy.tar.bz2)
+pypy3/bin/pypy3 -mensurepip
+pypy3/bin/pypy3 -m pip install --upgrade pip setuptools
+pypy3/bin/pypy3 -m pip install --user -r test_requirements.txt --no-warn-script-location
+
+echo
+echo pypy3 version
+pypy3/bin/pypy3 -c "import sys; print(sys.version)"
+echo
+
+pypy3/bin/pypy3 runtests.py --debug-info --show-build-log -v -- -rsx \
+ --junitxml=junit/test-results.xml --durations 10
+
+echo Make sure the correct openblas has been linked in
+
+pypy3/bin/pip install .
+pypy3/bin/pypy3 tools/openblas_support.py --check_version "$OpenBLAS_version"
diff --git a/tools/refguide_check.py b/tools/refguide_check.py
new file mode 100644
index 000000000..c20807267
--- /dev/null
+++ b/tools/refguide_check.py
@@ -0,0 +1,958 @@
+#!/usr/bin/env python
+"""
+refguide_check.py [OPTIONS] [-- ARGS]
+
+Check for a NumPy submodule whether the objects in its __all__ dict
+correspond to the objects included in the reference guide.
+
+Example of usage::
+
+ $ python refguide_check.py optimize
+
+Note that this is a helper script to be able to check if things are missing;
+the output of this script does need to be checked manually. In some cases
+objects are left out of the refguide for a good reason (it's an alias of
+another function, or deprecated, or ...)
+
+Another use of this helper script is to check validity of code samples
+in docstrings. This is different from doctesting [we do not aim to have
+numpy docstrings doctestable!], this is just to make sure that code in
+docstrings is valid python::
+
+ $ python refguide_check.py --doctests optimize
+
+"""
+from __future__ import print_function
+
+import sys
+import os
+import re
+import copy
+import inspect
+import warnings
+import doctest
+import tempfile
+import io
+import docutils.core
+from docutils.parsers.rst import directives
+import shutil
+import glob
+from doctest import NORMALIZE_WHITESPACE, ELLIPSIS, IGNORE_EXCEPTION_DETAIL
+from argparse import ArgumentParser
+from pkg_resources import parse_version
+
+import sphinx
+import numpy as np
+
+sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'doc', 'sphinxext'))
+from numpydoc.docscrape_sphinx import get_doc_object
+
+if parse_version(sphinx.__version__) >= parse_version('1.5'):
+ # Enable specific Sphinx directives
+ from sphinx.directives import SeeAlso, Only
+ directives.register_directive('seealso', SeeAlso)
+ directives.register_directive('only', Only)
+else:
+ # Remove sphinx directives that don't run without Sphinx environment.
+ # Sphinx < 1.5 installs all directives on import...
+ directives._directives.pop('versionadded', None)
+ directives._directives.pop('versionchanged', None)
+ directives._directives.pop('moduleauthor', None)
+ directives._directives.pop('sectionauthor', None)
+ directives._directives.pop('codeauthor', None)
+ directives._directives.pop('toctree', None)
+
+
+BASE_MODULE = "numpy"
+
+PUBLIC_SUBMODULES = [
+ 'core',
+ 'doc.structured_arrays',
+ 'f2py',
+ 'linalg',
+ 'lib',
+ 'lib.recfunctions',
+ 'fft',
+ 'ma',
+ 'polynomial',
+ 'matrixlib',
+ 'random',
+ 'testing',
+]
+
+# Docs for these modules are included in the parent module
+OTHER_MODULE_DOCS = {
+ 'fftpack.convolve': 'fftpack',
+ 'io.wavfile': 'io',
+ 'io.arff': 'io',
+}
+
+# these names are known to fail doctesting and we like to keep it that way
+# e.g. sometimes pseudocode is acceptable etc
+DOCTEST_SKIPLIST = set([
+ # cases where NumPy docstrings import things from SciPy:
+ 'numpy.lib.vectorize',
+ 'numpy.random.standard_gamma',
+ 'numpy.random.gamma',
+ 'numpy.random.vonmises',
+ 'numpy.random.power',
+ 'numpy.random.zipf',
+ # remote / local file IO with DataSource is problematic in doctest:
+ 'numpy.lib.DataSource',
+ 'numpy.lib.Repository',
+])
+
+# these names are not required to be present in ALL despite being in
+# autosummary:: listing
+REFGUIDE_ALL_SKIPLIST = [
+ r'scipy\.sparse\.linalg',
+ r'scipy\.spatial\.distance',
+ r'scipy\.linalg\.blas\.[sdczi].*',
+ r'scipy\.linalg\.lapack\.[sdczi].*',
+]
+
+# these names are not required to be in an autosummary:: listing
+# despite being in ALL
+REFGUIDE_AUTOSUMMARY_SKIPLIST = [
+ # NOTE: should NumPy have a better match between autosummary
+ # listings and __all__? For now, TR isn't convinced this is a
+ # priority -- focus on just getting docstrings executed / correct
+ r'numpy\.*',
+]
+# deprecated windows in scipy.signal namespace
+for name in ('barthann', 'bartlett', 'blackmanharris', 'blackman', 'bohman',
+ 'boxcar', 'chebwin', 'cosine', 'exponential', 'flattop',
+ 'gaussian', 'general_gaussian', 'hamming', 'hann', 'hanning',
+ 'kaiser', 'nuttall', 'parzen', 'slepian', 'triang', 'tukey'):
+ REFGUIDE_AUTOSUMMARY_SKIPLIST.append(r'scipy\.signal\.' + name)
+
+HAVE_MATPLOTLIB = False
+
+
+def short_path(path, cwd=None):
+ """
+ Return relative or absolute path name, whichever is shortest.
+ """
+ if not isinstance(path, str):
+ return path
+ if cwd is None:
+ cwd = os.getcwd()
+ abspath = os.path.abspath(path)
+ relpath = os.path.relpath(path, cwd)
+ if len(abspath) <= len(relpath):
+ return abspath
+ return relpath
+
+
+def find_names(module, names_dict):
+ # Refguide entries:
+ #
+ # - 3 spaces followed by function name, and maybe some spaces, some
+ # dashes, and an explanation; only function names listed in
+ # refguide are formatted like this (mostly, there may be some false
+ # positives)
+ #
+ # - special directives, such as data and function
+ #
+ # - (scipy.constants only): quoted list
+ #
+ patterns = [
+ r"^\s\s\s([a-z_0-9A-Z]+)(\s+-+.*)?$",
+ r"^\.\. (?:data|function)::\s*([a-z_0-9A-Z]+)\s*$"
+ ]
+
+ if module.__name__ == 'scipy.constants':
+ patterns += ["^``([a-z_0-9A-Z]+)``"]
+
+ patterns = [re.compile(pattern) for pattern in patterns]
+ module_name = module.__name__
+
+ for line in module.__doc__.splitlines():
+ res = re.search(r"^\s*\.\. (?:currentmodule|module):: ([a-z0-9A-Z_.]+)\s*$", line)
+ if res:
+ module_name = res.group(1)
+ continue
+
+ for pattern in patterns:
+ res = re.match(pattern, line)
+ if res is not None:
+ name = res.group(1)
+ entry = '.'.join([module_name, name])
+ names_dict.setdefault(module_name, set()).add(name)
+ break
+
+
+def get_all_dict(module):
+ """Return a copy of the __all__ dict with irrelevant items removed."""
+ if hasattr(module, "__all__"):
+ all_dict = copy.deepcopy(module.__all__)
+ else:
+ all_dict = copy.deepcopy(dir(module))
+ all_dict = [name for name in all_dict
+ if not name.startswith("_")]
+ for name in ['absolute_import', 'division', 'print_function']:
+ try:
+ all_dict.remove(name)
+ except ValueError:
+ pass
+ if not all_dict:
+ # Must be a pure documentation module like doc.structured_arrays
+ all_dict.append('__doc__')
+
+ # Modules are almost always private; real submodules need a separate
+ # run of refguide_check.
+ all_dict = [name for name in all_dict
+ if not inspect.ismodule(getattr(module, name, None))]
+
+ deprecated = []
+ not_deprecated = []
+ for name in all_dict:
+ f = getattr(module, name, None)
+ if callable(f) and is_deprecated(f):
+ deprecated.append(name)
+ else:
+ not_deprecated.append(name)
+
+ others = set(dir(module)).difference(set(deprecated)).difference(set(not_deprecated))
+
+ return not_deprecated, deprecated, others
+
+
+def compare(all_dict, others, names, module_name):
+ """Return sets of objects only in __all__, refguide, or completely missing."""
+ only_all = set()
+ for name in all_dict:
+ if name not in names:
+ for pat in REFGUIDE_AUTOSUMMARY_SKIPLIST:
+ if re.match(pat, module_name + '.' + name):
+ break
+ else:
+ only_all.add(name)
+
+ only_ref = set()
+ missing = set()
+ for name in names:
+ if name not in all_dict:
+ for pat in REFGUIDE_ALL_SKIPLIST:
+ if re.match(pat, module_name + '.' + name):
+ if name not in others:
+ missing.add(name)
+ break
+ else:
+ only_ref.add(name)
+
+ return only_all, only_ref, missing
+
+def is_deprecated(f):
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("error")
+ try:
+ f(**{"not a kwarg":None})
+ except DeprecationWarning:
+ return True
+ except Exception:
+ pass
+ return False
+
+def check_items(all_dict, names, deprecated, others, module_name, dots=True):
+ num_all = len(all_dict)
+ num_ref = len(names)
+
+ output = ""
+
+ output += "Non-deprecated objects in __all__: %i\n" % num_all
+ output += "Objects in refguide: %i\n\n" % num_ref
+
+ only_all, only_ref, missing = compare(all_dict, others, names, module_name)
+ dep_in_ref = set(only_ref).intersection(deprecated)
+ only_ref = set(only_ref).difference(deprecated)
+
+ if len(dep_in_ref) > 0:
+ output += "Deprecated objects in refguide::\n\n"
+ for name in sorted(deprecated):
+ output += " " + name + "\n"
+
+ if len(only_all) == len(only_ref) == len(missing) == 0:
+ if dots:
+ output_dot('.')
+ return [(None, True, output)]
+ else:
+ if len(only_all) > 0:
+ output += "ERROR: objects in %s.__all__ but not in refguide::\n\n" % module_name
+ for name in sorted(only_all):
+ output += " " + name + "\n"
+
+ output += "\nThis issue can be fixed by adding these objects to\n"
+ output += "the function listing in __init__.py for this module\n"
+
+ if len(only_ref) > 0:
+ output += "ERROR: objects in refguide but not in %s.__all__::\n\n" % module_name
+ for name in sorted(only_ref):
+ output += " " + name + "\n"
+
+ output += "\nThis issue should likely be fixed by removing these objects\n"
+ output += "from the function listing in __init__.py for this module\n"
+ output += "or adding them to __all__.\n"
+
+ if len(missing) > 0:
+ output += "ERROR: missing objects::\n\n"
+ for name in sorted(missing):
+ output += " " + name + "\n"
+
+ if dots:
+ output_dot('F')
+ return [(None, False, output)]
+
+
+def validate_rst_syntax(text, name, dots=True):
+ if text is None:
+ if dots:
+ output_dot('E')
+ return False, "ERROR: %s: no documentation" % (name,)
+
+ ok_unknown_items = set([
+ 'mod', 'currentmodule', 'autosummary', 'data', 'attr',
+ 'obj', 'versionadded', 'versionchanged', 'module', 'class',
+ 'ref', 'func', 'toctree', 'moduleauthor', 'term', 'c:member',
+ 'sectionauthor', 'codeauthor', 'eq', 'doi', 'DOI', 'arXiv', 'arxiv'
+ ])
+
+ # Run through docutils
+ error_stream = io.StringIO()
+
+ def resolve(name, is_label=False):
+ return ("http://foo", name)
+
+ token = '<RST-VALIDATE-SYNTAX-CHECK>'
+
+ docutils.core.publish_doctree(
+ text, token,
+ settings_overrides = dict(halt_level=5,
+ traceback=True,
+ default_reference_context='title-reference',
+ default_role='emphasis',
+ link_base='',
+ resolve_name=resolve,
+ stylesheet_path='',
+ raw_enabled=0,
+ file_insertion_enabled=0,
+ warning_stream=error_stream))
+
+ # Print errors, disregarding unimportant ones
+ error_msg = error_stream.getvalue()
+ errors = error_msg.split(token)
+ success = True
+ output = ""
+
+ for error in errors:
+ lines = error.splitlines()
+ if not lines:
+ continue
+
+ m = re.match(r'.*Unknown (?:interpreted text role|directive type) "(.*)".*$', lines[0])
+ if m:
+ if m.group(1) in ok_unknown_items:
+ continue
+
+ m = re.match(r'.*Error in "math" directive:.*unknown option: "label"', " ".join(lines), re.S)
+ if m:
+ continue
+
+ output += name + lines[0] + "::\n " + "\n ".join(lines[1:]).rstrip() + "\n"
+ success = False
+
+ if not success:
+ output += " " + "-"*72 + "\n"
+ for lineno, line in enumerate(text.splitlines()):
+ output += " %-4d %s\n" % (lineno+1, line)
+ output += " " + "-"*72 + "\n\n"
+
+ if dots:
+ output_dot('.' if success else 'F')
+ return success, output
+
+
+def output_dot(msg='.', stream=sys.stderr):
+ stream.write(msg)
+ stream.flush()
+
+
+def check_rest(module, names, dots=True):
+ """
+ Check reStructuredText formatting of docstrings
+
+ Returns: [(name, success_flag, output), ...]
+ """
+
+ try:
+ skip_types = (dict, str, unicode, float, int)
+ except NameError:
+ # python 3
+ skip_types = (dict, str, float, int)
+
+
+ results = []
+
+ if module.__name__[6:] not in OTHER_MODULE_DOCS:
+ results += [(module.__name__,) +
+ validate_rst_syntax(inspect.getdoc(module),
+ module.__name__, dots=dots)]
+
+ for name in names:
+ full_name = module.__name__ + '.' + name
+ obj = getattr(module, name, None)
+
+ if obj is None:
+ results.append((full_name, False, "%s has no docstring" % (full_name,)))
+ continue
+ elif isinstance(obj, skip_types):
+ continue
+
+ if inspect.ismodule(obj):
+ text = inspect.getdoc(obj)
+ else:
+ try:
+ text = str(get_doc_object(obj))
+ except Exception:
+ import traceback
+ results.append((full_name, False,
+ "Error in docstring format!\n" +
+ traceback.format_exc()))
+ continue
+
+ m = re.search("([\x00-\x09\x0b-\x1f])", text)
+ if m:
+ msg = ("Docstring contains a non-printable character %r! "
+ "Maybe forgot r\"\"\"?" % (m.group(1),))
+ results.append((full_name, False, msg))
+ continue
+
+ try:
+ src_file = short_path(inspect.getsourcefile(obj))
+ except TypeError:
+ src_file = None
+
+ if src_file:
+ file_full_name = src_file + ':' + full_name
+ else:
+ file_full_name = full_name
+
+ results.append((full_name,) + validate_rst_syntax(text, file_full_name, dots=dots))
+
+ return results
+
+
+### Doctest helpers ####
+
+# the namespace to run examples in
+DEFAULT_NAMESPACE = {'np': np}
+
+# the namespace to do checks in
+CHECK_NAMESPACE = {
+ 'np': np,
+ 'assert_allclose': np.testing.assert_allclose,
+ 'assert_equal': np.testing.assert_equal,
+ # recognize numpy repr's
+ 'array': np.array,
+ 'matrix': np.matrix,
+ 'int64': np.int64,
+ 'uint64': np.uint64,
+ 'int8': np.int8,
+ 'int32': np.int32,
+ 'float32': np.float32,
+ 'float64': np.float64,
+ 'dtype': np.dtype,
+ 'nan': np.nan,
+ 'NaN': np.nan,
+ 'inf': np.inf,
+ 'Inf': np.inf,}
+
+
+class DTRunner(doctest.DocTestRunner):
+ DIVIDER = "\n"
+
+ def __init__(self, item_name, checker=None, verbose=None, optionflags=0):
+ self._item_name = item_name
+ doctest.DocTestRunner.__init__(self, checker=checker, verbose=verbose,
+ optionflags=optionflags)
+
+ def _report_item_name(self, out, new_line=False):
+ if self._item_name is not None:
+ if new_line:
+ out("\n")
+ self._item_name = None
+
+ def report_start(self, out, test, example):
+ self._checker._source = example.source
+ return doctest.DocTestRunner.report_start(self, out, test, example)
+
+ def report_success(self, out, test, example, got):
+ if self._verbose:
+ self._report_item_name(out, new_line=True)
+ return doctest.DocTestRunner.report_success(self, out, test, example, got)
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ self._report_item_name(out)
+ return doctest.DocTestRunner.report_unexpected_exception(
+ self, out, test, example, exc_info)
+
+ def report_failure(self, out, test, example, got):
+ self._report_item_name(out)
+ return doctest.DocTestRunner.report_failure(self, out, test,
+ example, got)
+
+class Checker(doctest.OutputChecker):
+ obj_pattern = re.compile('at 0x[0-9a-fA-F]+>')
+ int_pattern = re.compile('^[0-9]+L?$')
+ vanilla = doctest.OutputChecker()
+ rndm_markers = {'# random', '# Random', '#random', '#Random', "# may vary",
+ "# uninitialized", "#uninitialized"}
+ stopwords = {'plt.', '.hist', '.show', '.ylim', '.subplot(',
+ 'set_title', 'imshow', 'plt.show', '.axis(', '.plot(',
+ '.bar(', '.title', '.ylabel', '.xlabel', 'set_ylim', 'set_xlim',
+ '# reformatted', '.set_xlabel(', '.set_ylabel(', '.set_zlabel(',
+ '.set(xlim=', '.set(ylim=', '.set(xlabel=', '.set(ylabel='}
+
+ def __init__(self, parse_namedtuples=True, ns=None, atol=1e-8, rtol=1e-2):
+ self.parse_namedtuples = parse_namedtuples
+ self.atol, self.rtol = atol, rtol
+ if ns is None:
+ self.ns = dict(CHECK_NAMESPACE)
+ else:
+ self.ns = ns
+
+ def check_output(self, want, got, optionflags):
+ # cut it short if they are equal
+ if want == got:
+ return True
+
+ # skip stopwords in source
+ if any(word in self._source for word in self.stopwords):
+ return True
+
+ # skip random stuff
+ if any(word in want for word in self.rndm_markers):
+ return True
+
+ # skip function/object addresses
+ if self.obj_pattern.search(got):
+ return True
+
+ # ignore comments (e.g. signal.freqresp)
+ if want.lstrip().startswith("#"):
+ return True
+
+ # python 2 long integers are equal to python 3 integers
+ if self.int_pattern.match(want) and self.int_pattern.match(got):
+ if want.rstrip("L\r\n") == got.rstrip("L\r\n"):
+ return True
+
+ # try the standard doctest
+ try:
+ if self.vanilla.check_output(want, got, optionflags):
+ return True
+ except Exception:
+ pass
+
+ # OK then, convert strings to objects
+ try:
+ a_want = eval(want, dict(self.ns))
+ a_got = eval(got, dict(self.ns))
+ except Exception:
+ # Maybe we're printing a numpy array? This produces invalid python
+ # code: `print(np.arange(3))` produces "[0 1 2]" w/o commas between
+ # values. So, reinsert commas and retry.
+            # TODO: handle (1) abbreviation (`print(np.arange(10000))`), and
+ # (2) n-dim arrays with n > 1
+ s_want = want.strip()
+ s_got = got.strip()
+ cond = (s_want.startswith("[") and s_want.endswith("]") and
+ s_got.startswith("[") and s_got.endswith("]"))
+ if cond:
+ s_want = ", ".join(s_want[1:-1].split())
+ s_got = ", ".join(s_got[1:-1].split())
+ return self.check_output(s_want, s_got, optionflags)
+
+ if not self.parse_namedtuples:
+ return False
+ # suppose that "want" is a tuple, and "got" is smth like
+ # MoodResult(statistic=10, pvalue=0.1).
+ # Then convert the latter to the tuple (10, 0.1),
+ # and then compare the tuples.
+ try:
+ num = len(a_want)
+ regex = ('[\w\d_]+\(' +
+ ', '.join(['[\w\d_]+=(.+)']*num) +
+ '\)')
+ grp = re.findall(regex, got.replace('\n', ' '))
+ if len(grp) > 1: # no more than one for now
+ return False
+ # fold it back to a tuple
+ got_again = '(' + ', '.join(grp[0]) + ')'
+ return self.check_output(want, got_again, optionflags)
+ except Exception:
+ return False
+
+ # ... and defer to numpy
+ try:
+ return self._do_check(a_want, a_got)
+ except Exception:
+ # heterog tuple, eg (1, np.array([1., 2.]))
+ try:
+ return all(self._do_check(w, g) for w, g in zip(a_want, a_got))
+ except (TypeError, ValueError):
+ return False
+
+ def _do_check(self, want, got):
+ # This should be done exactly as written to correctly handle all of
+ # numpy-comparable objects, strings, and heterogeneous tuples
+ try:
+ if want == got:
+ return True
+ except Exception:
+ pass
+ return np.allclose(want, got, atol=self.atol, rtol=self.rtol)
+
+
+def _run_doctests(tests, full_name, verbose, doctest_warnings):
+ """Run modified doctests for the set of `tests`.
+
+ Returns: list of [(success_flag, output), ...]
+ """
+ flags = NORMALIZE_WHITESPACE | ELLIPSIS | IGNORE_EXCEPTION_DETAIL
+ runner = DTRunner(full_name, checker=Checker(), optionflags=flags,
+ verbose=verbose)
+
+ output = []
+ success = True
+ def out(msg):
+ output.append(msg)
+
+ class MyStderr(object):
+ """Redirect stderr to the current stdout"""
+ def write(self, msg):
+ if doctest_warnings:
+ sys.stdout.write(msg)
+ else:
+ out(msg)
+
+ # a flush method is required when a doctest uses multiprocessing
+ # multiprocessing/popen_fork.py flushes sys.stderr
+ def flush(self):
+ if doctest_warnings:
+ sys.stdout.flush()
+
+ # Run tests, trying to restore global state afterward
+ old_printoptions = np.get_printoptions()
+ old_errstate = np.seterr()
+ old_stderr = sys.stderr
+ cwd = os.getcwd()
+ tmpdir = tempfile.mkdtemp()
+ sys.stderr = MyStderr()
+ try:
+ os.chdir(tmpdir)
+
+ # try to ensure random seed is NOT reproducible
+ np.random.seed(None)
+
+ for t in tests:
+ t.filename = short_path(t.filename, cwd)
+ fails, successes = runner.run(t, out=out)
+ if fails > 0:
+ success = False
+ finally:
+ sys.stderr = old_stderr
+ os.chdir(cwd)
+ shutil.rmtree(tmpdir)
+ np.set_printoptions(**old_printoptions)
+ np.seterr(**old_errstate)
+
+ return success, output
+
+
+def check_doctests(module, verbose, ns=None,
+ dots=True, doctest_warnings=False):
+ """Check code in docstrings of the module's public symbols.
+
+ Returns: list of [(item_name, success_flag, output), ...]
+ """
+ if ns is None:
+ ns = dict(DEFAULT_NAMESPACE)
+
+ # Loop over non-deprecated items
+ results = []
+
+ for name in get_all_dict(module)[0]:
+ full_name = module.__name__ + '.' + name
+
+ if full_name in DOCTEST_SKIPLIST:
+ continue
+
+ try:
+ obj = getattr(module, name)
+ except AttributeError:
+ import traceback
+ results.append((full_name, False,
+ "Missing item!\n" +
+ traceback.format_exc()))
+ continue
+
+ finder = doctest.DocTestFinder()
+ try:
+ tests = finder.find(obj, name, globs=dict(ns))
+ except Exception:
+ import traceback
+ results.append((full_name, False,
+ "Failed to get doctests!\n" +
+ traceback.format_exc()))
+ continue
+
+ success, output = _run_doctests(tests, full_name, verbose,
+ doctest_warnings)
+
+ if dots:
+ output_dot('.' if success else 'F')
+
+ results.append((full_name, success, "".join(output)))
+
+ if HAVE_MATPLOTLIB:
+ import matplotlib.pyplot as plt
+ plt.close('all')
+
+ return results
+
+
+def check_doctests_testfile(fname, verbose, ns=None,
+ dots=True, doctest_warnings=False):
+ """Check code in a text file.
+
+ Mimic `check_doctests` above, differing mostly in test discovery.
+ (which is borrowed from stdlib's doctest.testfile here,
+ https://github.com/python-git/python/blob/master/Lib/doctest.py)
+
+ Returns: list of [(item_name, success_flag, output), ...]
+
+ Notes
+ -----
+
+ We also try to weed out pseudocode:
+ * We maintain a list of exceptions which signal pseudocode,
+ * We split the text file into "blocks" of code separated by empty lines
+ and/or intervening text.
+ * If a block contains a marker, the whole block is then assumed to be
+ pseudocode. It is then not being doctested.
+
+ The rationale is that typically, the text looks like this:
+
+ blah
+ <BLANKLINE>
+ >>> from numpy import some_module # pseudocode!
+ >>> func = some_module.some_function
+ >>> func(42) # still pseudocode
+ 146
+ <BLANKLINE>
+ blah
+ <BLANKLINE>
+ >>> 2 + 3 # real code, doctest it
+ 5
+
+ """
+ results = []
+
+ if ns is None:
+ ns = dict(DEFAULT_NAMESPACE)
+
+ _, short_name = os.path.split(fname)
+ if short_name in DOCTEST_SKIPLIST:
+ return results
+
+ full_name = fname
+ if sys.version_info.major <= 2:
+ with open(fname) as f:
+ text = f.read()
+ else:
+ with open(fname, encoding='utf-8') as f:
+ text = f.read()
+
+ PSEUDOCODE = set(['some_function', 'some_module', 'import example',
+ 'ctypes.CDLL', # likely need compiling, skip it
+                      'integrate.nquad(func,' # ctypes integrate tutorial
+ ])
+
+ # split the text into "blocks" and try to detect and omit pseudocode blocks.
+ parser = doctest.DocTestParser()
+ good_parts = []
+ for part in text.split('\n\n'):
+ tests = parser.get_doctest(part, ns, fname, fname, 0)
+ if any(word in ex.source for word in PSEUDOCODE
+ for ex in tests.examples):
+ # omit it
+ pass
+ else:
+ # `part` looks like a good code, let's doctest it
+ good_parts += [part]
+
+ # Reassemble the good bits and doctest them:
+ good_text = '\n\n'.join(good_parts)
+ tests = parser.get_doctest(good_text, ns, fname, fname, 0)
+ success, output = _run_doctests([tests], full_name, verbose,
+ doctest_warnings)
+
+ if dots:
+ output_dot('.' if success else 'F')
+
+ results.append((full_name, success, "".join(output)))
+
+ if HAVE_MATPLOTLIB:
+ import matplotlib.pyplot as plt
+ plt.close('all')
+
+ return results
+
+
+def init_matplotlib():
+ global HAVE_MATPLOTLIB
+
+ try:
+ import matplotlib
+ matplotlib.use('Agg')
+ HAVE_MATPLOTLIB = True
+ except ImportError:
+ HAVE_MATPLOTLIB = False
+
+
+def main(argv):
+ parser = ArgumentParser(usage=__doc__.lstrip())
+ parser.add_argument("module_names", metavar="SUBMODULES", default=[],
+ nargs='*', help="Submodules to check (default: all public)")
+ parser.add_argument("--doctests", action="store_true", help="Run also doctests")
+ parser.add_argument("-v", "--verbose", action="count", default=0)
+ parser.add_argument("--doctest-warnings", action="store_true",
+ help="Enforce warning checking for doctests")
+ parser.add_argument("--skip-tutorial", action="store_true",
+ help="Skip running doctests in the tutorial.")
+ args = parser.parse_args(argv)
+
+ modules = []
+ names_dict = {}
+
+ if args.module_names:
+ args.skip_tutorial = True
+ else:
+ args.module_names = list(PUBLIC_SUBMODULES)
+
+ os.environ['SCIPY_PIL_IMAGE_VIEWER'] = 'true'
+
+ module_names = list(args.module_names)
+ for name in list(module_names):
+ if name in OTHER_MODULE_DOCS:
+ name = OTHER_MODULE_DOCS[name]
+ if name not in module_names:
+ module_names.append(name)
+
+ for submodule_name in module_names:
+ module_name = BASE_MODULE + '.' + submodule_name
+ __import__(module_name)
+ module = sys.modules[module_name]
+
+ if submodule_name not in OTHER_MODULE_DOCS:
+ find_names(module, names_dict)
+
+ if submodule_name in args.module_names:
+ modules.append(module)
+
+ dots = True
+ success = True
+ results = []
+
+ print("Running checks for %d modules:" % (len(modules),))
+
+ if args.doctests or not args.skip_tutorial:
+ init_matplotlib()
+
+ for module in modules:
+ if dots:
+ if module is not modules[0]:
+ sys.stderr.write(' ')
+ sys.stderr.write(module.__name__ + ' ')
+ sys.stderr.flush()
+
+ all_dict, deprecated, others = get_all_dict(module)
+ names = names_dict.get(module.__name__, set())
+
+ mod_results = []
+ mod_results += check_items(all_dict, names, deprecated, others, module.__name__)
+ mod_results += check_rest(module, set(names).difference(deprecated),
+ dots=dots)
+ if args.doctests:
+ mod_results += check_doctests(module, (args.verbose >= 2), dots=dots,
+ doctest_warnings=args.doctest_warnings)
+
+ for v in mod_results:
+ assert isinstance(v, tuple), v
+
+ results.append((module, mod_results))
+
+ if dots:
+ sys.stderr.write("\n")
+ sys.stderr.flush()
+
+ if not args.skip_tutorial:
+ base_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')
+ tut_path = os.path.join(base_dir, 'doc', 'source', 'tutorial', '*.rst')
+ print('\nChecking tutorial files at %s:' % os.path.relpath(tut_path, os.getcwd()))
+ for filename in sorted(glob.glob(tut_path)):
+ if dots:
+ sys.stderr.write('\n')
+ sys.stderr.write(os.path.split(filename)[1] + ' ')
+ sys.stderr.flush()
+
+ tut_results = check_doctests_testfile(filename, (args.verbose >= 2),
+ dots=dots, doctest_warnings=args.doctest_warnings)
+
+ def scratch(): pass # stub out a "module", see below
+ scratch.__name__ = filename
+ results.append((scratch, tut_results))
+
+ if dots:
+ sys.stderr.write("\n")
+ sys.stderr.flush()
+
+ # Report results
+ all_success = True
+
+ for module, mod_results in results:
+ success = all(x[1] for x in mod_results)
+ all_success = all_success and success
+
+ if success and args.verbose == 0:
+ continue
+
+ print("")
+ print("=" * len(module.__name__))
+ print(module.__name__)
+ print("=" * len(module.__name__))
+ print("")
+
+ for name, success, output in mod_results:
+ if name is None:
+ if not success or args.verbose >= 1:
+ print(output.strip())
+ print("")
+ elif not success or (args.verbose >= 2 and output.strip()):
+ print(name)
+ print("-"*len(name))
+ print("")
+ print(output.strip())
+ print("")
+
+ if all_success:
+ print("\nOK: refguide and doctests checks passed!")
+ sys.exit(0)
+ else:
+ print("\nERROR: refguide or doctests have errors")
+ sys.exit(1)
+
+
+if __name__ == '__main__':
+ main(argv=sys.argv[1:])
diff --git a/tools/swig/test/testFarray.py b/tools/swig/test/testFarray.py
index 0037dc9b3..e8bf711c5 100755
--- a/tools/swig/test/testFarray.py
+++ b/tools/swig/test/testFarray.py
@@ -15,7 +15,7 @@ else: BadListError = ValueError
# Add the distutils-generated build directory to the python search path and then
# import the extension module
-libDir = "lib.%s-%s" % (get_platform(), sys.version[:3])
+libDir = "lib.{}-{}.{}".format(get_platform(), *sys.version_info[:2])
sys.path.insert(0, os.path.join("build", libDir))
import Farray
diff --git a/tools/swig/test/testFlat.py b/tools/swig/test/testFlat.py
index bd96bc778..71be277b1 100755
--- a/tools/swig/test/testFlat.py
+++ b/tools/swig/test/testFlat.py
@@ -37,7 +37,7 @@ class FlatTestCase(unittest.TestCase):
x = np.frombuffer(pack_output, dtype=self.typeCode)
y = x.copy()
process(y)
- self.assertEquals(np.all((x+1)==y),True)
+ self.assertEqual(np.all((x+1)==y),True)
def testProcess3D(self):
"Test Process function 3D array"
@@ -50,7 +50,7 @@ class FlatTestCase(unittest.TestCase):
x.shape = (2,3,4)
y = x.copy()
process(y)
- self.assertEquals(np.all((x+1)==y),True)
+ self.assertEqual(np.all((x+1)==y),True)
def testProcess3DTranspose(self):
"Test Process function 3D array, FORTRAN order"
@@ -63,7 +63,7 @@ class FlatTestCase(unittest.TestCase):
x.shape = (2,3,4)
y = x.copy()
process(y.T)
- self.assertEquals(np.all((x.T+1)==y.T),True)
+ self.assertEqual(np.all((x.T+1)==y.T),True)
def testProcessNoncontiguous(self):
"Test Process function with non-contiguous array, which should raise an error"
diff --git a/tools/swig/test/testFortran.py b/tools/swig/test/testFortran.py
index b7783be90..426e8943d 100644
--- a/tools/swig/test/testFortran.py
+++ b/tools/swig/test/testFortran.py
@@ -31,14 +31,14 @@ class FortranTestCase(unittest.TestCase):
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray(np.arange(9).reshape(3, 3),
self.typeCode)
- self.assertEquals(second(matrix), 3)
+ self.assertEqual(second(matrix), 3)
def testSecondElementObject(self):
"Test Fortran matrix initialized from nested list fortranarray"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
second = Fortran.__dict__[self.typeStr + "SecondElement"]
matrix = np.asfortranarray([[0, 1, 2], [3, 4, 5], [6, 7, 8]], self.typeCode)
- self.assertEquals(second(matrix), 3)
+ self.assertEqual(second(matrix), 3)
######################################################################
diff --git a/tools/swig/test/testMatrix.py b/tools/swig/test/testMatrix.py
index 7127678f7..065be0d44 100755
--- a/tools/swig/test/testMatrix.py
+++ b/tools/swig/test/testMatrix.py
@@ -30,7 +30,7 @@ class MatrixTestCase(unittest.TestCase):
print(self.typeStr, "... ", end=' ', file=sys.stderr)
det = Matrix.__dict__[self.typeStr + "Det"]
matrix = [[8, 7], [6, 9]]
- self.assertEquals(det(matrix), 30)
+ self.assertEqual(det(matrix), 30)
# Test (type IN_ARRAY2[ANY][ANY]) typemap
def testDetBadList(self):
@@ -69,7 +69,7 @@ class MatrixTestCase(unittest.TestCase):
print(self.typeStr, "... ", end=' ', file=sys.stderr)
max = Matrix.__dict__[self.typeStr + "Max"]
matrix = [[6, 5, 4], [3, 2, 1]]
- self.assertEquals(max(matrix), 6)
+ self.assertEqual(max(matrix), 6)
# Test (type* IN_ARRAY2, int DIM1, int DIM2) typemap
def testMaxBadList(self):
@@ -99,7 +99,7 @@ class MatrixTestCase(unittest.TestCase):
print(self.typeStr, "... ", end=' ', file=sys.stderr)
min = Matrix.__dict__[self.typeStr + "Min"]
matrix = [[9, 8], [7, 6], [5, 4]]
- self.assertEquals(min(matrix), 4)
+ self.assertEqual(min(matrix), 4)
# Test (int DIM1, int DIM2, type* IN_ARRAY2) typemap
def testMinBadList(self):
@@ -130,7 +130,7 @@ class MatrixTestCase(unittest.TestCase):
scale = Matrix.__dict__[self.typeStr + "Scale"]
matrix = np.array([[1, 2, 3], [2, 1, 2], [3, 2, 1]], self.typeCode)
scale(matrix, 4)
- self.assertEquals((matrix == [[4, 8, 12], [8, 4, 8], [12, 8, 4]]).all(), True)
+ self.assertEqual((matrix == [[4, 8, 12], [8, 4, 8], [12, 8, 4]]).all(), True)
# Test (type INPLACE_ARRAY2[ANY][ANY]) typemap
def testScaleWrongDim(self):
@@ -236,8 +236,8 @@ class MatrixTestCase(unittest.TestCase):
print(self.typeStr, "... ", end=' ', file=sys.stderr)
luSplit = Matrix.__dict__[self.typeStr + "LUSplit"]
lower, upper = luSplit([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
- self.assertEquals((lower == [[1, 0, 0], [4, 5, 0], [7, 8, 9]]).all(), True)
- self.assertEquals((upper == [[0, 2, 3], [0, 0, 6], [0, 0, 0]]).all(), True)
+ self.assertEqual((lower == [[1, 0, 0], [4, 5, 0], [7, 8, 9]]).all(), True)
+ self.assertEqual((upper == [[0, 2, 3], [0, 0, 6], [0, 0, 0]]).all(), True)
######################################################################
diff --git a/tools/swig/test/testSuperTensor.py b/tools/swig/test/testSuperTensor.py
index cdd88530b..97fe80c33 100644
--- a/tools/swig/test/testSuperTensor.py
+++ b/tools/swig/test/testSuperTensor.py
@@ -73,7 +73,7 @@ class SuperTensorTestCase(unittest.TestCase):
print(self.typeStr, "... ", file=sys.stderr)
max = SuperTensor.__dict__[self.typeStr + "Max"]
supertensor = [[[[1, 2], [3, 4]], [[5, 6], [7, 8]]], [[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]
- self.assertEquals(max(supertensor), 8)
+ self.assertEqual(max(supertensor), 8)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
@@ -103,7 +103,7 @@ class SuperTensorTestCase(unittest.TestCase):
print(self.typeStr, "... ", file=sys.stderr)
min = SuperTensor.__dict__[self.typeStr + "Min"]
supertensor = [[[[9, 8], [7, 6]], [[5, 4], [3, 2]]], [[[9, 8], [7, 6]], [[5, 4], [3, 2]]]]
- self.assertEquals(min(supertensor), 2)
+ self.assertEqual(min(supertensor), 2)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
@@ -135,7 +135,7 @@ class SuperTensorTestCase(unittest.TestCase):
supertensor = np.arange(3*3*3*3, dtype=self.typeCode).reshape((3, 3, 3, 3))
answer = supertensor.copy()*4
scale(supertensor, 4)
- self.assertEquals((supertensor == answer).all(), True)
+ self.assertEqual((supertensor == answer).all(), True)
# Test (type INPLACE_ARRAY3[ANY][ANY][ANY]) typemap
def testScaleWrongType(self):
@@ -252,8 +252,8 @@ class SuperTensorTestCase(unittest.TestCase):
answer_upper = [[[[0, 0], [0, 1]], [[0, 1], [1, 1]]], [[[0, 1], [1, 1]], [[1, 1], [1, 1]]]]
answer_lower = [[[[1, 1], [1, 0]], [[1, 0], [0, 0]]], [[[1, 0], [0, 0]], [[0, 0], [0, 0]]]]
lower, upper = luSplit(supertensor)
- self.assertEquals((lower == answer_lower).all(), True)
- self.assertEquals((upper == answer_upper).all(), True)
+ self.assertEqual((lower == answer_lower).all(), True)
+ self.assertEqual((upper == answer_upper).all(), True)
######################################################################
diff --git a/tools/swig/test/testTensor.py b/tools/swig/test/testTensor.py
index 61dc82090..ac1b7491a 100755
--- a/tools/swig/test/testTensor.py
+++ b/tools/swig/test/testTensor.py
@@ -34,7 +34,7 @@ class TensorTestCase(unittest.TestCase):
tensor = [[[0, 1], [2, 3]],
[[3, 2], [1, 0]]]
if isinstance(self.result, int):
- self.assertEquals(norm(tensor), self.result)
+ self.assertEqual(norm(tensor), self.result)
else:
self.assertAlmostEqual(norm(tensor), self.result, 6)
@@ -79,7 +79,7 @@ class TensorTestCase(unittest.TestCase):
max = Tensor.__dict__[self.typeStr + "Max"]
tensor = [[[1, 2], [3, 4]],
[[5, 6], [7, 8]]]
- self.assertEquals(max(tensor), 8)
+ self.assertEqual(max(tensor), 8)
# Test (type* IN_ARRAY3, int DIM1, int DIM2, int DIM3) typemap
def testMaxBadList(self):
@@ -111,7 +111,7 @@ class TensorTestCase(unittest.TestCase):
min = Tensor.__dict__[self.typeStr + "Min"]
tensor = [[[9, 8], [7, 6]],
[[5, 4], [3, 2]]]
- self.assertEquals(min(tensor), 2)
+ self.assertEqual(min(tensor), 2)
# Test (int DIM1, int DIM2, int DIM3, type* IN_ARRAY3) typemap
def testMinBadList(self):
@@ -145,7 +145,7 @@ class TensorTestCase(unittest.TestCase):
[[0, 1, 0], [1, 0, 1], [0, 1, 0]],
[[1, 0, 1], [0, 1, 0], [1, 0, 1]]], self.typeCode)
scale(tensor, 4)
- self.assertEquals((tensor == [[[4, 0, 4], [0, 4, 0], [4, 0, 4]],
+ self.assertEqual((tensor == [[[4, 0, 4], [0, 4, 0], [4, 0, 4]],
[[0, 4, 0], [4, 0, 4], [0, 4, 0]],
[[4, 0, 4], [0, 4, 0], [4, 0, 4]]]).all(), True)
@@ -264,9 +264,9 @@ class TensorTestCase(unittest.TestCase):
luSplit = Tensor.__dict__[self.typeStr + "LUSplit"]
lower, upper = luSplit([[[1, 1], [1, 1]],
[[1, 1], [1, 1]]])
- self.assertEquals((lower == [[[1, 1], [1, 0]],
+ self.assertEqual((lower == [[[1, 1], [1, 0]],
[[1, 0], [0, 0]]]).all(), True)
- self.assertEquals((upper == [[[0, 0], [0, 1]],
+ self.assertEqual((upper == [[[0, 0], [0, 1]],
[[0, 1], [1, 1]]]).all(), True)
######################################################################
diff --git a/tools/swig/test/testVector.py b/tools/swig/test/testVector.py
index eaaa75102..45e763b36 100755
--- a/tools/swig/test/testVector.py
+++ b/tools/swig/test/testVector.py
@@ -29,7 +29,7 @@ class VectorTestCase(unittest.TestCase):
"Test length function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
length = Vector.__dict__[self.typeStr + "Length"]
- self.assertEquals(length([5, 12, 0]), 13)
+ self.assertEqual(length([5, 12, 0]), 13)
# Test the (type IN_ARRAY1[ANY]) typemap
def testLengthBadList(self):
@@ -64,7 +64,7 @@ class VectorTestCase(unittest.TestCase):
"Test prod function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
prod = Vector.__dict__[self.typeStr + "Prod"]
- self.assertEquals(prod([1, 2, 3, 4]), 24)
+ self.assertEqual(prod([1, 2, 3, 4]), 24)
# Test the (type* IN_ARRAY1, int DIM1) typemap
def testProdBadList(self):
@@ -92,7 +92,7 @@ class VectorTestCase(unittest.TestCase):
"Test sum function"
print(self.typeStr, "... ", end=' ', file=sys.stderr)
sum = Vector.__dict__[self.typeStr + "Sum"]
- self.assertEquals(sum([5, 6, 7, 8]), 26)
+ self.assertEqual(sum([5, 6, 7, 8]), 26)
# Test the (int DIM1, type* IN_ARRAY1) typemap
def testSumBadList(self):
@@ -122,7 +122,7 @@ class VectorTestCase(unittest.TestCase):
reverse = Vector.__dict__[self.typeStr + "Reverse"]
vector = np.array([1, 2, 4], self.typeCode)
reverse(vector)
- self.assertEquals((vector == [4, 2, 1]).all(), True)
+ self.assertEqual((vector == [4, 2, 1]).all(), True)
# Test the (type INPLACE_ARRAY1[ANY]) typemap
def testReverseWrongDim(self):
@@ -225,8 +225,8 @@ class VectorTestCase(unittest.TestCase):
print(self.typeStr, "... ", end=' ', file=sys.stderr)
eoSplit = Vector.__dict__[self.typeStr + "EOSplit"]
even, odd = eoSplit([1, 2, 3])
- self.assertEquals((even == [1, 0, 3]).all(), True)
- self.assertEquals((odd == [0, 2, 0]).all(), True)
+ self.assertEqual((even == [1, 0, 3]).all(), True)
+ self.assertEqual((odd == [0, 2, 0]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwos(self):
@@ -234,7 +234,7 @@ class VectorTestCase(unittest.TestCase):
print(self.typeStr, "... ", end=' ', file=sys.stderr)
twos = Vector.__dict__[self.typeStr + "Twos"]
vector = twos(5)
- self.assertEquals((vector == [2, 2, 2, 2, 2]).all(), True)
+ self.assertEqual((vector == [2, 2, 2, 2, 2]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testTwosNonInt(self):
@@ -249,7 +249,7 @@ class VectorTestCase(unittest.TestCase):
print(self.typeStr, "... ", end=' ', file=sys.stderr)
threes = Vector.__dict__[self.typeStr + "Threes"]
vector = threes(6)
- self.assertEquals((vector == [3, 3, 3, 3, 3, 3]).all(), True)
+ self.assertEqual((vector == [3, 3, 3, 3, 3, 3]).all(), True)
# Test the (type* ARGOUT_ARRAY1, int DIM1) typemap
def testThreesNonInt(self):
diff --git a/tools/test-installed-numpy.py b/tools/test-installed-numpy.py
deleted file mode 100644
index 14f11b7ed..000000000
--- a/tools/test-installed-numpy.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-from __future__ import division, absolute_import, print_function
-
-# A simple script to test the installed version of numpy by calling
-# 'numpy.test()'. Key features:
-# -- convenient command-line syntax
-# -- sets exit status appropriately, useful for automated test environments
-
-# It would be better to set this up as a module in the numpy namespace, so
-# that it could be run as:
-# python -m numpy.run_tests <args>
-# But, python2.4's -m switch only works with top-level modules, not modules
-# that are inside packages. So, once we drop 2.4 support, maybe...
-
-import sys, os
-# In case we are run from the source directory, we don't want to import numpy
-# from there, we want to import the installed version:
-sys.path.pop(0)
-
-from optparse import OptionParser
-parser = OptionParser("usage: %prog [options] -- [nosetests options]")
-parser.add_option("-v", "--verbose",
- action="count", dest="verbose", default=1,
- help="increase verbosity")
-parser.add_option("--doctests",
- action="store_true", dest="doctests", default=False,
- help="Run doctests in module")
-parser.add_option("--coverage",
- action="store_true", dest="coverage", default=False,
- help="report coverage of NumPy code (requires 'pytest-cov' module")
-parser.add_option("-m", "--mode",
- action="store", dest="mode", default="fast",
- help="'fast', 'full', or something that could be "
- "passed to pytest [default: %default]")
-(options, args) = parser.parse_args()
-
-import numpy
-
-# Check that NPY_RELAXED_STRIDES_CHECKING is active when set.
-# The same flags check is also used in the tests to switch behavior.
-if (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "1") != "0"):
- if not numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING set, but not active.')
- sys.exit(1)
-elif numpy.ones((10, 1), order='C').flags.f_contiguous:
- print('NPY_RELAXED_STRIDES_CHECKING not set, but active.')
- sys.exit(1)
-
-if options.coverage:
- # Produce code coverage XML report for codecov.io
- args += ["--cov-report=xml"]
-
-result = numpy.test(options.mode,
- verbose=options.verbose,
- extra_argv=args,
- doctests=options.doctests,
- coverage=options.coverage)
-
-if result:
- sys.exit(0)
-else:
- sys.exit(1)
diff --git a/tools/travis-before-install.sh b/tools/travis-before-install.sh
index c334e91ae..072ad3bf6 100755
--- a/tools/travis-before-install.sh
+++ b/tools/travis-before-install.sh
@@ -4,6 +4,15 @@ uname -a
free -m
df -h
ulimit -a
+
+if [ -n "$PPC64_LE" ]; then
+ pwd
+ ls -ltrh
+ target=$(python tools/openblas_support.py)
+ sudo cp -r $target/64/lib/* /usr/lib
+ sudo cp $target/64/include/* /usr/include
+fi
+
mkdir builds
pushd builds
@@ -21,11 +30,8 @@ fi
source venv/bin/activate
python -V
-if [ -n "$INSTALL_PICKLE5" ]; then
- pip install pickle5
-fi
+popd
pip install --upgrade pip setuptools
-pip install nose pytz cython pytest
+pip install -r test_requirements.txt
if [ -n "$USE_ASV" ]; then pip install asv; fi
-popd
diff --git a/tools/travis-test.sh b/tools/travis-test.sh
index 2a16b37a3..6094f0ee6 100755
--- a/tools/travis-test.sh
+++ b/tools/travis-test.sh
@@ -25,14 +25,14 @@ if [ -n "$PYTHON_OPTS" ]; then
fi
# make some warnings fatal, mostly to match windows compilers
-werrors="-Werror=declaration-after-statement -Werror=vla "
-werrors+="-Werror=nonnull -Werror=pointer-arith"
+werrors="-Werror=vla -Werror=nonnull -Werror=pointer-arith"
+werrors="$werrors -Werror=implicit-function-declaration"
# build with c99 by default
setup_base()
{
- # use default python flags but remoge sign-compare
+ # use default python flags but remove sign-compare
sysflags="$($PYTHON -c "from distutils import sysconfig; \
print (sysconfig.get_config_var('CFLAGS'))")"
export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare"
@@ -52,7 +52,7 @@ setup_base()
else
# Python3.5-dbg on travis seems to need this
export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized"
- $PYTHON setup.py build_ext --inplace 2>&1 | tee log
+ $PYTHON setup.py build build_src --verbose-cfg build_ext --inplace 2>&1 | tee log
fi
grep -v "_configtest" log \
| grep -vE "ld returned 1|no previously-included files matching|manifest_maker: standard file '-c'" \
@@ -63,56 +63,14 @@ setup_base()
fi
}
-setup_chroot()
-{
- # this can all be replaced with:
- # apt-get install libpython2.7-dev:i386
- # CC="gcc -m32" LDSHARED="gcc -m32 -shared" LDFLAGS="-m32 -shared" \
- # linux32 python setup.py build
- # when travis updates to ubuntu 14.04
- #
- # NumPy may not distinguish between 64 and 32 bit ATLAS in the
- # configuration stage.
- DIR=$1
- set -u
- sudo debootstrap --variant=buildd --include=fakeroot,build-essential \
- --arch=$ARCH --foreign $DIST $DIR
- sudo chroot $DIR ./debootstrap/debootstrap --second-stage
-
- # put the numpy repo in the chroot directory
- sudo rsync -a $TRAVIS_BUILD_DIR $DIR/
-
- # set up repos in the chroot directory for installing packages
- echo deb http://archive.ubuntu.com/ubuntu/ \
- $DIST main restricted universe multiverse \
- | sudo tee -a $DIR/etc/apt/sources.list
- echo deb http://archive.ubuntu.com/ubuntu/ \
- $DIST-updates main restricted universe multiverse \
- | sudo tee -a $DIR/etc/apt/sources.list
- echo deb http://security.ubuntu.com/ubuntu \
- $DIST-security main restricted universe multiverse \
- | sudo tee -a $DIR/etc/apt/sources.list
-
- sudo chroot $DIR bash -c "apt-get update"
- # faster operation with preloaded eatmydata
- sudo chroot $DIR bash -c "apt-get install -qq -y eatmydata"
- echo '/usr/$LIB/libeatmydata.so' | \
- sudo tee -a $DIR/etc/ld.so.preload
-
- # install needed packages
- sudo chroot $DIR bash -c "apt-get install -qq -y \
- libatlas-base-dev gfortran python-dev python-nose python-pip cython \
- python-pytest"
-}
-
run_test()
{
+ $PIP install -r test_requirements.txt
if [ -n "$USE_DEBUG" ]; then
export PYTHONPATH=$PWD
fi
if [ -n "$RUN_COVERAGE" ]; then
- pip install pytest-cov
COVERAGE_FLAG=--coverage
fi
@@ -123,11 +81,18 @@ run_test()
INSTALLDIR=$($PYTHON -c \
"import os; import numpy; print(os.path.dirname(numpy.__file__))")
export PYTHONWARNINGS=default
+
+ if [ -n "$PPC64_LE" ]; then
+ $PYTHON ../tools/openblas_support.py --check_version $OpenBLAS_version
+ fi
+
if [ -n "$RUN_FULL_TESTS" ]; then
export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv"
- $PYTHON ../tools/test-installed-numpy.py -v --mode=full $COVERAGE_FLAG
+ $PYTHON -b ../runtests.py -n -v --durations 10 --mode=full $COVERAGE_FLAG
else
- $PYTHON ../tools/test-installed-numpy.py -v
+ # disable --durations temporarily, pytest currently aborts
+ # when that is used with python3.6-dbg
+ $PYTHON ../runtests.py -n -v # --durations 10
fi
if [ -n "$RUN_COVERAGE" ]; then
@@ -153,6 +118,7 @@ run_test()
if [ -n "$USE_ASV" ]; then
pushd ../benchmarks
+ $PYTHON `which asv` check --python=same
$PYTHON `which asv` machine --machine travis
$PYTHON `which asv` dev 2>&1| tee asv-output.log
if grep -q Traceback asv-output.log; then
@@ -185,21 +151,17 @@ if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
export F90='gfortran --coverage'
export LDFLAGS='--coverage'
fi
- $PYTHON setup.py bdist_wheel
+ $PYTHON setup.py build build_src --verbose-cfg bdist_wheel
# Make another virtualenv to install into
virtualenv --python=`which $PYTHON` venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
- pip install --pre --no-index --upgrade --find-links=. numpy
- pip install nose pytest
-
- if [ -n "$INSTALL_PICKLE5" ]; then
- pip install pickle5
- fi
-
+ $PIP install --pre --no-index --upgrade --find-links=. numpy
popd
+
run_test
+
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
# use an up-to-date pip / setuptools inside the venv
$PIP install -U virtualenv
@@ -215,23 +177,9 @@ elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
- pip install numpy*
- pip install nose pytest
- if [ -n "$INSTALL_PICKLE5" ]; then
- pip install pickle5
- fi
-
+ $PIP install numpy*
popd
run_test
-elif [ -n "$USE_CHROOT" ] && [ $# -eq 0 ]; then
- DIR=/chroot
- setup_chroot $DIR
- # the chroot'ed environment will not have the current locale,
- # avoid any warnings which may disturb testing
- export LANG=C LC_ALL=C
- # run again in chroot with this time testing
- sudo linux32 chroot $DIR bash -c \
- "cd numpy && PYTHON=python PIP=pip IN_CHROOT=1 $0 test"
else
setup_base
run_test
diff --git a/tox.ini b/tox.ini
index c7df36e23..5a6d074aa 100644
--- a/tox.ini
+++ b/tox.ini
@@ -13,9 +13,9 @@
# - Use pip to install the numpy sdist into the virtualenv
# - Run the numpy tests
# To run against a specific subset of Python versions, use:
-# tox -e py27
+# tox -e py37
-# Extra arguments will be passed to test-installed-numpy.py. To run
+# Extra arguments will be passed to runtests.py. To run
# the full testsuite:
# tox full
# To run with extra verbosity:
@@ -26,25 +26,20 @@
[tox]
envlist =
- py27,py34,py35,py36,
- py27-not-relaxed-strides,py34-not-relaxed-strides
+ py35,py36,py37,
+ py37-not-relaxed-strides
[testenv]
-deps=
- nose
+deps= -Ur{toxinidir}/test_requirements.txt
changedir={envdir}
-commands={envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:}
+commands={envpython} -b {toxinidir}/runtests.py --mode=full {posargs:}
-[testenv:py27-not-relaxed-strides]
-basepython=python2.7
-env=NPY_RELAXED_STRIDES_CHECKING=0
-
-[testenv:py34-not-relaxed-strides]
-basepython=python3.4
+[testenv:py37-not-relaxed-strides]
+basepython=python3.7
env=NPY_RELAXED_STRIDES_CHECKING=0
# Not run by default. Set up the way you want then use 'tox -e debug'
# if you want it:
[testenv:debug]
basepython=python-dbg
-commands=gdb --args {envpython} {toxinidir}/tools/test-installed-numpy.py --mode=full {posargs:}
+commands=gdb --args {envpython} {toxinidir}/runtests.py --mode=full {posargs:}