-rw-r--r--  .gitignore | 6
-rw-r--r--  _randomgen/MANIFEST.in | 10
-rw-r--r--  _randomgen/README.md | 16
-rw-r--r--  _randomgen/README.rst | 22
-rw-r--r--  _randomgen/appveyor.yml | 33
-rw-r--r--  _randomgen/ci/conda-install.sh | 20
-rw-r--r--  _randomgen/ci/pypi-install.sh | 3
-rw-r--r--  _randomgen/doc/Makefile | 20
-rw-r--r--  _randomgen/doc/make.bat | 36
-rw-r--r--  _randomgen/doc/source/conf.py | 221
-rw-r--r--  _randomgen/github_deploy_key_bashtage_randomgen.enc | 1
-rw-r--r--  _randomgen/randomgen/__init__.py | 22
-rw-r--r--  _randomgen/randomgen/_testing.py | 315
-rw-r--r--  _randomgen/randomgen/examples/cython/extending_distributions.pyx | 48
-rw-r--r--  _randomgen/randomgen/legacy/__init__.py | 3
-rw-r--r--  _randomgen/randomgen/legacy/legacy.pyx | 1988
-rw-r--r--  _randomgen/randomgen/src/xoroshiro128/xoroshiro128.c | 58
-rw-r--r--  _randomgen/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c | 73
-rw-r--r--  _randomgen/randomgen/tests/data/xoroshiro128-testset-1.csv | 1001
-rw-r--r--  _randomgen/randomgen/tests/data/xoroshiro128-testset-2.csv | 1001
-rw-r--r--  _randomgen/randomgen/tests/test_legacy.py | 17
-rw-r--r--  _randomgen/requirements.txt | 4
-rw-r--r--  _randomgen/setup.cfg | 11
-rw-r--r--  _randomgen/versioneer.py | 1822
-rw-r--r--  doc/Makefile | 3
-rw-r--r--  doc/source/conf.py | 25
-rw-r--r--  doc/source/reference/randomgen/brng/dsfmt.rst (renamed from _randomgen/doc/source/brng/dsfmt.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/index.rst (renamed from _randomgen/doc/source/brng/index.rst) | 0
-rw-r--r--  doc/source/reference/randomgen/brng/mt19937.rst (renamed from _randomgen/doc/source/brng/mt19937.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/pcg32.rst (renamed from _randomgen/doc/source/brng/pcg32.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/pcg64.rst (renamed from _randomgen/doc/source/brng/pcg64.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/philox.rst (renamed from _randomgen/doc/source/brng/philox.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/threefry.rst (renamed from _randomgen/doc/source/brng/threefry.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/threefry32.rst (renamed from _randomgen/doc/source/brng/threefry32.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/xoroshiro128.rst (renamed from _randomgen/doc/source/brng/xoroshiro128.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/xorshift1024.rst (renamed from _randomgen/doc/source/brng/xorshift1024.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/xoshiro256starstar.rst (renamed from _randomgen/doc/source/brng/xoshiro256starstar.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/brng/xoshiro512starstar.rst (renamed from _randomgen/doc/source/brng/xoshiro512starstar.rst) | 5
-rw-r--r--  doc/source/reference/randomgen/change-log.rst (renamed from _randomgen/doc/source/change-log.rst) | 0
-rw-r--r--  doc/source/reference/randomgen/entropy.rst (renamed from _randomgen/doc/source/entropy.rst) | 2
-rw-r--r--  doc/source/reference/randomgen/extending.rst (renamed from _randomgen/doc/source/extending.rst) | 0
-rw-r--r--  doc/source/reference/randomgen/generator.rst (renamed from _randomgen/doc/source/generator.rst) | 11
-rw-r--r--  doc/source/reference/randomgen/index.rst (renamed from _randomgen/doc/source/index.rst) | 10
-rw-r--r--  doc/source/reference/randomgen/legacy.rst (renamed from _randomgen/doc/source/legacy.rst) | 23
-rw-r--r--  doc/source/reference/randomgen/multithreading.rst (renamed from _randomgen/doc/source/multithreading.rst) | 0
-rw-r--r--  doc/source/reference/randomgen/new-or-different.rst (renamed from _randomgen/doc/source/new-or-different.rst) | 0
-rw-r--r--  doc/source/reference/randomgen/parallel.rst (renamed from _randomgen/doc/source/parallel.rst) | 0
-rw-r--r--  doc/source/reference/randomgen/performance.py (renamed from _randomgen/doc/source/performance.py) | 0
-rw-r--r--  doc/source/reference/randomgen/performance.rst (renamed from _randomgen/doc/source/performance.rst) | 0
-rw-r--r--  doc/source/reference/randomgen/references.rst (renamed from _randomgen/doc/source/references.rst) | 0
-rw-r--r--  doc/source/reference/routines.rst | 1
-rw-r--r--  numpy/random/__init__.py | 7
-rw-r--r--  numpy/random/mtrand/distributions.c | 4
-rw-r--r--  numpy/random/mtrand/initarray.c | 1
-rw-r--r--  numpy/random/mtrand/numpy.pxd | 2
-rw-r--r--  numpy/random/randomgen/LICENSE.md (renamed from _randomgen/LICENSE.md) | 47
-rw-r--r--  numpy/random/randomgen/__init__.py | 21
-rw-r--r--  numpy/random/randomgen/_pickle.py (renamed from _randomgen/randomgen/pickle.py) | 53
-rw-r--r--  numpy/random/randomgen/_version.py (renamed from _randomgen/randomgen/_version.py) | 0
-rw-r--r--  numpy/random/randomgen/bounded_integers.pxd.in (renamed from _randomgen/randomgen/bounded_integers.pxd.in) | 17
-rw-r--r--  numpy/random/randomgen/bounded_integers.pyx.in (renamed from _randomgen/randomgen/bounded_integers.pyx.in) | 20
-rw-r--r--  numpy/random/randomgen/common.pxd (renamed from _randomgen/randomgen/common.pxd) | 28
-rw-r--r--  numpy/random/randomgen/common.pyx (renamed from _randomgen/randomgen/common.pyx) | 277
-rw-r--r--  numpy/random/randomgen/distributions.pxd (renamed from _randomgen/randomgen/distributions.pxd) | 8
-rw-r--r--  numpy/random/randomgen/dsfmt.pyx (renamed from _randomgen/randomgen/dsfmt.pyx) | 139
-rw-r--r--  numpy/random/randomgen/entropy.pyx (renamed from _randomgen/randomgen/entropy.pyx) | 7
-rw-r--r--  numpy/random/randomgen/examples/cython/extending.pyx (renamed from _randomgen/randomgen/examples/cython/extending.pyx) | 14
-rw-r--r--  numpy/random/randomgen/examples/cython/extending_distributions.pyx | 51
-rw-r--r--  numpy/random/randomgen/examples/cython/setup.py (renamed from _randomgen/randomgen/examples/cython/setup.py) | 0
-rw-r--r--  numpy/random/randomgen/examples/numba/extending.py (renamed from _randomgen/randomgen/examples/numba/extending.py) | 2
-rw-r--r--  numpy/random/randomgen/examples/numba/extending_distributions.py (renamed from _randomgen/randomgen/examples/numba/extending_distributions.py) | 0
-rw-r--r--  numpy/random/randomgen/generator.pyx (renamed from _randomgen/randomgen/generator.pyx) | 866
-rw-r--r--  numpy/random/randomgen/legacy/__init__.py | 3
-rw-r--r--  numpy/random/randomgen/legacy/legacy_distributions.pxd (renamed from _randomgen/randomgen/legacy/legacy_distributions.pxd) | 6
-rw-r--r--  numpy/random/randomgen/mt19937.pyx (renamed from _randomgen/randomgen/mt19937.pyx) | 126
-rw-r--r--  numpy/random/randomgen/mtrand.pyx | 4223
-rw-r--r--  numpy/random/randomgen/pcg32.pyx (renamed from _randomgen/randomgen/pcg32.pyx) | 117
-rw-r--r--  numpy/random/randomgen/pcg64.pyx (renamed from _randomgen/randomgen/pcg64.pyx) | 174
-rw-r--r--  numpy/random/randomgen/philox.pyx (renamed from _randomgen/randomgen/philox.pyx) | 123
-rw-r--r--  numpy/random/randomgen/setup.py | 191
-rw-r--r--  numpy/random/randomgen/src/aligned_malloc/aligned_malloc.c (renamed from _randomgen/randomgen/src/aligned_malloc/aligned_malloc.c) | 0
-rw-r--r--  numpy/random/randomgen/src/aligned_malloc/aligned_malloc.h (renamed from _randomgen/randomgen/src/aligned_malloc/aligned_malloc.h) | 0
-rw-r--r--  numpy/random/randomgen/src/common/LICENSE.md (renamed from _randomgen/randomgen/src/common/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/common/inttypes.h (renamed from _randomgen/randomgen/src/common/inttypes.h) | 0
-rw-r--r--  numpy/random/randomgen/src/common/stdint.h (renamed from _randomgen/randomgen/src/common/stdint.h) | 0
-rw-r--r--  numpy/random/randomgen/src/distributions/LICENSE.md (renamed from _randomgen/randomgen/src/legacy/LICENSE.md) | 31
-rw-r--r--  numpy/random/randomgen/src/distributions/binomial.h (renamed from _randomgen/randomgen/src/distributions/binomial.h) | 0
-rw-r--r--  numpy/random/randomgen/src/distributions/distributions.c (renamed from _randomgen/randomgen/src/distributions/distributions.c) | 22
-rw-r--r--  numpy/random/randomgen/src/distributions/distributions.h (renamed from _randomgen/randomgen/src/distributions/distributions.h) | 2
-rw-r--r--  numpy/random/randomgen/src/distributions/ziggurat.h (renamed from _randomgen/randomgen/src/distributions/ziggurat.h) | 0
-rw-r--r--  numpy/random/randomgen/src/distributions/ziggurat_constants.h (renamed from _randomgen/randomgen/src/distributions/ziggurat_constants.h) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/128-bit-jump.poly.txt (renamed from _randomgen/randomgen/src/dsfmt/128-bit-jump.poly.txt) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/96-bit-jump.poly.txt (renamed from _randomgen/randomgen/src/dsfmt/96-bit-jump.poly.txt) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/LICENSE.md (renamed from _randomgen/randomgen/src/dsfmt/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/calc-jump.cpp (renamed from _randomgen/randomgen/src/dsfmt/calc-jump.cpp) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-benchmark.c (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-benchmark.c) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-calc-jump.hpp (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-calc-jump.hpp) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-common.h (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-common.h) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-jump.c (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-jump.c) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-jump.h (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-jump.h) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-params.h (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-params.h) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-params19937.h (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-params19937.h) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-poly.h (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-poly.h) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT-test-gen.c (renamed from _randomgen/randomgen/src/dsfmt/dSFMT-test-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT.c (renamed from _randomgen/randomgen/src/dsfmt/dSFMT.c) | 0
-rw-r--r--  numpy/random/randomgen/src/dsfmt/dSFMT.h (renamed from _randomgen/randomgen/src/dsfmt/dSFMT.h) | 0
-rw-r--r--  numpy/random/randomgen/src/entropy/LICENSE.md (renamed from _randomgen/randomgen/src/entropy/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/entropy/entropy.c (renamed from _randomgen/randomgen/src/entropy/entropy.c) | 0
-rw-r--r--  numpy/random/randomgen/src/entropy/entropy.h (renamed from _randomgen/randomgen/src/entropy/entropy.h) | 0
-rw-r--r--  numpy/random/randomgen/src/legacy/LICENSE.md (renamed from _randomgen/randomgen/src/distributions/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/legacy/distributions-boxmuller.c (renamed from _randomgen/randomgen/src/legacy/distributions-boxmuller.c) | 9
-rw-r--r--  numpy/random/randomgen/src/legacy/distributions-boxmuller.h (renamed from _randomgen/randomgen/src/legacy/distributions-boxmuller.h) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/LICENSE.md (renamed from _randomgen/randomgen/src/mt19937/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/mt19937-benchmark.c (renamed from _randomgen/randomgen/src/mt19937/mt19937-benchmark.c) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/mt19937-jump.c (renamed from _randomgen/randomgen/src/mt19937/mt19937-jump.c) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/mt19937-jump.h (renamed from _randomgen/randomgen/src/mt19937/mt19937-jump.h) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/mt19937-poly.h (renamed from _randomgen/randomgen/src/mt19937/mt19937-poly.h) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/mt19937-test-data-gen.c (renamed from _randomgen/randomgen/src/mt19937/mt19937-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/mt19937.c (renamed from _randomgen/randomgen/src/mt19937/mt19937.c) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/mt19937.h (renamed from _randomgen/randomgen/src/mt19937/mt19937.h) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/randomkit.c (renamed from _randomgen/randomgen/src/mt19937/randomkit.c) | 0
-rw-r--r--  numpy/random/randomgen/src/mt19937/randomkit.h (renamed from _randomgen/randomgen/src/mt19937/randomkit.h) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg32/LICENSE.md (renamed from _randomgen/randomgen/src/pcg32/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg32/pcg-advance-64.c (renamed from _randomgen/randomgen/src/pcg32/pcg-advance-64.c) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg32/pcg32-test-data-gen.c (renamed from _randomgen/randomgen/src/pcg32/pcg32-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg32/pcg32.c (renamed from _randomgen/randomgen/src/pcg32/pcg32.c) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg32/pcg32.h (renamed from _randomgen/randomgen/src/pcg32/pcg32.h) | 4
-rw-r--r--  numpy/random/randomgen/src/pcg32/pcg_variants.h (renamed from _randomgen/randomgen/src/pcg32/pcg_variants.h) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg64/LICENSE.md (renamed from _randomgen/randomgen/src/pcg64/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg64/pcg64-benchmark.c (renamed from _randomgen/randomgen/src/pcg64/pcg64-benchmark.c) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg64/pcg64-test-data-gen.c (renamed from _randomgen/randomgen/src/pcg64/pcg64-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg64/pcg64.c (renamed from _randomgen/randomgen/src/pcg64/pcg64.c) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg64/pcg64.h (renamed from _randomgen/randomgen/src/pcg64/pcg64.h) | 4
-rw-r--r--  numpy/random/randomgen/src/pcg64/pcg64.orig.c (renamed from _randomgen/randomgen/src/pcg64/pcg64.orig.c) | 0
-rw-r--r--  numpy/random/randomgen/src/pcg64/pcg64.orig.h (renamed from _randomgen/randomgen/src/pcg64/pcg64.orig.h) | 0
-rw-r--r--  numpy/random/randomgen/src/philox/LICENSE.md (renamed from _randomgen/randomgen/src/philox/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/philox/philox-benchmark.c (renamed from _randomgen/randomgen/src/philox/philox-benchmark.c) | 0
-rw-r--r--  numpy/random/randomgen/src/philox/philox-test-data-gen.c (renamed from _randomgen/randomgen/src/philox/philox-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/philox/philox.c (renamed from _randomgen/randomgen/src/philox/philox.c) | 0
-rw-r--r--  numpy/random/randomgen/src/philox/philox.h (renamed from _randomgen/randomgen/src/philox/philox.h) | 0
-rw-r--r--  numpy/random/randomgen/src/splitmix64/LICENSE.md (renamed from _randomgen/randomgen/src/splitmix64/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/splitmix64/splitmix64.c (renamed from _randomgen/randomgen/src/splitmix64/splitmix64.c) | 0
-rw-r--r--  numpy/random/randomgen/src/splitmix64/splitmix64.h (renamed from _randomgen/randomgen/src/splitmix64/splitmix64.h) | 0
-rw-r--r--  numpy/random/randomgen/src/splitmix64/splitmix64.orig.c (renamed from _randomgen/randomgen/src/splitmix64/splitmix64.orig.c) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry/LICENSE.md (renamed from _randomgen/randomgen/src/threefry/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry/threefry-benchmark.c (renamed from _randomgen/randomgen/src/threefry/threefry-benchmark.c) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry/threefry-orig.c (renamed from _randomgen/randomgen/src/threefry/threefry-orig.c) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry/threefry-test-data-gen.c (renamed from _randomgen/randomgen/src/threefry/threefry-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry/threefry.c (renamed from _randomgen/randomgen/src/threefry/threefry.c) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry/threefry.h (renamed from _randomgen/randomgen/src/threefry/threefry.h) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry32/LICENSE.md (renamed from _randomgen/randomgen/src/threefry32/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry32/threefry32-test-data-gen.c (renamed from _randomgen/randomgen/src/threefry32/threefry32-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry32/threefry32.c (renamed from _randomgen/randomgen/src/threefry32/threefry32.c) | 0
-rw-r--r--  numpy/random/randomgen/src/threefry32/threefry32.h (renamed from _randomgen/randomgen/src/threefry32/threefry32.h) | 0
-rw-r--r--  numpy/random/randomgen/src/xoroshiro128/LICENSE.md (renamed from _randomgen/randomgen/src/xoroshiro128/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/xoroshiro128/xoroshiro128-benchmark.c (renamed from _randomgen/randomgen/src/xoroshiro128/xoroshiro128-benchmark.c) | 6
-rw-r--r--  numpy/random/randomgen/src/xoroshiro128/xoroshiro128-test-data-gen.c (renamed from _randomgen/randomgen/src/xoroshiro128/xoroshiro128-test-data-gen.c) | 33
-rw-r--r--  numpy/random/randomgen/src/xoroshiro128/xoroshiro128.c | 60
-rw-r--r--  numpy/random/randomgen/src/xoroshiro128/xoroshiro128.h (renamed from _randomgen/randomgen/src/xoroshiro128/xoroshiro128.h) | 22
-rw-r--r--  numpy/random/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c | 102
-rw-r--r--  numpy/random/randomgen/src/xoroshiro128/xoroshiro128plus.orig.h (renamed from _randomgen/randomgen/src/xoroshiro128/xoroshiro128plus.orig.h) | 0
-rw-r--r--  numpy/random/randomgen/src/xorshift1024/LICENSE.md (renamed from _randomgen/randomgen/src/xorshift1024/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/xorshift1024/xorshift1024-benchmark.c (renamed from _randomgen/randomgen/src/xorshift1024/xorshift1024-benchmark.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xorshift1024/xorshift1024-test-data-gen.c (renamed from _randomgen/randomgen/src/xorshift1024/xorshift1024-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xorshift1024/xorshift1024.c (renamed from _randomgen/randomgen/src/xorshift1024/xorshift1024.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xorshift1024/xorshift1024.h (renamed from _randomgen/randomgen/src/xorshift1024/xorshift1024.h) | 0
-rw-r--r--  numpy/random/randomgen/src/xorshift1024/xorshift1024.orig.c (renamed from _randomgen/randomgen/src/xorshift1024/xorshift1024.orig.c) | 4
-rw-r--r--  numpy/random/randomgen/src/xorshift1024/xorshift1024.orig.h (renamed from _randomgen/randomgen/src/xorshift1024/xorshift1024.orig.h) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro256starstar/LICENSE.md (renamed from _randomgen/randomgen/src/xoshiro256starstar/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar-test-data-gen.c (renamed from _randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.c (renamed from _randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.h (renamed from _randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.h) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.c (renamed from _randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.h (renamed from _randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.h) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro512starstar/LICENSE.md (renamed from _randomgen/randomgen/src/xoshiro512starstar/LICENSE.md) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar-test-data-gen.c (renamed from _randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar-test-data-gen.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.c (renamed from _randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.h (renamed from _randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.h) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.c (renamed from _randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.c) | 0
-rw-r--r--  numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.h (renamed from _randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.h) | 0
-rw-r--r--  numpy/random/randomgen/tests/__init__.py (renamed from _randomgen/randomgen/tests/__init__.py) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/__init__.py (renamed from _randomgen/randomgen/tests/data/__init__.py) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/dSFMT-testset-1.csv (renamed from _randomgen/randomgen/tests/data/dSFMT-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/dSFMT-testset-2.csv (renamed from _randomgen/randomgen/tests/data/dSFMT-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/mt19937-testset-1.csv (renamed from _randomgen/randomgen/tests/data/mt19937-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/mt19937-testset-2.csv (renamed from _randomgen/randomgen/tests/data/mt19937-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/pcg32-testset-1.csv (renamed from _randomgen/randomgen/tests/data/pcg32-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/pcg32-testset-2.csv (renamed from _randomgen/randomgen/tests/data/pcg32-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/pcg64-testset-1.csv (renamed from _randomgen/randomgen/tests/data/pcg64-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/pcg64-testset-2.csv (renamed from _randomgen/randomgen/tests/data/pcg64-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/philox-testset-1.csv (renamed from _randomgen/randomgen/tests/data/philox-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/philox-testset-2.csv (renamed from _randomgen/randomgen/tests/data/philox-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/threefry-testset-1.csv (renamed from _randomgen/randomgen/tests/data/threefry-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/threefry-testset-2.csv (renamed from _randomgen/randomgen/tests/data/threefry-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/threefry32-testset-1.csv (renamed from _randomgen/randomgen/tests/data/threefry32-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/threefry32-testset-2.csv (renamed from _randomgen/randomgen/tests/data/threefry32-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/xoroshiro128-testset-1.csv | 1001
-rw-r--r--  numpy/random/randomgen/tests/data/xoroshiro128-testset-2.csv | 1001
-rw-r--r--  numpy/random/randomgen/tests/data/xorshift1024-testset-1.csv (renamed from _randomgen/randomgen/tests/data/xorshift1024-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/xorshift1024-testset-2.csv (renamed from _randomgen/randomgen/tests/data/xorshift1024-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/xoshiro256starstar-testset-1.csv (renamed from _randomgen/randomgen/tests/data/xoshiro256starstar-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/xoshiro256starstar-testset-2.csv (renamed from _randomgen/randomgen/tests/data/xoshiro256starstar-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/xoshiro512starstar-testset-1.csv (renamed from _randomgen/randomgen/tests/data/xoshiro512starstar-testset-1.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/data/xoshiro512starstar-testset-2.csv (renamed from _randomgen/randomgen/tests/data/xoshiro512starstar-testset-2.csv) | 0
-rw-r--r--  numpy/random/randomgen/tests/test_against_numpy.py (renamed from _randomgen/randomgen/tests/test_against_numpy.py) | 88
-rw-r--r--  numpy/random/randomgen/tests/test_direct.py (renamed from _randomgen/randomgen/tests/test_direct.py) | 225
-rw-r--r--  numpy/random/randomgen/tests/test_generator_mt19937.py (renamed from _randomgen/randomgen/tests/test_numpy_mt19937.py) | 954
-rw-r--r--  numpy/random/randomgen/tests/test_generator_mt19937_regressions.py (renamed from _randomgen/randomgen/tests/test_numpy_mt19937_regressions.py) | 4
-rw-r--r--  numpy/random/randomgen/tests/test_randomstate.py | 1808
-rw-r--r--  numpy/random/randomgen/tests/test_randomstate_regression.py | 157
-rw-r--r--  numpy/random/randomgen/tests/test_smoke.py (renamed from _randomgen/randomgen/tests/test_smoke.py) | 120
-rw-r--r--  numpy/random/randomgen/threefry.pyx (renamed from _randomgen/randomgen/threefry.pyx) | 119
-rw-r--r--  numpy/random/randomgen/threefry32.pyx (renamed from _randomgen/randomgen/threefry32.pyx) | 115
-rw-r--r--  numpy/random/randomgen/xoroshiro128.pyx (renamed from _randomgen/randomgen/xoroshiro128.pyx) | 117
-rw-r--r--  numpy/random/randomgen/xorshift1024.pyx (renamed from _randomgen/randomgen/xorshift1024.pyx) | 120
-rw-r--r--  numpy/random/randomgen/xoshiro256starstar.pyx (renamed from _randomgen/randomgen/xoshiro256starstar.pyx) | 118
-rw-r--r--  numpy/random/randomgen/xoshiro512starstar.pyx (renamed from _randomgen/randomgen/xoshiro512starstar.pyx) | 116
-rw-r--r--  numpy/random/setup.py | 7
-rwxr-xr-x  setup.py | 13
-rwxr-xr-x  tools/cythonize.py | 85
220 files changed, 10969 insertions(+), 8906 deletions(-)
diff --git a/.gitignore b/.gitignore
index 8e96d4154..ac5693fad 100644
--- a/.gitignore
+++ b/.gitignore
@@ -168,7 +168,7 @@ numpy/core/src/umath/test_rational.c
numpy/core/src/umath/umath_tests.c
numpy/distutils/__config__.py
numpy/linalg/umath_linalg.c
-doc/source/reference/generated
+doc/source/**/generated/
benchmarks/results
benchmarks/html
benchmarks/env
@@ -176,4 +176,8 @@ benchmarks/numpy
# cythonized files
cythonize.dat
numpy/random/mtrand/mtrand.c
+numpy/random/randomgen/*.c
+numpy/random/randomgen/legacy/*.c
numpy/random/mtrand/randint_helpers.pxi
+numpy/random/randomgen/bounded_integers.pyx
+numpy/random/randomgen/bounded_integers.pxd
diff --git a/_randomgen/MANIFEST.in b/_randomgen/MANIFEST.in
deleted file mode 100644
index 61fffb8db..000000000
--- a/_randomgen/MANIFEST.in
+++ /dev/null
@@ -1,10 +0,0 @@
-exclude randomgen/entropy.c
-recursive-exclude randomgen *.c
-include versioneer.py
-include randomgen/_version.py
-include requirements.txt
-include README.md
-include README.rst
-include LICENSE.md
-recursive-include randomgen *.py *.pyx *.px[di] *.h *.in *.csv *.md
-graft randomgen/src
diff --git a/_randomgen/README.md b/_randomgen/README.md
index 45c33b928..2e8073645 100644
--- a/_randomgen/README.md
+++ b/_randomgen/README.md
@@ -47,9 +47,9 @@ which can fully reproduce the sequence produced by NumPy.
from randomgen import RandomGenerator
# Default basic PRNG is Xoroshiro128
rnd = RandomGenerator()
- w = rnd.standard_normal(10000, method='zig')
- x = rnd.standard_exponential(10000, method='zig')
- y = rnd.standard_gamma(5.5, 10000, method='zig')
+ w = rnd.standard_normal(10000)
+ x = rnd.standard_exponential(10000)
+ y = rnd.standard_gamma(5.5, 10000)
```
* Support for 32-bit floating randoms for core generators.
@@ -135,15 +135,15 @@ The RNGs include:
## Status
* Builds and passes all tests on:
- * Linux 32/64 bit, Python 2.7, 3.4, 3.5, 3.6
+ * Linux 32/64 bit, Python 2.7, 3.4, 3.5, 3.6, 3.7
* PC-BSD (FreeBSD) 64-bit, Python 2.7
- * OSX 64-bit, Python 3.6
- * Windows 32/64 bit, Python 2.7, 3.5 and 3.6
+ * OSX 64-bit, Python 2.7, 3.5, 3.6, 3.7
+ * Windows 32/64 bit, Python 2.7, 3.5, 3.6 and 3.7
## Version
The version matched the latest version of NumPy where
-`RandomGenerator(MT19937())` passes all NumPy test.
+`LegacyGenerator(MT19937())` passes all NumPy tests.
## Documentation
@@ -168,7 +168,7 @@ Building requires:
* Cython (0.26+)
* tempita (0.5+), if not provided by Cython
-Testing requires pytest (3.0+).
+Testing requires pytest (4.0+).
**Note:** it might work with other versions but only tested with these
versions.
diff --git a/_randomgen/README.rst b/_randomgen/README.rst
index f4b886e5f..7e91b898d 100644
--- a/_randomgen/README.rst
+++ b/_randomgen/README.rst
@@ -12,9 +12,9 @@ generators in Python and NumPy.
Python 2.7 Support
------------------
-v1.16 is the final major version that supports Python 2.7. Any bugs in
-v1.16 will be patched until the end of 2019. All future releases are
-Python 3, with an initial minimum version of 3.5.
+Release 1.16.0 is the final version that supports Python 2.7. Any bugs
+in v1.16.0 will be patched until the end of 2019. All future releases
+are Python 3, with an initial minimum version of 3.5.
Compatibility Warning
---------------------
@@ -51,9 +51,9 @@ Features
from randomgen import RandomGenerator
# Default basic PRNG is Xoroshiro128
rnd = RandomGenerator()
- w = rnd.standard_normal(10000, method='zig')
- x = rnd.standard_exponential(10000, method='zig')
- y = rnd.standard_gamma(5.5, 10000, method='zig')
+ w = rnd.standard_normal(10000)
+ x = rnd.standard_exponential(10000)
+ y = rnd.standard_gamma(5.5, 10000)
- Support for 32-bit floating randoms for core generators. Currently
supported:
@@ -147,16 +147,16 @@ Status
- Builds and passes all tests on:
- - Linux 32/64 bit, Python 2.7, 3.4, 3.5, 3.6
+ - Linux 32/64 bit, Python 2.7, 3.5, 3.6, 3.7
- PC-BSD (FreeBSD) 64-bit, Python 2.7
- - OSX 64-bit, Python 3.6
- - Windows 32/64 bit, Python 2.7, 3.5 and 3.6
+ - OSX 64-bit, Python 2.7, 3.5, 3.6, 3.7
+ - Windows 32/64 bit, Python 2.7, 3.5, 3.6, and 3.7
Version
-------
The version matched the latest version of NumPy where
-``RandomGenerator(MT19937())`` passes all NumPy test.
+``LegacyGenerator(MT19937())`` passes all NumPy tests.
Documentation
-------------
@@ -185,7 +185,7 @@ Building requires:
- Cython (0.26+)
- tempita (0.5+), if not provided by Cython
-Testing requires pytest (3.0+).
+Testing requires pytest (4.0+).
**Note:** it might work with other versions but only tested with these
versions.
diff --git a/_randomgen/appveyor.yml b/_randomgen/appveyor.yml
deleted file mode 100644
index 88544620d..000000000
--- a/_randomgen/appveyor.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-skip_tags: true
-clone_depth: 50
-
-os: Visual Studio 2015
-
-environment:
- matrix:
- - PY_MAJOR_VER: 2
- PYTHON_ARCH: "x86"
- - PY_MAJOR_VER: 3
- PYTHON_ARCH: "x86_64"
- - PY_MAJOR_VER: 3
- PYTHON_ARCH: "x86"
-
-platform:
- - x64
-
-build_script:
- - ps: Start-FileDownload "https://repo.continuum.io/miniconda/Miniconda$env:PY_MAJOR_VER-latest-Windows-$env:PYTHON_ARCH.exe" C:\Miniconda.exe; echo "Finished downloading miniconda"
- - cmd: C:\Miniconda.exe /S /D=C:\Py
- - SET PATH=C:\Py;C:\Py\Scripts;C:\Py\Library\bin;%PATH%
- - conda config --set always_yes yes
- - conda update conda --quiet
- - conda install numpy cython nose pandas pytest --quiet
- - python setup.py develop
- - set "GIT_DIR=%cd%"
-
-test_script:
- - pytest randomgen
-
-on_success:
- - cd %GIT_DIR%\
- - python benchmark.py
diff --git a/_randomgen/ci/conda-install.sh b/_randomgen/ci/conda-install.sh
deleted file mode 100644
index cccb8227d..000000000
--- a/_randomgen/ci/conda-install.sh
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env bash
-
-if [[ ${TRAVIS_OS_NAME} == "osx" ]]; then wget https://repo.continuum.io/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -O miniconda3.sh; fi
-if [[ ${TRAVIS_OS_NAME} == "linux" ]]; then wget http://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -O miniconda3.sh; fi
-chmod +x miniconda3.sh
-./miniconda3.sh -b
-export PATH=${HOME}/miniconda3/bin:$PATH
-conda config --set always_yes true
-conda update --all --quiet
-conda create -n randomgen-test ${PKGS} pip --quiet
-source activate randomgen-test
-
-PKGS="python=${PYTHON} matplotlib numpy"
-if [[ -n ${NUMPY} ]]; then PKGS="${PKGS}=${NUMPY}"; fi
-PKGS="${PKGS} Cython";
-if [[ -n ${CYTHON} ]]; then PKGS="${PKGS}=${CYTHON}"; fi
-PKGS="${PKGS} pandas";
-if [[ -n ${PANDAS} ]]; then PKGS="${PKGS}=${PANDAS}"; fi
-echo conda create -n randomgen-test ${PKGS} pytest setuptools nose --quiet
-conda create -n randomgen-test ${PKGS} pytest setuptools nose --quiet
diff --git a/_randomgen/ci/pypi-install.sh b/_randomgen/ci/pypi-install.sh
deleted file mode 100644
index ded8dd921..000000000
--- a/_randomgen/ci/pypi-install.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-#!/usr/bin/env bash
-
-pip install numpy cython pandas pytest setuptools nose matplotlib --quiet
diff --git a/_randomgen/doc/Makefile b/_randomgen/doc/Makefile
deleted file mode 100644
index 1ee9d3660..000000000
--- a/_randomgen/doc/Makefile
+++ /dev/null
@@ -1,20 +0,0 @@
-# Minimal makefile for Sphinx documentation
-#
-
-# You can set these variables from the command line.
-SPHINXOPTS =
-SPHINXBUILD = sphinx-build
-SPHINXPROJ = RandomGen
-SOURCEDIR = source
-BUILDDIR = build
-
-# Put it first so that "make" without argument is like "make help".
-help:
- @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
-
-.PHONY: help Makefile
-
-# Catch-all target: route all unknown targets to Sphinx using the new
-# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS).
-%: Makefile
- @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
\ No newline at end of file
diff --git a/_randomgen/doc/make.bat b/_randomgen/doc/make.bat
deleted file mode 100644
index e5a098a34..000000000
--- a/_randomgen/doc/make.bat
+++ /dev/null
@@ -1,36 +0,0 @@
-@ECHO OFF
-
-pushd %~dp0
-
-REM Command file for Sphinx documentation
-
-if "%SPHINXBUILD%" == "" (
- set SPHINXBUILD=sphinx-build
-)
-set SOURCEDIR=source
-set BUILDDIR=build
-set SPHINXPROJ=RandomGen
-
-if "%1" == "" goto help
-
-%SPHINXBUILD% >NUL 2>NUL
-if errorlevel 9009 (
- echo.
- echo.The 'sphinx-build' command was not found. Make sure you have Sphinx
- echo.installed, then set the SPHINXBUILD environment variable to point
- echo.to the full path of the 'sphinx-build' executable. Alternatively you
- echo.may add the Sphinx directory to PATH.
- echo.
- echo.If you don't have Sphinx installed, grab it from
- echo.http://sphinx-doc.org/
- exit /b 1
-)
-
-%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-goto end
-
-:help
-%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS%
-
-:end
-popd
diff --git a/_randomgen/doc/source/conf.py b/_randomgen/doc/source/conf.py
deleted file mode 100644
index d4290d173..000000000
--- a/_randomgen/doc/source/conf.py
+++ /dev/null
@@ -1,221 +0,0 @@
-# -*- coding: utf-8 -*-
-#
-# Configuration file for the Sphinx documentation builder.
-#
-# This file does only contain a selection of the most common options. For a
-# full list see the documentation:
-# http://www.sphinx-doc.org/en/stable/config
-
-# -- Path setup --------------------------------------------------------------
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
-import guzzle_sphinx_theme
-import randomgen
-
-# -- Project information -----------------------------------------------------
-
-project = 'RandomGen'
-copyright = '2018, Kevin Sheppard'
-author = 'Kevin Sheppard'
-
-# The short X.Y version.
-
-version = randomgen.__version__
-if '+' in version:
- version = version.split('+')
- version = ''.join((version[0], ' (+', version[1].split('.')[0], ')'))
-# The full version, including alpha/beta/rc tags.
-release = randomgen.__version__
-
-# -- General configuration ---------------------------------------------------
-
-# If your documentation needs a minimal Sphinx version, state it here.
-#
-# needs_sphinx = '1.0'
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
- 'sphinx.ext.napoleon',
- 'sphinx.ext.autodoc',
- 'sphinx.ext.extlinks',
- 'sphinx.ext.todo',
- 'sphinx.ext.doctest',
- 'sphinx.ext.intersphinx',
- 'sphinx.ext.autosummary',
- 'sphinx.ext.mathjax',
- 'sphinx.ext.githubpages',
- 'IPython.sphinxext.ipython_console_highlighting',
- 'IPython.sphinxext.ipython_directive'
-]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffix as a list of string:
-#
-# source_suffix = ['.rst', '.md']
-source_suffix = '.rst'
-
-# The master toctree document.
-master_doc = 'index'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = None
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This pattern also affects html_static_path and html_extra_path .
-exclude_patterns = []
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
-
-
-# -- Options for HTML output -------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-#
-# html_theme = 'alabaster'
-# html_theme = 'sphinx_rtd_theme'
-# html_theme_path = ["_themes", ]
-# Theme options are theme-specific and customize the look and feel of a theme
-# further. For a list of options available for each theme, see the
-# documentation.
-#
-# html_theme_options = {}
-
-html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator'
-html_theme_path = guzzle_sphinx_theme.html_theme_path()
-html_theme = 'guzzle_sphinx_theme'
-
-# Register the theme as an extension to generate a sitemap.xml
-extensions.append("guzzle_sphinx_theme")
-
-# Guzzle theme options (see theme.conf for more information)
-html_theme_options = {
- # Set the name of the project to appear in the sidebar
- "project_nav_name": project + u" " + version,
-}
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-html_static_path = ['_static']
-
-# Custom sidebar templates, must be a dictionary that maps document names
-# to template names.
-#
-# The default sidebars (for documents that don't match any pattern) are
-# defined by theme itself. Builtin themes are using these templates by
-# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
-# 'searchbox.html']``.
-#
-# html_sidebars = {}
-html_sidebars = {
- '**': ['logo-text.html', 'globaltoc.html', 'searchbox.html']
-}
-
-# If false, no module index is generated.
-html_domain_indices = True
-
-# -- Options for HTMLHelp output ---------------------------------------------
-
-# Output file base name for HTML help builder.
-htmlhelp_basename = 'RandomGendoc'
-
-
-# -- Options for LaTeX output ------------------------------------------------
-
-latex_elements = {
- # The paper size ('letterpaper' or 'a4paper').
- #
- # 'papersize': 'letterpaper',
-
- # The font size ('10pt', '11pt' or '12pt').
- #
- # 'pointsize': '10pt',
-
- # Additional stuff for the LaTeX preamble.
- #
- # 'preamble': '',
-
- # Latex figure (float) alignment
- #
- # 'figure_align': 'htbp',
-}
-
-# Grouping the document tree into LaTeX files. List of tuples
-# (source start file, target name, title,
-# author, documentclass [howto, manual, or own class]).
-latex_documents = [
- (master_doc, 'RandomGen.tex', 'RandomGen Documentation',
- 'Kevin Sheppard', 'manual'),
-]
-
-
-# -- Options for manual page output ------------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
- (master_doc, 'RandomGen', 'RandomGen Documentation',
- [author], 1)
-]
-
-
-# -- Options for Texinfo output ----------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-# dir menu entry, description, category)
-texinfo_documents = [
- (master_doc, 'RandomGen', 'RandomGen Documentation',
- author, 'RandomGen', 'Alternative random number generators for Python.',
- 'Miscellaneous'),
-]
-
-
-# -- Extension configuration -------------------------------------------------
-
-# -- Options for intersphinx extension ---------------------------------------
-
-# Example configuration for intersphinx: refer to the Python standard library.
-intersphinx_mapping = {
- 'statsmodels': ('http://www.statsmodels.org/dev/', None),
- 'matplotlib': ('https://matplotlib.org', None),
- 'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
- 'python': ('https://docs.python.org/3', None),
- 'numpy': ('https://docs.scipy.org/doc/numpy', None),
- 'np': ('https://docs.scipy.org/doc/numpy', None),
- 'pandas': ('https://pandas.pydata.org/pandas-docs/stable/', None),
- 'pd': ('https://pandas.pydata.org/pandas-docs/stable/', None),
-}
-
-autosummary_generate = True
-
-doctest_global_setup = """
-import numpy as np
-
-import randomgen
-
-import matplotlib.pyplot
-
-def show(*args, **kwargs):
- return
-
-matplotlib.pyplot.show = show
-"""
diff --git a/_randomgen/github_deploy_key_bashtage_randomgen.enc b/_randomgen/github_deploy_key_bashtage_randomgen.enc
deleted file mode 100644
index ec025c480..000000000
--- a/_randomgen/github_deploy_key_bashtage_randomgen.enc
+++ /dev/null
@@ -1 +0,0 @@
-gAAAAABaqYcL3y2gmKPwdhTQvWbDmqvTV9VyGXrEJI0HFVB3ZcmhgwRY_2L9_k7wALgsaQ9-FOqKreKMagBvAiS8IYhW4dUhJ1_6MO0bb8l_fdvkiKY7NH4DXKpO6sGmOg5YPfx44agTLtnI9yuZHo_LgYTkXFS2MQISZJvpmmtgC7fv_ydaRDG3v9c4a4zi7futr6zk0jAlxw29gjyFaReWJQgH6hJTVUVvOq30RJwjpa87jf45mVTsyPdVfHRqn6rcwvGsRKCW6hFnPRsJEP6-ivdjMFbzK6uK1TrswAJ2ZZIGcH84Kaph8kQayMZRL5FUoWsonkGK_SHwiPjmmHRXVMYxSJqNAtjxDgcznzmuazUOwdWCUIkxe0FtJieW5rLwTjT2u1cgcCQ2MKkBiCjO4tORCT0JGCyhOZdsJx6_5i2s1OKaCEb6Uur07itpI2IAEreA38u7CiU150Q7D8zinpPLWuXIrsk9nKfr1YjwXBSVtOBUOuh4Sy9MjcpQuavwJPYVSpNi6_BeIclxP45wjFF5Ai2P8IgaHxSFlMJNfze9H1U-2eTyQaykuZ2WrZBPoGYFRaQExU6jnXOdPMC5FqaO5DV5tvN56fLx9UFXaCqf_UknJRvYnLi94H__nZJWhN6XfCeNaUuPZuuiOFXekK-LC5VFAWXTN84pOesek0-zESKDffFozITTjA-4cvzppUnsSZNm5cXBUdr3wszkNlrQqDVZhP_HU2B8QSilBnnfVEsbKr_-SVQ0s3ipFohPS_GTFKtn8467vvIRgCkks8J1ba5xHb6QMlt2Y2L7yunLh0vmKwqZTVtU-4L4Xm2kKvgHi1kxAaApQiEX2bM-xX7TGNnzRFLKAxpHX4EvO72K2CcQXKu0XkRNc-_c-XcxsWZ7XtvyTCBXNnPtvj26B-FW8XyJH_u0HblrB-AKRgmpRuAhDNh1l_OAcOFHpUrH5t64t6lwOYCR3lXUJJytW-UEz-Nx9j32VX4Ep1IsGjkNuM3FtW4E-iVoGPwYwT3jsGo5rsO6MzrzoEKJbFdgQHnqe3NaxpF6rEVweQTNN2E1LFFuYHnRgo2LuMdK7IDXJ66MCxqEBRMH6Gcne-b5RHEpWoCQAvgyKwU5MclS4M3zLhGnXbtO-q4OL3JV1a-wx9e4vHZJrAolMjvw7a8l5vCDj-FqT5nJIVpG5eKwB_JL24O5d4xUSbgqBm6i1h51L---brkWg9i9YXsjZj5Inf2ZuU3hasZPyXFbTZbpBXN7BMalszLadCOWWsnDJMvl-UJeX2aDDATy5M_4-9Yjt70G1ZJNxZ8K2F6UdXwVifGJGa7jHU9nteCWZVfUdkiexqkLqKebZAlPBpzisrEQw6PmokKP2UO27NBFYTlfYL1NiCahXkOUMPiTKhjQ0_JSqdlUl2igNlNUFSip-63Rk4WtgodQo9iI4icfV5TFkR0h-LPD1m9lIamruWvAQWLU-_daZcN8rdqCWsysuo1ryp80UHHvyTiwloCa2f0ZKr78RIaD_QCkHmevywprNNuMd0ErbAOD7v3dUKjnlvpf8gLpUnu4ZfR1u86OPqsyt2b5tmwB6TWdpaCBNRAjlbFOU8aHDpPDVCAKf1AcEZ1B6p36YgNf5yxmKwc1QEmzXPr1KnSWJRps_QRBX-hEuBu8Q_BUQCjlInJVLcpSgt2lTuJPwwQzdxm5CeU1xdpeWCztSxfghmfE7mzhYizIYa1WaYs32xfZQglEG_O8oXCaU524vyh6cBnIytY3cF1FlwfbKQvbKyKkq8p5YSWe8HX8XRJGVe1bBNM2RYZO5BfLLl5cENIUSbb-REs6j8E61HGgJ9cLBG4-l2QbivSEhKsa4fI0JNVGEL_kwaEOVNHa85y_4oFAQuC4eYOMdgrwGfcD-J-XkE_J6khiLCOaZRcFhFNUfTjghPYzO37D24cAQ9fGnFFehQU-08Ie8SMV2O3fmUV2RbX_h6FXYKZ5Ptp3l2wP5wcuwhPFxPPJ279pswQw9NlHBF3gdtu3_cisqwNfc_HZQ6GXYzbcE7nwTGOY03LN3RjghJgkkeaNs6e0iIxfTJjIqG6-ZWNRNOJKdotjMLVqlhfk0KNZjO5rKEfDfYW_Lbiylgu7I7O-wy-Xn60OTu7na0ObYl-Y9tXkRTZPMNasjDWpfTXKZRp8EX45W-35VKmb0ERj0ee9uXgZxiPGLd3OP8cxIiXqZdZYKwJnD09zZuXwaTa2AAp2WmLYLiF-pDIISNxVF7mCxU9G0AWl0Ml1d5pS5zadM1OYB5yfjx09hlVasaiPaGqIptNtdz8tDQ1ngH-QBPV8wNvSxHwdU4w96pJIY9jG5Z3k-PVO26NNKjZ_KMZhO-3TgQXMJI0GHSyfYFHEMGJuUbeS4ThGyAt2Z6pVKTu7WFjgceseLMmwevJQeyScvtD22t8bpSuqfgxrAGSP5O2-e1UEl_12umZZG3sSd8jc_WNBgX7nSa6LeGAmlY0z_h9SblVl63r2qZi7-Ur0Y7O4JH4rHMDkf07tMU-foCiDDppvZkPRuvPlYgzLmnyOXePN0_1aiou9qbMWmzyJwhrqnt5uZXVHpRwCKKdXRBAcBebuKU-LKqMhWWowf1OUm240628OmQL2oTOaVWBlS3x1XKHMv18_ucbgWB4KaQdidSKMwIXE_LRfhr17g-h2CTQFsfImGKU36ECJHk35K9qr3aZI5X2MLUsOJjdbQiVJsLpdCDbr_HfPDNnux0QiVRZhslKnqOlcv8_6MeKtcqcxDJTi1v1430tpiZj-A2dp3F9YXi_PvCcKD4GheUwN8TUJgEZF3m9Vc80pAWWFDN7Obof_0zCcv7HrXgXCVJHnFzJn0J4AqW6db-DvYAzdejDnLTsTZK9ctJmWxHAWWXYi35aAjj6nalFk97T7EvOr2zS6f_xSUyPeNPs2fIP1lY3togmjPRvwbIN-ZxqLzkfjmxARrLJpqAxK_AvOz2vNlEosQd3zJxk7hEQWRfkTmakvDPgkd5fNsfIGfAt8B_PWnmz41DWKeOlsSQguPAqCE40NSszmyjSBhde8uHN8tGwdQpdcjPt01kgmrdD2GHfLs8zeyNWRzE3qmLT46S1dq1kfQX2j20LXDck9Ox0nFDUXYwaz6pVDPymhPqzh4EHtg0QKePJ5qpY2RDTW3S8UK3YkE3pa_C_-BPcNLVGr_k7WaMWGx0JJ72W2MqcoXgq3bZq_CZeseeKm3rH3YiaibidLk4WqMblcWUurHW09vFCNSOyQ28jkBeMSgadJ2zEbK9M1QmsDxxSCzWtIn_y7nDLCGh0NzD2alVp4QfxwjF5ZEYSXZOYXdhRBkd3pRX9perJT_zlQf7Ag2otXUZE-J6TkDAAwhWxxUFQ0iUIKNKtO-ocM8YevCyl0EK06AzX6jmShrE5eZpej7o7DA2dCoLYksacloBbonqDjkpXR1uZcGJSnhZm29UeSSGQN7cqgR5DDCHkthvOn8gZxS8vr1fQiswazUaMCClHUD_O88IlLXnqXj4n-84TMT9iBGN3iaab4P-fb2t1Azc
d8uSGl3CwNEouEekdvVWHSp3bhMkwpPuvakJheLOGfX7npwWo5iIEvdA0VhesiEV8ZVJYCt3zmOwQtI-Sk5uAVAWAieB6-up9KWtwoF89C64CLp99srzLkaPddQJKtTruYQER5l3hL1LBe5g8XPEBNMrFjp2xIN8mNYpZRt41nFxsHoA9xnsv4NXWbwqnkyImP0Wm9HmclvwTOZ9xdT5Ryj8u97XSOz4y8T5Ql5UPwRBujiBI91GQnEKz3FPmUzS70Bbrg3XIirpbPqcRp_VnnxXT8AX0zGZiuWvx94NFa5h4a11vYpeLnKzQ2RyTHB96CSpxxdXHAHfMvc9ib3XzlKRUUfDX0x0tDBj3BkV5UMgLtn2RRCgT1PydLn13wYbeNwfb5GlGutiqQY_QTuUOqSII-2vyNzA5FUPpzofXGYwAz52pGwpZ7w0s4fBpXocxWt0gGW5wxDzTEX3UdkbRsN1GXs5tYkdbcCW_jPrVcq7pUGgOyGXMEYnZUA1ack2h6nSwlxbx_Aka_VyxZCJFYJW9S165lIhm7KkDCfRpQdoA4Fx27aAXwWL70ipNCyNHFOERXD5SoVMJDcz3-cXkttEddXooygKoXojR4epBuxhSkUNxgnd70faZaIC8_L5_ZlZIBn-lH3jLT5Yuzt8-weKpAyczteZ8eLB0YlnYlqhIVFYy4QBR8iejRZBXABKiuIWz_Xf4qu6UGwHTQ-1BBfl9mr0RxULn7NGtfbQ72Xwad-HlT1MnEd_6o95MkFvHbdINlpkaeVwiAgUSFbITPh7x8JaQKlAzjROJQdhGvT4j42woumzQtuqr9UnDWtf8aECkrJP_-AEy1BLbXmUo= \ No newline at end of file
diff --git a/_randomgen/randomgen/__init__.py b/_randomgen/randomgen/__init__.py
deleted file mode 100644
index b4c942c8f..000000000
--- a/_randomgen/randomgen/__init__.py
+++ /dev/null
@@ -1,22 +0,0 @@
-from randomgen.dsfmt import DSFMT
-from randomgen.generator import RandomGenerator
-from randomgen.mt19937 import MT19937
-from randomgen.pcg32 import PCG32
-from randomgen.pcg64 import PCG64
-from randomgen.philox import Philox
-from randomgen.threefry import ThreeFry
-from randomgen.threefry32 import ThreeFry32
-from randomgen.xoroshiro128 import Xoroshiro128
-from randomgen.xorshift1024 import Xorshift1024
-from randomgen.xoshiro256starstar import Xoshiro256StarStar
-from randomgen.xoshiro512starstar import Xoshiro512StarStar
-
-__all__ = ['RandomGenerator', 'DSFMT', 'MT19937', 'PCG64', 'PCG32', 'Philox',
- 'ThreeFry', 'ThreeFry32', 'Xoroshiro128', 'Xorshift1024',
- 'Xoshiro256StarStar', 'Xoshiro512StarStar',
- 'hypergeometric', 'multinomial', 'random_sample']
-
-from ._version import get_versions
-
-__version__ = get_versions()['version']
-del get_versions
diff --git a/_randomgen/randomgen/_testing.py b/_randomgen/randomgen/_testing.py
deleted file mode 100644
index 07d41677b..000000000
--- a/_randomgen/randomgen/_testing.py
+++ /dev/null
@@ -1,315 +0,0 @@
-"""
-Shim for NumPy's suppress_warnings
-"""
-
-
-try:
- from numpy.testing import suppress_warnings
-except ImportError:
-
- # The following two classes are copied from python 2.6 warnings module
- # (context manager)
- class WarningMessage(object):
-
- """
- Holds the result of a single showwarning() call.
- Deprecated in 1.8.0
- Notes
- -----
- `WarningMessage` is copied from the Python 2.6 warnings module,
- so it can be used in NumPy with older Python versions.
- """
-
- _WARNING_DETAILS = ("message", "category", "filename", "lineno",
- "file", "line")
-
- def __init__(self, message, category, filename, lineno, file=None,
- line=None):
- local_values = locals()
- for attr in self._WARNING_DETAILS:
- setattr(self, attr, local_values[attr])
- if category:
- self._category_name = category.__name__
- else:
- self._category_name = None
-
- def __str__(self):
- return ("{message : %r, category : %r, "
- "filename : %r, lineno : %s, "
- "line : %r}" % (self.message, self._category_name,
- self.filename, self.lineno, self.line))
-
- import re
- import warnings
- from functools import wraps
-
- class suppress_warnings(object):
- """
- Context manager and decorator doing much the same as
- ``warnings.catch_warnings``.
- However, it also provides a filter mechanism to work around
- http://bugs.python.org/issue4180.
- This bug causes Python before 3.4 to not reliably show warnings again
- after they have been ignored once (even within catch_warnings). It
- means that no "ignore" filter can be used easily, since following
- tests might need to see the warning. Additionally it allows easier
- specificity for testing warnings and can be nested.
- Parameters
- ----------
- forwarding_rule : str, optional
- One of "always", "once", "module", or "location". Analogous to
- the usual warnings module filter mode, it is useful to reduce
- noise mostly on the outmost level. Unsuppressed and unrecorded
- warnings will be forwarded based on this rule. Defaults to
- "always". "location" is equivalent to the warnings "default", match
- by exact location the warning warning originated from.
- Notes
- -----
- Filters added inside the context manager will be discarded again
- when leaving it. Upon entering all filters defined outside a
- context will be applied automatically.
- When a recording filter is added, matching warnings are stored in the
- ``log`` attribute as well as in the list returned by ``record``.
- If filters are added and the ``module`` keyword is given, the
- warning registry of this module will additionally be cleared when
- applying it, entering the context, or exiting it. This could cause
- warnings to appear a second time after leaving the context if they
- were configured to be printed once (default) and were already
- printed before the context was entered.
- Nesting this context manager will work as expected when the
- forwarding rule is "always" (default). Unfiltered and unrecorded
- warnings will be passed out and be matched by the outer level.
- On the outmost level they will be printed (or caught by another
- warnings context). The forwarding rule argument can modify this
- behaviour.
- Like ``catch_warnings`` this context manager is not threadsafe.
- Examples
- --------
- >>> with suppress_warnings() as sup:
- ... sup.filter(DeprecationWarning, "Some text")
- ... sup.filter(module=np.ma.core)
- ... log = sup.record(FutureWarning, "Does this occur?")
- ... command_giving_warnings()
- ... # The FutureWarning was given once, the filtered warnings were
- ... # ignored. All other warnings abide outside settings (may be
- ... # printed/error)
- ... assert_(len(log) == 1)
- ... assert_(len(sup.log) == 1) # also stored in log attribute
- Or as a decorator:
- >>> sup = suppress_warnings()
- >>> sup.filter(module=np.ma.core) # module must match exact
- >>> @sup
- >>> def some_function():
- ... # do something which causes a warning in np.ma.core
- ... pass
- """
- def __init__(self, forwarding_rule="always"):
- self._entered = False
-
- # Suppressions are instance or defined inside one with block:
- self._suppressions = []
-
- if forwarding_rule not in {"always", "module", "once", "location"}:
- raise ValueError("unsupported forwarding rule.")
- self._forwarding_rule = forwarding_rule
-
- def _clear_registries(self):
- if hasattr(warnings, "_filters_mutated"):
- # clearing the registry should not be necessary on new pythons,
- # instead the filters should be mutated.
- warnings._filters_mutated()
- return
- # Simply clear the registry, this should normally be harmless,
- # note that on new pythons it would be invalidated anyway.
- for module in self._tmp_modules:
- if hasattr(module, "__warningregistry__"):
- module.__warningregistry__.clear()
-
- def _filter(self, category=Warning, message="", module=None,
- record=False):
- if record:
- record = [] # The log where to store warnings
- else:
- record = None
- if self._entered:
- if module is None:
- warnings.filterwarnings(
- "always", category=category, message=message)
- else:
- module_regex = module.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=category, message=message,
- module=module_regex)
- self._tmp_modules.add(module)
- self._clear_registries()
-
- self._tmp_suppressions.append(
- (category, message, re.compile(message, re.I), module,
- record))
- else:
- self._suppressions.append(
- (category, message, re.compile(message, re.I), module,
- record))
-
- return record
-
- def filter(self, category=Warning, message="", module=None):
- """
- Add a new suppressing filter or apply it if the state is entered.
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- self._filter(category=category, message=message, module=module,
- record=False)
-
- def record(self, category=Warning, message="", module=None):
- """
- Append a new recording filter or apply it if the state is entered.
- All warnings matching will be appended to the ``log`` attribute.
- Parameters
- ----------
- category : class, optional
- Warning class to filter
- message : string, optional
- Regular expression matching the warning message.
- module : module, optional
- Module to filter for. Note that the module (and its file)
- must match exactly and cannot be a submodule. This may make
- it unreliable for external modules.
- Returns
- -------
- log : list
- A list which will be filled with all matched warnings.
- Notes
- -----
- When added within a context, filters are only added inside
- the context and will be forgotten when the context is exited.
- """
- return self._filter(category=category, message=message,
- module=module, record=True)
-
- def __enter__(self):
- if self._entered:
- raise RuntimeError("cannot enter suppress_warnings twice.")
-
- self._orig_show = warnings.showwarning
- if hasattr(warnings, "_showwarnmsg"):
- self._orig_showmsg = warnings._showwarnmsg
- self._filters = warnings.filters
- warnings.filters = self._filters[:]
-
- self._entered = True
- self._tmp_suppressions = []
- self._tmp_modules = set()
- self._forwarded = set()
-
- self.log = [] # reset global log (no need to keep same list)
-
- for cat, mess, _, mod, log in self._suppressions:
- if log is not None:
- del log[:] # clear the log
- if mod is None:
- warnings.filterwarnings(
- "always", category=cat, message=mess)
- else:
- module_regex = mod.__name__.replace('.', r'\.') + '$'
- warnings.filterwarnings(
- "always", category=cat, message=mess,
- module=module_regex)
- self._tmp_modules.add(mod)
- warnings.showwarning = self._showwarning
- if hasattr(warnings, "_showwarnmsg"):
- warnings._showwarnmsg = self._showwarnmsg
- self._clear_registries()
-
- return self
-
- def __exit__(self, *exc_info):
- warnings.showwarning = self._orig_show
- if hasattr(warnings, "_showwarnmsg"):
- warnings._showwarnmsg = self._orig_showmsg
- warnings.filters = self._filters
- self._clear_registries()
- self._entered = False
- del self._orig_show
- del self._filters
-
- def _showwarnmsg(self, msg):
- self._showwarning(msg.message, msg.category, msg.filename,
- msg.lineno, msg.file, msg.line, use_warnmsg=msg)
-
- def _showwarning(self, message, category, filename, lineno,
- *args, **kwargs):
- use_warnmsg = kwargs.pop("use_warnmsg", None)
- for cat, _, pattern, mod, rec in (
- self._suppressions + self._tmp_suppressions)[::-1]:
- if (issubclass(category, cat) and
- pattern.match(message.args[0]) is not None):
- if mod is None:
- # Message and category match, recorded or ignored
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
- # Use startswith, because warnings strips the c or o from
- # .pyc/.pyo files.
- elif mod.__file__.startswith(filename):
- # The message and module (filename) match
- if rec is not None:
- msg = WarningMessage(message, category, filename,
- lineno, **kwargs)
- self.log.append(msg)
- rec.append(msg)
- return
-
- # There is no filter in place, so pass to the outside handler
- # unless we should only pass it once
- if self._forwarding_rule == "always":
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno,
- *args, **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
- return
-
- if self._forwarding_rule == "once":
- signature = (message.args, category)
- elif self._forwarding_rule == "module":
- signature = (message.args, category, filename)
- elif self._forwarding_rule == "location":
- signature = (message.args, category, filename, lineno)
-
- if signature in self._forwarded:
- return
- self._forwarded.add(signature)
- if use_warnmsg is None:
- self._orig_show(message, category, filename, lineno, *args,
- **kwargs)
- else:
- self._orig_showmsg(use_warnmsg)
-
- def __call__(self, func):
- """
- Function decorator to apply certain suppressions to a whole
- function.
- """
- @wraps(func)
- def new_func(*args, **kwargs):
- with self:
- return func(*args, **kwargs)
-
- return new_func
diff --git a/_randomgen/randomgen/examples/cython/extending_distributions.pyx b/_randomgen/randomgen/examples/cython/extending_distributions.pyx
deleted file mode 100644
index 630d952bf..000000000
--- a/_randomgen/randomgen/examples/cython/extending_distributions.pyx
+++ /dev/null
@@ -1,48 +0,0 @@
-import numpy as np
-cimport numpy as np
-cimport cython
-from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from randomgen.common cimport *
-from randomgen.distributions cimport random_gauss_zig
-from randomgen.xoroshiro128 import Xoroshiro128
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def normals_zig(Py_ssize_t n):
- cdef Py_ssize_t i
- cdef brng_t *rng
- cdef const char *capsule_name = "BasicRNG"
- cdef double[::1] random_values
-
- x = Xoroshiro128()
- capsule = x.capsule
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- rng = <brng_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
- for i in range(n):
- random_values[i] = random_gauss_zig(rng)
- randoms = np.asarray(random_values)
- return randoms
-
-@cython.boundscheck(False)
-@cython.wraparound(False)
-def uniforms(Py_ssize_t n):
- cdef Py_ssize_t i
- cdef brng_t *rng
- cdef const char *capsule_name = "BasicRNG"
- cdef double[::1] random_values
-
- x = Xoroshiro128()
- capsule = x.capsule
- # Optional check that the capsule is from a Basic RNG
- if not PyCapsule_IsValid(capsule, capsule_name):
- raise ValueError("Invalid pointer to anon_func_state")
- # Cast the pointer
- rng = <brng_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
- for i in range(n):
- # Call the function
- random_values[i] = rng.next_double(rng.state)
- randoms = np.asarray(random_values)
- return randoms
\ No newline at end of file
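For reference, a minimal usage sketch of the Cython example deleted above, assuming it has been compiled (for instance with cythonize) into an importable module named extending_distributions:

    >>> from extending_distributions import normals_zig, uniforms
    >>> z = normals_zig(1000)    # standard normals drawn through random_gauss_zig
    >>> z.shape
    (1000,)
    >>> u = uniforms(1000)       # doubles pulled directly from brng.next_double
    >>> bool((u >= 0).all() and (u < 1).all())
    True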
diff --git a/_randomgen/randomgen/legacy/__init__.py b/_randomgen/randomgen/legacy/__init__.py
deleted file mode 100644
index b59b221b4..000000000
--- a/_randomgen/randomgen/legacy/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-from randomgen.legacy.legacy import LegacyGenerator
-
-__all__ = ['LegacyGenerator']
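A minimal sketch of how the removed subpackage was used, based on the LegacyGenerator class deleted in legacy.pyx below (the draws themselves depend on the basic RNG and are not shown here):

    >>> import pickle
    >>> from randomgen.legacy import LegacyGenerator
    >>> lg = LegacyGenerator()                  # defaults to a Xoroshiro128 basic RNG
    >>> st = lg.state                           # basic RNG state plus 'gauss'/'has_gauss'
    >>> lg2 = pickle.loads(pickle.dumps(lg))    # round-trips through __reduce__
    >>> lg2.state['has_gauss'] == st['has_gauss']
    True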
diff --git a/_randomgen/randomgen/legacy/legacy.pyx b/_randomgen/randomgen/legacy/legacy.pyx
deleted file mode 100644
index 33198a624..000000000
--- a/_randomgen/randomgen/legacy/legacy.pyx
+++ /dev/null
@@ -1,1988 +0,0 @@
-#!python
-#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
-from __future__ import absolute_import
-
-import warnings
-import operator
-
-from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from libc.stdlib cimport malloc, free
-cimport numpy as np
-import numpy as np
-cimport cython
-
-try:
- from threading import Lock
-except ImportError:
- from dummy_threading import Lock
-
-from randomgen.common cimport cont, disc, CONS_NONE, CONS_POSITIVE, CONS_NON_NEGATIVE, CONS_BOUNDED_0_1
-from randomgen.distributions cimport brng_t
-from randomgen.legacy.legacy_distributions cimport *
-from randomgen.xoroshiro128 import Xoroshiro128
-import randomgen.pickle
-
-np.import_array()
-
-cdef class LegacyGenerator:
- """
- LegacyGenerator(brng=None)
-
- Container providing legacy generators.
-
- ``LegacyGenerator`` exposes a number of methods for generating random
- numbers for a set of distributions where the method used to produce random
- samples has changed. Three core generators have changed: normal, exponential
- and gamma. These have been replaced by faster Ziggurat-based methods in
- ``RandomGenerator``. ``LegacyGenerator`` retains the slower methods
- to produce samples from these distributions as well as from distributions
- that depend on these such as the Chi-square, power or Weibull.
-
- **No Compatibility Guarantee**
-
- ``LegacyGenerator`` is evolving and so it isn't possible to provide a
- compatibility guarantee like NumPy does. In particular, better algorithms
- have already been added. This will change once ``RandomGenerator``
- stabilizes.
-
- Parameters
- ----------
- brng : Basic RNG, optional
- Basic RNG to use as the core generator. If none is provided, uses
- Xoroshiro128.
-
- Examples
- --------
- Exactly reproducing a NumPy stream requires both a ``RandomGenerator``
- and a ``LegacyGenerator``. These must share a common ``MT19937`` basic
- RNG. Functions that are available in ``LegacyGenerator`` must be called
- from ``LegacyGenerator``, and other functions must be called from
- ``RandomGenerator``.
-
- >>> from randomgen import RandomGenerator, MT19937
- >>> from randomgen.legacy import LegacyGenerator
- >>> mt = MT19937(12345)
- >>> lg = LegacyGenerator(mt)
- >>> rg = RandomGenerator(mt)
- >>> x = lg.standard_normal(10)
- >>> rg.shuffle(x)
- >>> x[0]
- 0.09290787674371767
- >>> lg.standard_exponential()
- 1.6465621229906502
-
- The equivalent commands from NumPy produce identical output.
-
- >>> from numpy.random import RandomState
- >>> rs = RandomState(12345)
- >>> x = rs.standard_normal(10)
- >>> rs.shuffle(x)
- >>> x[0]
- 0.09290787674371767
- >>> rs.standard_exponential()
- 1.6465621229906502
- """
- cdef public object _basicrng
- cdef brng_t *_brng
- cdef aug_brng_t *_aug_state
- cdef object lock
-
- def __init__(self, brng=None):
- if brng is None:
- brng = Xoroshiro128()
- self._basicrng = brng
-
- capsule = brng.capsule
- cdef const char *name = "BasicRNG"
- if not PyCapsule_IsValid(capsule, name):
- raise ValueError("Invalid brng. The brng must be instantized.")
- self._brng = <brng_t *> PyCapsule_GetPointer(capsule, name)
- self._aug_state = <aug_brng_t *>malloc(sizeof(aug_brng_t))
- self._aug_state.basicrng = self._brng
- self._reset_gauss()
- self.lock = Lock()
-
- def __dealloc__(self):
- free(self._aug_state)
-
- def __repr__(self):
- return self.__str__() + ' at 0x{:X}'.format(id(self))
-
- def __str__(self):
- return self.__class__.__name__ + '(' + self._basicrng.__class__.__name__ + ')'
-
- # Pickling support:
- def __getstate__(self):
- return self.state
-
- def __setstate__(self, state):
- self.state = state
-
- def __reduce__(self):
- return (randomgen.pickle.__generator_ctor,
- (self.state['brng'],),
- self.state)
-
- cdef _reset_gauss(self):
- self._aug_state.has_gauss = 0
- self._aug_state.gauss = 0.0
-
- def seed(self, *args, **kwargs):
- """
- Reseed the basic RNG.
-
- Parameters depend on the basic RNG used.
-
- Notes
- -----
- Arguments are directly passed to the basic RNG. This is a convenience
- function.
-
- The best way to reseed is to use the basic RNG instance directly.
- The examples below demonstrate this best practice.
-
- >>> from randomgen import MT19937
- >>> from randomgen.legacy import LegacyGenerator
- >>> brng = MT19937(123456789)
- >>> lg = brng.generator
- >>> brng.seed(987654321)
-
- The method used to create the generator is not important.
-
- >>> brng = MT19937(123456789)
- >>> lg = LegacyGenerator(brng)
- >>> brng.seed(987654321)
-
- These best practice examples are equivalent to
-
- >>> lg = LegacyGenerator(MT19937(123456789))
- >>> lg.seed(987654321)
- """
-
- # TODO: Should this remain
- self._basicrng.seed(*args, **kwargs)
- self._reset_gauss()
- return self
-
- @property
- def state(self):
- """
- Get or set the augmented state
-
- Returns the basic RNG's state as well as two values added to track
- normal generation using the Polar (Box-Muller-like) method.
-
- Returns
- -------
- state : dict
- Dictionary containing the information required to describe the
- state of the Basic RNG with two additional fields, gauss and
- has_gauss, which store a cached normal value generated by the Polar
- (Box-Muller-like) transformation.
- """
- st = self._basicrng.state
- st['has_gauss'] = self._aug_state.has_gauss
- st['gauss'] = self._aug_state.gauss
- return st
-
- @state.setter
- def state(self, value):
- if isinstance(value, tuple):
- if value[0] != 'MT19937':
- raise ValueError('tuple only supported for MT19937')
- st = {'brng': value[0],
- 'state': {'key': value[1], 'pos': value[2]}}
- if len(value) > 3:
- st['has_gauss'] = value[3]
- st['gauss'] = value[4]
- value = st
- self._aug_state.gauss = value.get('gauss', 0.0)
- self._aug_state.has_gauss = value.get('has_gauss', 0)
- self._basicrng.state = value
-
- def beta(self, a, b, size=None):
- """
- beta(a, b, size=None)
-
- Draw samples from a Beta distribution.
-
- The Beta distribution is a special case of the Dirichlet distribution,
- and is related to the Gamma distribution. It has the probability
- distribution function
-
- .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}
- (1 - x)^{\\beta - 1},
-
- where the normalization, B, is the beta function,
-
- .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}
- (1 - t)^{\\beta - 1} dt.
-
- It is often seen in Bayesian inference and order statistics.
-
- Parameters
- ----------
- a : float or array_like of floats
- Alpha, positive (>0).
- b : float or array_like of floats
- Beta, positive (>0).
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` and ``b`` are both scalars.
- Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized beta distribution.
-
- """
- return cont(&legacy_beta, self._aug_state, size, self.lock, 2,
- a, 'a', CONS_POSITIVE,
- b, 'b', CONS_POSITIVE,
- 0.0, '', CONS_NONE, None)
-
- def exponential(self, scale=1.0, size=None):
- """
- exponential(scale=1.0, size=None)
-
- Draw samples from an exponential distribution.
-
- Its probability density function is
-
- .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
-
- for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
- which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
- The rate parameter is an alternative, widely used parameterization
- of the exponential distribution [3]_.
-
- The exponential distribution is a continuous analogue of the
- geometric distribution. It describes many common situations, such as
- the size of raindrops measured over many rainstorms [1]_, or the time
- between page requests to Wikipedia [2]_.
-
- Parameters
- ----------
- scale : float or array_like of floats
- The scale parameter, :math:`\\beta = 1/\\lambda`.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``scale`` is a scalar. Otherwise,
- ``np.array(scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized exponential distribution.
-
- References
- ----------
- .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
- Random Signal Principles", 4th ed, 2001, p. 57.
- .. [2] Wikipedia, "Poisson process",
- https://en.wikipedia.org/wiki/Poisson_process
- .. [3] Wikipedia, "Exponential distribution",
- https://en.wikipedia.org/wiki/Exponential_distribution
-
- """
- return cont(&legacy_exponential, self._aug_state, size, self.lock, 1,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE,
- None)
-
- def standard_exponential(self, size=None):
- """
- standard_exponential(size=None)
-
- Draw samples from the standard exponential distribution.
-
- `standard_exponential` is identical to the exponential distribution
- with a scale parameter of 1.
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- out : float or ndarray
- Drawn samples.
-
- Examples
- --------
- Output a 3x8000 array:
-
- >>> n = randomgen.generator.standard_exponential((3, 8000))
- """
- return cont(&legacy_standard_exponential, self._aug_state, size, self.lock, 0,
- None, None, CONS_NONE,
- None, None, CONS_NONE,
- None, None, CONS_NONE,
- None)
-
- def randn(self, *args):
- """
- randn(d0, d1, ..., dn)
-
- Return a sample (or samples) from the "standard normal" distribution.
-
- If positive, int_like or int-convertible arguments are provided,
- `randn` generates an array of shape ``(d0, d1, ..., dn)``, filled
- with random floats sampled from a univariate "normal" (Gaussian)
- distribution of mean 0 and variance 1 (if any of the :math:`d_i` are
- floats, they are first converted to integers by truncation). A single
- float randomly sampled from the distribution is returned if no
- argument is provided.
-
- This is a convenience function. If you want an interface that takes a
- tuple as the first argument, use `standard_normal` instead.
-
- Parameters
- ----------
- d0, d1, ..., dn : int, optional
- The dimensions of the returned array, should be all positive.
- If no argument is given a single Python float is returned.
-
- Returns
- -------
- Z : ndarray or float
- A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
- the standard normal distribution, or a single such float if
- no parameters were supplied.
-
- See Also
- --------
- standard_normal : Similar, but takes a tuple as its argument.
-
- Notes
- -----
- For random samples from :math:`N(\\mu, \\sigma^2)`, use:
-
- ``sigma * randomgen.generator.randn(...) + mu``
-
- Examples
- --------
- >>> randomgen.generator.randn()
- 2.1923875335537315 #random
-
- Two-by-four array of samples from N(3, 6.25):
-
- >>> 2.5 * randomgen.generator.randn(2, 4) + 3
- array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], #random
- [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) #random
-
- """
- if len(args) == 0:
- return self.standard_normal()
- else:
- return self.standard_normal(size=args)
-
- # Complicated, continuous distributions:
- def standard_normal(self, size=None):
- """
- standard_normal(size=None)
-
- Draw samples from a standard Normal distribution (mean=0, stdev=1).
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- out : float or ndarray
- Drawn samples.
-
- Examples
- --------
- >>> s = randomgen.generator.standard_normal(8000)
- >>> s
- array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, #random
- -0.38672696, -0.4685006 ]) #random
- >>> s.shape
- (8000,)
- >>> s = randomgen.generator.standard_normal(size=(3, 4, 2))
- >>> s.shape
- (3, 4, 2)
-
- """
- return cont(&legacy_gauss, self._aug_state, size, self.lock, 0,
- None, None, CONS_NONE,
- None, None, CONS_NONE,
- None, None, CONS_NONE,
- None)
-
- def normal(self, loc=0.0, scale=1.0, size=None):
- """
- normal(loc=0.0, scale=1.0, size=None)
-
- Draw random samples from a normal (Gaussian) distribution.
-
- The probability density function of the normal distribution, first
- derived by De Moivre and 200 years later by both Gauss and Laplace
- independently [2]_, is often called the bell curve because of
- its characteristic shape (see the example below).
-
- The normal distribution occurs often in nature. For example, it
- describes the commonly occurring distribution of samples influenced
- by a large number of tiny, random disturbances, each with its own
- unique distribution [2]_.
-
- Parameters
- ----------
- loc : float or array_like of floats
- Mean ("centre") of the distribution.
- scale : float or array_like of floats
- Standard deviation (spread or "width") of the distribution.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``loc`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized normal distribution.
-
- See Also
- --------
- scipy.stats.norm : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The probability density for the Gaussian distribution is
-
- .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}
- e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },
-
- where :math:`\\mu` is the mean and :math:`\\sigma` the standard
- deviation. The square of the standard deviation, :math:`\\sigma^2`,
- is called the variance.
-
- The function has its peak at the mean, and its "spread" increases with
- the standard deviation (the function reaches 0.607 times its maximum at
- :math:`\\mu + \\sigma` and :math:`\\mu - \\sigma` [2]_). This implies that
- `numpy.random.normal` is more likely to return samples lying close to
- the mean, rather than those far away.
-
- References
- ----------
- .. [1] Wikipedia, "Normal distribution",
- https://en.wikipedia.org/wiki/Normal_distribution
- .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
- Random Variables and Random Signal Principles", 4th ed., 2001,
- pp. 51, 51, 125.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> mu, sigma = 0, 0.1 # mean and standard deviation
- >>> s = randomgen.generator.normal(mu, sigma, 1000)
-
- Verify the mean and the variance:
-
- >>> abs(mu - np.mean(s)) < 0.01
- True
-
- >>> abs(sigma - np.std(s, ddof=1)) < 0.01
- True
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 30, density=True)
- >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
- ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
- ... linewidth=2, color='r')
- >>> plt.show()
-
- """
- return cont(&legacy_normal, self._aug_state, size, self.lock, 2,
- loc, '', CONS_NONE,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- None)
-
- def standard_gamma(self, shape, size=None):
- """
- standard_gamma(shape, size=None)
-
- Draw samples from a standard Gamma distribution.
-
- Samples are drawn from a Gamma distribution with specified parameters,
- shape (sometimes designated "k") and scale=1.
-
- Parameters
- ----------
- shape : float or array_like of floats
- Parameter, should be > 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``shape`` is a scalar. Otherwise,
- ``np.array(shape).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized standard gamma distribution.
-
- See Also
- --------
- scipy.stats.gamma : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The probability density for the Gamma distribution is
-
- .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
-
- where :math:`k` is the shape and :math:`\\theta` the scale,
- and :math:`\\Gamma` is the Gamma function.
-
- The Gamma distribution is often used to model the times to failure of
- electronic components, and arises naturally in processes for which the
- waiting times between Poisson distributed events are relevant.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
- Wolfram Web Resource.
- http://mathworld.wolfram.com/GammaDistribution.html
- .. [2] Wikipedia, "Gamma distribution",
- https://en.wikipedia.org/wiki/Gamma_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> shape, scale = 2., 1. # mean and width
- >>> s = randomgen.generator.standard_gamma(shape, 1000000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> import scipy.special as sps
- >>> count, bins, ignored = plt.hist(s, 50, density=True)
- >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\
- ... (sps.gamma(shape) * scale**shape))
- >>> plt.plot(bins, y, linewidth=2, color='r')
- >>> plt.show()
- """
- return cont(&legacy_standard_gamma, self._aug_state, size, self.lock, 1,
- shape, 'shape', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE,
- None)
-
- def gamma(self, shape, scale=1.0, size=None):
- """
- gamma(shape, scale=1.0, size=None)
-
- Draw samples from a Gamma distribution.
-
- Samples are drawn from a Gamma distribution with specified parameters,
- `shape` (sometimes designated "k") and `scale` (sometimes designated
- "theta"), where both parameters are > 0.
-
- Parameters
- ----------
- shape : float or array_like of floats
- The shape of the gamma distribution. Should be greater than zero.
- scale : float or array_like of floats, optional
- The scale of the gamma distribution. Should be greater than zero.
- Default is equal to 1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``shape`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized gamma distribution.
-
- See Also
- --------
- scipy.stats.gamma : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The probability density for the Gamma distribution is
-
- .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
-
- where :math:`k` is the shape and :math:`\\theta` the scale,
- and :math:`\\Gamma` is the Gamma function.
-
- The Gamma distribution is often used to model the times to failure of
- electronic components, and arises naturally in processes for which the
- waiting times between Poisson distributed events are relevant.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
- Wolfram Web Resource.
- http://mathworld.wolfram.com/GammaDistribution.html
- .. [2] Wikipedia, "Gamma distribution",
- https://en.wikipedia.org/wiki/Gamma_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> shape, scale = 2., 2. # mean and dispersion
- >>> s = randomgen.generator.gamma(shape, scale, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> import scipy.special as sps
- >>> count, bins, ignored = plt.hist(s, 50, density=True)
- >>> y = bins**(shape-1)*(np.exp(-bins/scale) /
- ... (sps.gamma(shape)*scale**shape))
- >>> plt.plot(bins, y, linewidth=2, color='r')
- >>> plt.show()
-
- """
- return cont(&legacy_gamma, self._aug_state, size, self.lock, 2,
- shape, 'shape', CONS_NON_NEGATIVE,
- scale, 'scale', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def f(self, dfnum, dfden, size=None):
- """
- f(dfnum, dfden, size=None)
-
- Draw samples from an F distribution.
-
- Samples are drawn from an F distribution with specified parameters,
- `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
- freedom in denominator), where both parameters should be greater than
- zero.
-
- The random variate of the F distribution (also known as the
- Fisher distribution) is a continuous probability distribution
- that arises in ANOVA tests, and is the ratio of two chi-square
- variates.
-
- Parameters
- ----------
- dfnum : int or array_like of ints
- Degrees of freedom in numerator. Should be greater than zero.
- dfden : int or array_like of ints
- Degrees of freedom in denominator. Should be greater than zero.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
- Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Fisher distribution.
-
- See Also
- --------
- scipy.stats.f : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The F statistic is used to compare in-group variances to between-group
- variances. Calculating the distribution depends on the sampling, and
- so it is a function of the respective degrees of freedom in the
- problem. The variable `dfnum` is the number of groups minus one, the
- between-groups degrees of freedom, while `dfden` is the within-groups
- degrees of freedom, the sum of the number of samples in each group
- minus the number of groups.
-
- References
- ----------
- .. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
- Fifth Edition, 2002.
- .. [2] Wikipedia, "F-distribution",
- https://en.wikipedia.org/wiki/F-distribution
-
- Examples
- --------
- An example from Glantz [1]_, pp 47-40:
-
- Two groups, children of diabetics (25 people) and children from people
- without diabetes (25 controls). Fasting blood glucose was measured,
- case group had a mean value of 86.1, controls had a mean value of
- 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these
- data consistent with the null hypothesis that the parents' diabetic
- status does not affect their children's blood glucose levels?
- Calculating the F statistic from the data gives a value of 36.01.
-
- Draw samples from the distribution:
-
- >>> dfnum = 1. # between group degrees of freedom
- >>> dfden = 48. # within groups degrees of freedom
- >>> s = randomgen.generator.f(dfnum, dfden, 1000)
-
- The lower bound for the top 1% of the samples is :
-
- >>> np.sort(s)[-10]
- 7.61988120985 # random
-
- So there is about a 1% chance that the F statistic will exceed 7.62,
- the measured value is 36, so the null hypothesis is rejected at the 1%
- level.
-
- """
- return cont(&legacy_f, self._aug_state, size, self.lock, 2,
- dfnum, 'dfnum', CONS_POSITIVE,
- dfden, 'dfden', CONS_POSITIVE,
- 0.0, '', CONS_NONE, None)
-
- def noncentral_f(self, dfnum, dfden, nonc, size=None):
- """
- noncentral_f(dfnum, dfden, nonc, size=None)
-
- Draw samples from the noncentral F distribution.
-
- Samples are drawn from an F distribution with specified parameters,
- `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
- freedom in denominator), where both parameters > 1.
- `nonc` is the non-centrality parameter.
-
- Parameters
- ----------
- dfnum : int or array_like of ints
- Parameter, should be > 1.
- dfden : int or array_like of ints
- Parameter, should be > 1.
- nonc : float or array_like of floats
- Parameter, should be >= 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``dfnum``, ``dfden``, and ``nonc``
- are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``
- samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized noncentral Fisher distribution.
-
- Notes
- -----
- When calculating the power of an experiment (power = probability of
- rejecting the null hypothesis when a specific alternative is true) the
- non-central F statistic becomes important. When the null hypothesis is
- true, the F statistic follows a central F distribution. When the null
- hypothesis is not true, then it follows a non-central F statistic.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Noncentral F-Distribution."
- From MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/NoncentralF-Distribution.html
- .. [2] Wikipedia, "Noncentral F-distribution",
- https://en.wikipedia.org/wiki/Noncentral_F-distribution
-
- Examples
- --------
- In a study, testing for a specific alternative to the null hypothesis
- requires use of the Noncentral F distribution. We need to calculate the
- area in the tail of the distribution that exceeds the value of the F
- distribution for the null hypothesis. We'll plot the two probability
- distributions for comparison.
-
- >>> dfnum = 3 # between group deg of freedom
- >>> dfden = 20 # within groups degrees of freedom
- >>> nonc = 3.0
- >>> nc_vals = randomgen.generator.noncentral_f(dfnum, dfden, nonc, 1000000)
- >>> NF = np.histogram(nc_vals, bins=50, density=True)
- >>> c_vals = randomgen.generator.f(dfnum, dfden, 1000000)
- >>> F = np.histogram(c_vals, bins=50, density=True)
- >>> import matplotlib.pyplot as plt
- >>> plt.plot(F[1][1:], F[0])
- >>> plt.plot(NF[1][1:], NF[0])
- >>> plt.show()
-
- """
- return cont(&legacy_noncentral_f, self._aug_state, size, self.lock, 3,
- dfnum, 'dfnum', CONS_POSITIVE,
- dfden, 'dfden', CONS_POSITIVE,
- nonc, 'nonc', CONS_NON_NEGATIVE, None)
-
- def chisquare(self, df, size=None):
- """
- chisquare(df, size=None)
-
- Draw samples from a chi-square distribution.
-
- When `df` independent random variables, each with standard normal
- distributions (mean 0, variance 1), are squared and summed, the
- resulting distribution is chi-square (see Notes). This distribution
- is often used in hypothesis testing.
-
- Parameters
- ----------
- df : int or array_like of ints
- Number of degrees of freedom.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``df`` is a scalar. Otherwise,
- ``np.array(df).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized chi-square distribution.
-
- Raises
- ------
- ValueError
- When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
- is given.
-
- Notes
- -----
- The variable obtained by summing the squares of `df` independent,
- standard normally distributed random variables:
-
- .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i
-
- is chi-square distributed, denoted
-
- .. math:: Q \\sim \\chi^2_k.
-
- The probability density function of the chi-squared distribution is
-
- .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}
- x^{k/2 - 1} e^{-x/2},
-
- where :math:`\\Gamma` is the gamma function,
-
- .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.
-
- References
- ----------
- .. [1] NIST "Engineering Statistics Handbook"
- https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
-
- Examples
- --------
- >>> randomgen.generator.chisquare(2,4)
- array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
-
- """
- return cont(&legacy_chisquare, self._aug_state, size, self.lock, 1,
- df, 'df', CONS_POSITIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def noncentral_chisquare(self, df, nonc, size=None):
- """
- noncentral_chisquare(df, nonc, size=None)
-
- Draw samples from a noncentral chi-square distribution.
-
- The noncentral :math:`\\chi^2` distribution is a generalisation of
- the :math:`\\chi^2` distribution.
-
- Parameters
- ----------
- df : int or array_like of ints
- Degrees of freedom, should be > 0 as of NumPy 1.10.0,
- should be > 1 for earlier versions.
- nonc : float or array_like of floats
- Non-centrality, should be non-negative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``df`` and ``nonc`` are both scalars.
- Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized noncentral chi-square distribution.
-
- Notes
- -----
- The probability density function for the noncentral Chi-square
- distribution is
-
- .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
- \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
- P_{Y_{df+2i}}(x),
-
- where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
-
- References
- ----------
- .. [1] Wikipedia, "Noncentral chi-square distribution"
- https://en.wikipedia.org/wiki/Noncentral_chi-square_distribution
-
- Examples
- --------
- Draw values from the distribution and plot the histogram
-
- >>> import matplotlib.pyplot as plt
- >>> values = plt.hist(randomgen.generator.noncentral_chisquare(3, 20, 100000),
- ... bins=200, density=True)
- >>> plt.show()
-
- Draw values from a noncentral chisquare with very small noncentrality,
- and compare to a chisquare.
-
- >>> plt.figure()
- >>> values = plt.hist(randomgen.generator.noncentral_chisquare(3, .0000001, 100000),
- ... bins=np.arange(0., 25, .1), density=True)
- >>> values2 = plt.hist(randomgen.generator.chisquare(3, 100000),
- ... bins=np.arange(0., 25, .1), density=True)
- >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
- >>> plt.show()
-
- Demonstrate how large values of non-centrality lead to a more symmetric
- distribution.
-
- >>> plt.figure()
- >>> values = plt.hist(randomgen.generator.noncentral_chisquare(3, 20, 100000),
- ... bins=200, density=True)
- >>> plt.show()
-
- """
- return cont(&legacy_noncentral_chisquare, self._aug_state, size, self.lock, 2,
- df, 'df', CONS_POSITIVE,
- nonc, 'nonc', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def standard_cauchy(self, size=None):
- """
- standard_cauchy(size=None)
-
- Draw samples from a standard Cauchy distribution with mode = 0.
-
- Also known as the Lorentz distribution.
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- samples : ndarray or scalar
- The drawn samples.
-
- Notes
- -----
- The probability density function for the full Cauchy distribution is
-
- .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+
- (\\frac{x-x_0}{\\gamma})^2 \\bigr] }
-
- and the Standard Cauchy distribution just sets :math:`x_0=0` and
- :math:`\\gamma=1`
-
- The Cauchy distribution arises in the solution to the driven harmonic
- oscillator problem, and also describes spectral line broadening. It
- also describes the distribution of values at which a line tilted at
- a random angle will cut the x axis.
-
- When studying hypothesis tests that assume normality, seeing how the
- tests perform on data from a Cauchy distribution is a good indicator of
- their sensitivity to a heavy-tailed distribution, since the Cauchy looks
- very much like a Gaussian distribution, but with heavier tails.
-
- References
- ----------
- .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
- Distribution",
- http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
- .. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
- Wolfram Web Resource.
- http://mathworld.wolfram.com/CauchyDistribution.html
- .. [3] Wikipedia, "Cauchy distribution"
- https://en.wikipedia.org/wiki/Cauchy_distribution
-
- Examples
- --------
- Draw samples and plot the distribution:
-
- >>> s = randomgen.generator.standard_cauchy(1000000)
- >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
- >>> import matplotlib.pyplot as plt
- >>> plt.hist(s, bins=100)
- >>> plt.show()
-
- """
- return cont(&legacy_standard_cauchy, self._aug_state, size, self.lock, 0,
- 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None)
-
- def standard_t(self, df, size=None):
- """
- standard_t(df, size=None)
-
- Draw samples from a standard Student's t distribution with `df` degrees
- of freedom.
-
- A special case of the hyperbolic distribution. As `df` gets
- large, the result resembles that of the standard normal
- distribution (`standard_normal`).
-
- Parameters
- ----------
- df : int or array_like of ints
- Degrees of freedom, should be > 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``df`` is a scalar. Otherwise,
- ``np.array(df).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized standard Student's t distribution.
-
- Notes
- -----
- The probability density function for the t distribution is
-
- .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}
- \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}
-
- The t test is based on an assumption that the data come from a
- Normal distribution. The t test provides a way to test whether
- the sample mean (that is the mean calculated from the data) is
- a good estimate of the true mean.
-
- The derivation of the t-distribution was first published in
- 1908 by William Gosset while working for the Guinness Brewery
- in Dublin. Due to proprietary issues, he had to publish under
- a pseudonym, and so he used the name Student.
-
- References
- ----------
- .. [1] Dalgaard, Peter, "Introductory Statistics With R",
- Springer, 2002.
- .. [2] Wikipedia, "Student's t-distribution"
- https://en.wikipedia.org/wiki/Student's_t-distribution
-
- Examples
- --------
- From Dalgaard page 83 [1]_, suppose the daily energy intake for 11
- women in kilojoules (kJ) is:
-
- >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\
- ... 7515, 8230, 8770])
-
- Does their energy intake deviate systematically from the recommended
- value of 7725 kJ?
-
- We have 10 degrees of freedom, so is the sample mean within 95% of the
- recommended value?
-
- >>> s = randomgen.generator.standard_t(10, size=100000)
- >>> np.mean(intake)
- 6753.636363636364
- >>> intake.std(ddof=1)
- 1142.1232221373727
-
- Calculate the t statistic, setting the ddof parameter to the unbiased
- value so the divisor in the standard deviation will be degrees of
- freedom, N-1.
-
- >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
- >>> import matplotlib.pyplot as plt
- >>> h = plt.hist(s, bins=100, density=True)
-
- For a one-sided t-test, how far out in the distribution does the t
- statistic appear?
-
- >>> np.sum(s<t) / float(len(s))
- 0.0090699999999999999 #random
-
- So the p-value is about 0.009: if the null hypothesis were true, a
- t statistic this far below zero would occur less than 1% of the time,
- so the null hypothesis is rejected at the 1% level.
-
- """
- return cont(&legacy_standard_t, self._aug_state, size, self.lock, 1,
- df, 'df', CONS_POSITIVE,
- 0, '', CONS_NONE,
- 0, '', CONS_NONE,
- None)
-
- def pareto(self, a, size=None):
- """
- pareto(a, size=None)
-
- Draw samples from a Pareto II or Lomax distribution with
- specified shape.
-
- The Lomax or Pareto II distribution is a shifted Pareto
- distribution. The classical Pareto distribution can be
- obtained from the Lomax distribution by adding 1 and
- multiplying by the scale parameter ``m`` (see Notes). The
- smallest value of the Lomax distribution is zero while for the
- classical Pareto distribution it is ``mu``, where the standard
- Pareto distribution has location ``mu = 1``. Lomax can also
- be considered as a simplified version of the Generalized
- Pareto distribution (available in SciPy), with the scale set
- to one and the location set to zero.
-
- The Pareto distribution must be greater than zero, and is
- unbounded above. It is also known as the "80-20 rule". In
- this distribution, 80 percent of the weights are in the lowest
- 20 percent of the range, while the other 20 percent fill the
- remaining 80 percent of the range.
-
- Parameters
- ----------
- a : float or array_like of floats
- Shape of the distribution. Should be greater than zero.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` is a scalar. Otherwise,
- ``np.array(a).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Pareto distribution.
-
- See Also
- --------
- scipy.stats.lomax : probability density function, distribution or
- cumulative density function, etc.
- scipy.stats.genpareto : probability density function, distribution or
- cumulative density function, etc.
-
- Notes
- -----
- The probability density for the Pareto distribution is
-
- .. math:: p(x) = \\frac{am^a}{x^{a+1}}
-
- where :math:`a` is the shape and :math:`m` the scale.
-
- The Pareto distribution, named after the Italian economist
- Vilfredo Pareto, is a power law probability distribution
- useful in many real world problems. Outside the field of
- economics it is generally referred to as the Bradford
- distribution. Pareto developed the distribution to describe
- the distribution of wealth in an economy. It has also found
- use in insurance, web page access statistics, oil field sizes,
- and many other problems, including the download frequency for
- projects in Sourceforge [1]_. It is one of the so-called
- "fat-tailed" distributions.
-
-
- References
- ----------
- .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
- Sourceforge projects.
- .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
- .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
- Values, Birkhauser Verlag, Basel, pp 23-30.
- .. [4] Wikipedia, "Pareto distribution",
- https://en.wikipedia.org/wiki/Pareto_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> a, m = 3., 2. # shape and mode
- >>> s = (randomgen.generator.pareto(a, 1000) + 1) * m
-
- Display the histogram of the samples, along with the probability
- density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, _ = plt.hist(s, 100, density=True)
- >>> fit = a*m**a / bins**(a+1)
- >>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
- >>> plt.show()
-
- """
- return cont(&legacy_pareto, self._aug_state, size, self.lock, 1,
- a, 'a', CONS_POSITIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def weibull(self, a, size=None):
- """
- weibull(a, size=None)
-
- Draw samples from a Weibull distribution.
-
- Draw samples from a 1-parameter Weibull distribution with the given
- shape parameter `a`.
-
- .. math:: X = (-ln(U))^{1/a}
-
- Here, U is drawn from the uniform distribution over (0,1].
-
- The more common 2-parameter Weibull, including a scale parameter
- :math:`\\lambda` is just :math:`X = \\lambda(-ln(U))^{1/a}`.
-
- Parameters
- ----------
- a : float or array_like of floats
- Shape parameter of the distribution. Must be nonnegative.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` is a scalar. Otherwise,
- ``np.array(a).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Weibull distribution.
-
- See Also
- --------
- scipy.stats.weibull_max
- scipy.stats.weibull_min
- scipy.stats.genextreme
- gumbel
-
- Notes
- -----
- The Weibull (or Type III asymptotic extreme value distribution
- for smallest values, SEV Type III, or Rosin-Rammler
- distribution) is one of a class of Generalized Extreme Value
- (GEV) distributions used in modeling extreme value problems.
- This class includes the Gumbel and Frechet distributions.
-
- The probability density for the Weibull distribution is
-
- .. math:: p(x) = \\frac{a}
- {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a},
-
- where :math:`a` is the shape and :math:`\\lambda` the scale.
-
- The function has its peak (the mode) at
- :math:`\\lambda(\\frac{a-1}{a})^{1/a}`.
-
- When ``a = 1``, the Weibull distribution reduces to the exponential
- distribution.
-
- References
- ----------
- .. [1] Waloddi Weibull, Royal Technical University, Stockholm,
- 1939 "A Statistical Theory Of The Strength Of Materials",
- Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,
- Generalstabens Litografiska Anstalts Forlag, Stockholm.
- .. [2] Waloddi Weibull, "A Statistical Distribution Function of
- Wide Applicability", Journal Of Applied Mechanics ASME Paper
- 1951.
- .. [3] Wikipedia, "Weibull distribution",
- https://en.wikipedia.org/wiki/Weibull_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> a = 5. # shape
- >>> s = randomgen.generator.weibull(a, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> x = np.arange(1,100.)/50.
- >>> def weib(x,n,a):
- ... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
-
- >>> count, bins, ignored = plt.hist(randomgen.generator.weibull(5.,1000))
- >>> x = np.arange(1,100.)/50.
- >>> scale = count.max()/weib(x, 1., 5.).max()
- >>> plt.plot(x, weib(x, 1., 5.)*scale)
- >>> plt.show()
-
- """
- return cont(&legacy_weibull, self._aug_state, size, self.lock, 1,
- a, 'a', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def power(self, a, size=None):
- """
- power(a, size=None)
-
- Draws samples in [0, 1] from a power distribution with positive
- exponent a - 1.
-
- Also known as the power function distribution.
-
- Parameters
- ----------
- a : float or array_like of floats
- Parameter of the distribution. Should be greater than zero.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``a`` is a scalar. Otherwise,
- ``np.array(a).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized power distribution.
-
- Raises
- ------
- ValueError
- If a <= 0.
-
- Notes
- -----
- The probability density function is
-
- .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0.
-
- The power function distribution is just the inverse of the Pareto
- distribution. It may also be seen as a special case of the Beta
- distribution.
-
- It is used, for example, in modeling the over-reporting of insurance
- claims.
-
- References
- ----------
- .. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions
- in economics and actuarial sciences", Wiley, 2003.
- .. [2] Heckert, N. A. and Filliben, James J. "NIST Handbook 148:
- Dataplot Reference Manual, Volume 2: Let Subcommands and Library
- Functions", National Institute of Standards and Technology
- Handbook Series, June 2003.
- http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> a = 5. # shape
- >>> samples = 1000
- >>> s = randomgen.generator.power(a, samples)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, bins=30)
- >>> x = np.linspace(0, 1, 100)
- >>> y = a*x**(a-1.)
- >>> normed_y = samples*np.diff(bins)[0]*y
- >>> plt.plot(x, normed_y)
- >>> plt.show()
-
- Compare the power function distribution to the inverse of the Pareto.
-
- >>> from scipy import stats
- >>> rvs = randomgen.generator.power(5, 1000000)
- >>> rvsp = randomgen.generator.pareto(5, 1000000)
- >>> xx = np.linspace(0,1,100)
- >>> powpdf = stats.powerlaw.pdf(xx,5)
-
- >>> plt.figure()
- >>> plt.hist(rvs, bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-')
- >>> plt.title('randomgen.generator.power(5)')
-
- >>> plt.figure()
- >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-')
- >>> plt.title('inverse of 1 + randomgen.generator.pareto(5)')
-
- >>> plt.figure()
- >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
- >>> plt.plot(xx,powpdf,'r-')
- >>> plt.title('inverse of stats.pareto(5)')
-
- """
- return cont(&legacy_power, self._aug_state, size, self.lock, 1,
- a, 'a', CONS_POSITIVE,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE, None)
-
- def lognormal(self, mean=0.0, sigma=1.0, size=None):
- """
- lognormal(mean=0.0, sigma=1.0, size=None)
-
- Draw samples from a log-normal distribution.
-
- Draw samples from a log-normal distribution with specified mean,
- standard deviation, and array shape. Note that the mean and standard
- deviation are not the values for the distribution itself, but of the
- underlying normal distribution it is derived from.
-
- Parameters
- ----------
- mean : float or array_like of floats, optional
- Mean value of the underlying normal distribution. Default is 0.
- sigma : float or array_like of floats, optional
- Standard deviation of the underlying normal distribution. Should
- be greater than zero. Default is 1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``mean`` and ``sigma`` are both scalars.
- Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized log-normal distribution.
-
- See Also
- --------
- scipy.stats.lognorm : probability density function, distribution,
- cumulative density function, etc.
-
- Notes
- -----
- A variable `x` has a log-normal distribution if `log(x)` is normally
- distributed. The probability density function for the log-normal
- distribution is:
-
- .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}}
- e^{(-\\frac{(ln(x)-\\mu)^2}{2\\sigma^2})}
-
- where :math:`\\mu` is the mean and :math:`\\sigma` is the standard
- deviation of the normally distributed logarithm of the variable.
- A log-normal distribution results if a random variable is the *product*
- of a large number of independent, identically-distributed variables in
- the same way that a normal distribution results if the variable is the
- *sum* of a large number of independent, identically-distributed
- variables.
-
- References
- ----------
- .. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
- Distributions across the Sciences: Keys and Clues,"
- BioScience, Vol. 51, No. 5, May, 2001.
- https://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
- .. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
- Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> mu, sigma = 3., 1. # mean and standard deviation
- >>> s = randomgen.generator.lognormal(mu, sigma, 1000)
-
- Display the histogram of the samples, along with
- the probability density function:
-
- >>> import matplotlib.pyplot as plt
- >>> count, bins, ignored = plt.hist(s, 100, density=True, align='mid')
-
- >>> x = np.linspace(min(bins), max(bins), 10000)
- >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
- ... / (x * sigma * np.sqrt(2 * np.pi)))
-
- >>> plt.plot(x, pdf, linewidth=2, color='r')
- >>> plt.axis('tight')
- >>> plt.show()
-
- Demonstrate that taking the products of random samples from a normal
- distribution can be fit well by a log-normal probability density
- function.
-
- >>> # Generate a thousand samples: each is the product of 100 random
- >>> # values, drawn from a normal distribution.
- >>> b = []
- >>> for i in range(1000):
- ... a = 10. + randomgen.generator.randn(100)
- ... b.append(np.product(a))
-
- >>> b = np.array(b) / np.min(b) # scale values to be positive
- >>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
- >>> sigma = np.std(np.log(b))
- >>> mu = np.mean(np.log(b))
-
- >>> x = np.linspace(min(bins), max(bins), 10000)
- >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
- ... / (x * sigma * np.sqrt(2 * np.pi)))
-
- >>> plt.plot(x, pdf, color='r', linewidth=2)
- >>> plt.show()
-
- """
- return cont(&legacy_lognormal, self._aug_state, size, self.lock, 2,
- mean, 'mean', CONS_NONE,
- sigma, 'sigma', CONS_NON_NEGATIVE,
- 0.0, '', CONS_NONE, None)
-
- def wald(self, mean, scale, size=None):
- """
- wald(mean, scale, size=None)
-
- Draw samples from a Wald, or inverse Gaussian, distribution.
-
- As the scale approaches infinity, the distribution becomes more like a
- Gaussian. Some references claim that the Wald is an inverse Gaussian
- with mean equal to 1, but this is by no means universal.
-
- The inverse Gaussian distribution was first studied in relationship to
- Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
- because there is an inverse relationship between the time to cover a
- unit distance and distance covered in unit time.
-
- Parameters
- ----------
- mean : float or array_like of floats
- Distribution mean, must be > 0.
- scale : float or array_like of floats
- Scale parameter, must be > 0.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``mean`` and ``scale`` are both scalars.
- Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized Wald distribution.
-
- Notes
- -----
- The probability density function for the Wald distribution is
-
- .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^
- \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}
-
- As noted above, the inverse Gaussian distribution first arose
- from attempts to model Brownian motion. It is also a
- competitor to the Weibull for use in reliability modeling and
- modeling stock returns and interest rate processes.
-
- References
- ----------
- .. [1] Brighton Webs Ltd., Wald Distribution,
- https://web.archive.org/web/20090423014010/http://www.brighton-webs.co.uk:80/distributions/wald.asp
- .. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
- Distribution: Theory, Methodology, and Applications", CRC Press,
- 1988.
- .. [3] Wikipedia, "Wald distribution"
- https://en.wikipedia.org/wiki/Wald_distribution
-
- Examples
- --------
- Draw values from the distribution and plot the histogram:
-
- >>> import matplotlib.pyplot as plt
- >>> h = plt.hist(randomgen.generator.wald(3, 2, 100000), bins=200, density=True)
- >>> plt.show()
-
- """
- return cont(&legacy_wald, self._aug_state, size, self.lock, 2,
- mean, 'mean', CONS_POSITIVE,
- scale, 'scale', CONS_POSITIVE,
- 0.0, '', CONS_NONE, None)
-
-
-
- def negative_binomial(self, n, p, size=None):
- """
- negative_binomial(n, p, size=None)
-
- Draw samples from a negative binomial distribution.
-
- Samples are drawn from a negative binomial distribution with specified
- parameters, `n` successes and `p` probability of success where `n` is an
- integer > 0 and `p` is in the interval [0, 1].
-
- Parameters
- ----------
- n : int or array_like of ints
- Parameter of the distribution, > 0. Floats are also accepted,
- but they will be truncated to integers.
- p : float or array_like of floats
- Parameter of the distribution, >= 0 and <=1.
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``n`` and ``p`` are both scalars.
- Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized negative binomial distribution,
- where each sample is equal to N, the number of failures that
- occurred before a total of n successes was reached.
-
- Notes
- -----
- The probability density for the negative binomial distribution is
-
- .. math:: P(N;n,p) = \\binom{N+n-1}{N}p^{n}(1-p)^{N},
-
- where :math:`n` is the number of successes, :math:`p` is the
- probability of success, and :math:`N+n` is the number of trials.
- The negative binomial distribution gives the probability of N
- failures given n successes, with a success on the last trial.
-
- If one throws a die repeatedly until the third time a "1" appears,
- then the probability distribution of the number of non-"1"s that
- appear before the third "1" is a negative binomial distribution.
-
- References
- ----------
- .. [1] Weisstein, Eric W. "Negative Binomial Distribution." From
- MathWorld--A Wolfram Web Resource.
- http://mathworld.wolfram.com/NegativeBinomialDistribution.html
- .. [2] Wikipedia, "Negative binomial distribution",
- https://en.wikipedia.org/wiki/Negative_binomial_distribution
-
- Examples
- --------
- Draw samples from the distribution:
-
- A real world example. A company drills wild-cat oil
- exploration wells, each with an estimated probability of
- success of 0.1. What is the probability of having one success
- for each successive well; that is, what is the probability of a
- single success after drilling 5 wells, after 6 wells, etc.?
-
- >>> s = randomgen.generator.negative_binomial(1, 0.9, 100000)
- >>> for i in range(1, 11): # doctest: +SKIP
- ... probability = sum(s<i) / 100000.
- ... print(i, "wells drilled, probability of one success =", probability)
-
- """
- return disc(&legacy_negative_binomial, self._aug_state, size, self.lock, 2, 0,
- n, 'n', CONS_POSITIVE,
- p, 'p', CONS_BOUNDED_0_1,
- 0.0, '', CONS_NONE)
-
- def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
- tol=1e-8):
- """
- multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
-
- Draw random samples from a multivariate normal distribution.
-
- The multivariate normal, multinormal or Gaussian distribution is a
- generalization of the one-dimensional normal distribution to higher
- dimensions. Such a distribution is specified by its mean and
- covariance matrix. These parameters are analogous to the mean
- (average or "center") and variance (standard deviation, or "width,"
- squared) of the one-dimensional normal distribution.
-
- Parameters
- ----------
- mean : 1-D array_like, of length N
- Mean of the N-dimensional distribution.
- cov : 2-D array_like, of shape (N, N)
- Covariance matrix of the distribution. It must be symmetric and
- positive-semidefinite for proper sampling.
- size : int or tuple of ints, optional
- Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
- generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
- each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
- If no shape is specified, a single (`N`-D) sample is returned.
- check_valid : { 'warn', 'raise', 'ignore' }, optional
- Behavior when the covariance matrix is not positive semidefinite.
- tol : float, optional
- Tolerance when checking the singular values in covariance matrix.
-
- Returns
- -------
- out : ndarray
- The drawn samples, of shape *size*, if that was provided. If not,
- the shape is ``(N,)``.
-
- In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
- value drawn from the distribution.
-
- Notes
- -----
- The mean is a coordinate in N-dimensional space, which represents the
- location where samples are most likely to be generated. This is
- analogous to the peak of the bell curve for the one-dimensional or
- univariate normal distribution.
-
- Covariance indicates the level to which two variables vary together.
- From the multivariate normal distribution, we draw N-dimensional
- samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix
- element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.
- The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its
- "spread").
-
- Instead of specifying the full covariance matrix, popular
- approximations include:
-
- - Spherical covariance (`cov` is a multiple of the identity matrix)
- - Diagonal covariance (`cov` has non-negative elements, and only on
- the diagonal)
-
- This geometrical property can be seen in two dimensions by plotting
- generated data-points:
-
- >>> mean = [0, 0]
- >>> cov = [[1, 0], [0, 100]] # diagonal covariance
-
- Diagonal covariance means that points are oriented along x or y-axis:
-
- >>> import matplotlib.pyplot as plt
- >>> x, y = randomgen.generator.multivariate_normal(mean, cov, 5000).T
- >>> plt.plot(x, y, 'x')
- >>> plt.axis('equal')
- >>> plt.show()
-
- Note that the covariance matrix must be positive semidefinite (a.k.a.
- nonnegative-definite). Otherwise, the behavior of this method is
- undefined and backwards compatibility is not guaranteed.
-
- References
- ----------
- .. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
- Processes," 3rd ed., New York: McGraw-Hill, 1991.
- .. [2] Duda, R. O., Hart, P. E., and Stork, D. G., "Pattern
- Classification," 2nd ed., New York: Wiley, 2001.
-
- Examples
- --------
- >>> mean = (1, 2)
- >>> cov = [[1, 0], [0, 1]]
- >>> x = randomgen.generator.multivariate_normal(mean, cov, (3, 3))
- >>> x.shape
- (3, 3, 2)
-
- The following is probably true, given that 2 is twice the
- standard deviation of each component:
-
- >>> list((x[0,0,:] - mean) < 2)
- [True, True]
-
- """
- from numpy.dual import svd
-
- # Check preconditions on arguments
- mean = np.array(mean)
- cov = np.array(cov)
- if size is None:
- shape = []
- elif isinstance(size, (int, long, np.integer)):
- shape = [size]
- else:
- shape = size
-
- if len(mean.shape) != 1:
- raise ValueError("mean must be 1 dimensional")
- if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
- raise ValueError("cov must be 2 dimensional and square")
- if mean.shape[0] != cov.shape[0]:
- raise ValueError("mean and cov must have same length")
-
- # Compute shape of output and create a matrix of independent
- # standard normally distributed random numbers. The matrix has rows
- # with the same length as mean and as many rows as are necessary to
- # form a matrix of shape final_shape.
- final_shape = list(shape[:])
- final_shape.append(mean.shape[0])
- x = self.standard_normal(final_shape).reshape(-1, mean.shape[0])
-
- # Transform matrix of standard normals into matrix where each row
- # contains multivariate normals with the desired covariance.
- # Compute A such that dot(transpose(A),A) == cov.
- # Then the matrix products of the rows of x and A have the desired
- # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value
- # decomposition of cov is such an A.
- #
- # Also check that cov is positive-semidefinite. If so, the u.T and v
- # matrices should be equal up to roundoff error if cov is
- # symmetric and the singular value of the corresponding row is
- # not zero. We continue to use the SVD rather than Cholesky in
- # order to preserve current outputs. Note that symmetry has not
- # been checked.
-
- (u, s, v) = svd(cov)
-
- if check_valid != 'ignore':
- if check_valid != 'warn' and check_valid != 'raise':
- raise ValueError(
- "check_valid must equal 'warn', 'raise', or 'ignore'")
-
- psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
- if not psd:
- if check_valid == 'warn':
- warnings.warn("covariance is not positive-semidefinite.",
- RuntimeWarning)
- else:
- raise ValueError(
- "covariance is not positive-semidefinite.")
-
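- # Right-multiply each row of x by A = sqrt(s)[:, None] * v and shift by
- # the mean; the rows then have covariance A.T @ A == cov (see the
- # comment block above).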
- x = np.dot(x, np.sqrt(s)[:, None] * v)
- x += mean
- x.shape = tuple(final_shape)
- return x
-
- def dirichlet(self, object alpha, size=None):
- """
- dirichlet(alpha, size=None)
-
- Draw samples from the Dirichlet distribution.
-
- Draw `size` samples of dimension k from a Dirichlet distribution. A
- Dirichlet-distributed random variable can be seen as a multivariate
- generalization of a Beta distribution. The Dirichlet distribution
- is a conjugate prior of a multinomial distribution in Bayesian
- inference.
-
- Parameters
- ----------
- alpha : array
- Parameter of the distribution (length k for a sample of
- dimension k).
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
-
- Returns
- -------
- samples : ndarray
- The drawn samples, of shape (size, k).
-
- Raises
- ------
- ValueError
- If any value in alpha is less than or equal to zero.
-
- Notes
- -----
- The Dirichlet distribution is a distribution over vectors
- :math:`x` that fulfil the conditions :math:`x_i>0` and
- :math:`\\sum_{i=1}^k x_i = 1`.
-
- The probability density function :math:`p` of a
- Dirichlet-distributed random vector :math:`X` is
- proportional to
-
- .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i},
-
- where :math:`\\alpha` is a vector containing the positive
- concentration parameters.
-
- The method uses the following property for computation: let :math:`Y`
- be a random vector which has components that follow a standard gamma
- distribution, then :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y`
- is Dirichlet-distributed.
-
- References
- ----------
- .. [1] David J.C. MacKay, "Information Theory, Inference and Learning
- Algorithms," chapter 23,
- http://www.inference.org.uk/mackay/itila/
- .. [2] Wikipedia, "Dirichlet distribution",
- https://en.wikipedia.org/wiki/Dirichlet_distribution
-
- Examples
- --------
- Taking an example cited in Wikipedia, this distribution can be used if
- one wanted to cut strings (each of initial length 1.0) into K pieces
- with different lengths, where each piece had a designated average
- length, but allowing some variation in the relative sizes of the
- pieces.
-
- >>> s = randomgen.generator.dirichlet((10, 5, 3), 20).transpose()
-
- >>> import matplotlib.pyplot as plt
- >>> plt.barh(range(20), s[0])
- >>> plt.barh(range(20), s[1], left=s[0], color='g')
- >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
- >>> plt.title("Lengths of Strings")
-
- """
-
- #================================
- # Pure Python reference algorithm
- #================================
- #alpha = np.atleast_1d(alpha)
- #k = alpha.size
-
- #if size is None:
- #    val = self.standard_gamma(alpha)
- #    val /= val.sum()
- #else:
- #    n = int(np.prod(size))
- #    val = self.standard_gamma(alpha, size=(n, k))
- #    val /= val.sum(axis=1, keepdims=True)
- #    val = val.reshape(tuple(np.atleast_1d(size)) + (k,))
-
- #return val
-
- cdef np.npy_intp k
- cdef np.npy_intp totsize
- cdef np.ndarray alpha_arr, val_arr
- cdef double *alpha_data
- cdef double *val_data
- cdef np.npy_intp i, j
- cdef double acc, invacc
-
- k = len(alpha)
- alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(alpha, np.NPY_DOUBLE, np.NPY_ALIGNED)
- if np.any(np.less_equal(alpha_arr, 0)):
- raise ValueError('alpha <= 0')
- alpha_data = <double*>np.PyArray_DATA(alpha_arr)
-
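- # Accept either a scalar size or a tuple of dimensions, and append the
- # event dimension k so that each drawn sample is a length-k vector.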
- if size is None:
- shape = (k,)
- else:
- try:
- shape = (operator.index(size), k)
- except TypeError:
- shape = tuple(size) + (k,)
-
- diric = np.zeros(shape, np.float64)
- val_arr = <np.ndarray>diric
- val_data = <double*>np.PyArray_DATA(val_arr)
-
- i = 0
- totsize = np.PyArray_SIZE(val_arr)
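- # For each output vector, draw k independent standard gamma variates with
- # shapes alpha[0..k-1] and normalize them so they sum to one, using the
- # property stated in the Notes section above.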
- with self.lock, nogil:
- while i < totsize:
- acc = 0.0
- for j in range(k):
- val_data[i+j] = legacy_standard_gamma(self._aug_state,
- alpha_data[j])
- acc = acc + val_data[i + j]
- invacc = 1/acc
- for j in range(k):
- val_data[i + j] = val_data[i + j] * invacc
- i = i + k
-
- return diric
diff --git a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128.c b/_randomgen/randomgen/src/xoroshiro128/xoroshiro128.c
deleted file mode 100644
index 981aeab84..000000000
--- a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128.c
+++ /dev/null
@@ -1,58 +0,0 @@
-/* Written in 2016 by David Blackman and Sebastiano Vigna (vigna@acm.org)
-
-To the extent possible under law, the author has dedicated all copyright
-and related and neighboring rights to this software to the public domain
-worldwide. This software is distributed without any warranty.
-
-See <http://creativecommons.org/publicdomain/zero/1.0/>. */
-
-/* This is the successor to xorshift128+. It is the fastest full-period
- generator passing BigCrush without systematic failures, but due to the
- relatively short period it is acceptable only for applications with a
- mild amount of parallelism; otherwise, use a xorshift1024* generator.
-
- Besides passing BigCrush, this generator passes the PractRand test suite
- up to (and including) 16TB, with the exception of binary rank tests, as
- the lowest bit of this generator is an LFSR of degree 128. The next bit
- can be described by an LFSR of degree 8256, but in the long run it will
- fail linearity tests, too. The other bits need a much higher degree to
- be represented as LFSRs.
-
- We suggest using a sign test to extract a random Boolean value, and
- right shifts to extract subsets of bits.
-
- Note that the generator uses a simulated rotate operation, which most C
- compilers will turn into a single instruction. In Java, you can use
- Long.rotateLeft(). In languages that do not make low-level rotation
- instructions accessible, xorshift128+ could be faster.
-
- The state must be seeded so that it is not everywhere zero. If you have
- a 64-bit seed, we suggest seeding a splitmix64 generator and using its
- output to fill s. */
-
-#include "xoroshiro128.h"
-
-extern INLINE uint64_t xoroshiro128_next64(xoroshiro128_state *state);
-
-extern INLINE uint32_t xoroshiro128_next32(xoroshiro128_state *state);
-
-void xoroshiro128_jump(xoroshiro128_state *state) {
- int i, b;
- uint64_t s0;
- uint64_t s1;
- static const uint64_t JUMP[] = {0xbeac0467eba5facb, 0xd86b048b86aa9922};
-
- s0 = 0;
- s1 = 0;
- for (i = 0; i < sizeof JUMP / sizeof *JUMP; i++)
- for (b = 0; b < 64; b++) {
- if (JUMP[i] & UINT64_C(1) << b) {
- s0 ^= state->s[0];
- s1 ^= state->s[1];
- }
- xoroshiro128_next(&state->s[0]);
- }
-
- state->s[0] = s0;
- state->s[1] = s1;
-}
diff --git a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c b/_randomgen/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c
deleted file mode 100644
index c0293cc2b..000000000
--- a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c
+++ /dev/null
@@ -1,73 +0,0 @@
-/* Written in 2016 by David Blackman and Sebastiano Vigna (vigna@acm.org)
-
-To the extent possible under law, the author has dedicated all copyright
-and related and neighboring rights to this software to the public domain
-worldwide. This software is distributed without any warranty.
-
-See <http://creativecommons.org/publicdomain/zero/1.0/>. */
-
-#include <stdint.h>
-
-/* This is the successor to xorshift128+. It is the fastest full-period
- generator passing BigCrush without systematic failures, but due to the
- relatively short period it is acceptable only for applications with a
- mild amount of parallelism; otherwise, use a xorshift1024* generator.
-
- Beside passing BigCrush, this generator passes the PractRand test suite
- up to (and included) 16TB, with the exception of binary rank tests, as
- the lowest bit of this generator is an LFSR of degree 128. The next bit
- can be described by an LFSR of degree 8256, but in the long run it will
- fail linearity tests, too. The other bits needs a much higher degree to
- be represented as LFSRs.
-
- We suggest to use a sign test to extract a random Boolean value, and
- right shifts to extract subsets of bits.
-
- Note that the generator uses a simulated rotate operation, which most C
- compilers will turn into a single instruction. In Java, you can use
- Long.rotateLeft(). In languages that do not make low-level rotation
- instructions accessible xorshift128+ could be faster.
-
- The state must be seeded so that it is not everywhere zero. If you have
- a 64-bit seed, we suggest to seed a splitmix64 generator and use its
- output to fill s. */
-
-uint64_t s[2];
-
-static inline uint64_t rotl(const uint64_t x, int k) {
- return (x << k) | (x >> (64 - k));
-}
-
-uint64_t next(void) {
- const uint64_t s0 = s[0];
- uint64_t s1 = s[1];
- const uint64_t result = s0 + s1;
-
- s1 ^= s0;
- s[0] = rotl(s0, 55) ^ s1 ^ (s1 << 14); // a, b
- s[1] = rotl(s1, 36); // c
-
- return result;
-}
-
-/* This is the jump function for the generator. It is equivalent
- to 2^64 calls to next(); it can be used to generate 2^64
- non-overlapping subsequences for parallel computations. */
-
-void jump(void) {
- static const uint64_t JUMP[] = {0xbeac0467eba5facb, 0xd86b048b86aa9922};
-
- uint64_t s0 = 0;
- uint64_t s1 = 0;
- for (int i = 0; i < sizeof JUMP / sizeof *JUMP; i++)
- for (int b = 0; b < 64; b++) {
- if (JUMP[i] & UINT64_C(1) << b) {
- s0 ^= s[0];
- s1 ^= s[1];
- }
- next();
- }
-
- s[0] = s0;
- s[1] = s1;
-}
diff --git a/_randomgen/randomgen/tests/data/xoroshiro128-testset-1.csv b/_randomgen/randomgen/tests/data/xoroshiro128-testset-1.csv
deleted file mode 100644
index 8289ff27e..000000000
--- a/_randomgen/randomgen/tests/data/xoroshiro128-testset-1.csv
+++ /dev/null
@@ -1,1001 +0,0 @@
-seed, 0xdeadbeaf
-0, 0x86f9f4feeebed928
-1, 0xcd2c9d2d1dc1c071
-2, 0x94741e0555641979
-3, 0x81eed88d38a9138e
-4, 0x54b9f49cd0035507
-5, 0x8c112b85821a173a
-6, 0x51f4c2eabf05c192
-7, 0xefa6a9ad4ca56d94
-8, 0xd3bd969b3aeb5457
-9, 0xcd4f28af5618e25a
-10, 0xa20fd197fb60aad
-11, 0x796b5146f61afc52
-12, 0xf6fe619effce6d62
-13, 0x763ed6613d00e8bd
-14, 0x333d357571b249c
-15, 0xc2a39f35ba8f4ce6
-16, 0xae35d916c6cf8a2b
-17, 0xfdfaa6b7eb9591d3
-18, 0x52668584489de943
-19, 0xca982b4b760effb8
-20, 0x32c7256797c26f09
-21, 0x3115b9539d722cc
-22, 0x5183f1d23dd2f56e
-23, 0xa0a2a2c524e6650c
-24, 0x3191f4b5260e5a2d
-25, 0x1eebe2655fb4f190
-26, 0x5781c6b75a253a88
-27, 0xae45a39543b4140d
-28, 0xfc62724b20d2df78
-29, 0x3770290ba157dc9c
-30, 0xc4731569807dbff4
-31, 0xd42ec771a7a0a08f
-32, 0x743276e0eb868d75
-33, 0x37f95d1ba1faebc6
-34, 0x3b4800b089a323cf
-35, 0x437fa9c71af61839
-36, 0x1cab936dd28c6f1c
-37, 0xe9d60347286dd9f0
-38, 0x716625cbd57bbd63
-39, 0xbd6c0f5e6aea7288
-40, 0x7c782d5b111a89f3
-41, 0xaeb1b9478b99970
-42, 0xbdffccb6a96cb533
-43, 0x9423e6ea789f29f1
-44, 0x53df9a7a0ea73fe6
-45, 0x7d9bec5c15c7e349
-46, 0xd1fc83fcf223aea5
-47, 0xd1dce35338ad3bab
-48, 0x297bd5f1cf79e758
-49, 0x19ec39a6191419da
-50, 0x45e57323ad58071b
-51, 0x395bcbebe1ddf611
-52, 0x22d9065efb013222
-53, 0x4ea2d534fd9fecb3
-54, 0x9b9779e1edeb2e27
-55, 0x1ba777ba576a236e
-56, 0x23cf95e34d62dd3f
-57, 0x6d1689730795e6bd
-58, 0x24e510f9d2f65379
-59, 0x1e12d607e78701e8
-60, 0x3347fe3ddc50a23e
-61, 0x331754f825305d97
-62, 0xf21a675c0344709c
-63, 0xcc9bd2062ae5fb39
-64, 0xd2dcd3a1ee6afe9e
-65, 0xbdb0388382c408d1
-66, 0x19476497aaef64ad
-67, 0x906dfff3b1181dd7
-68, 0x47edd3c528f4c753
-69, 0xba2a0f289279aec2
-70, 0x710bc73fd1c732a9
-71, 0xe0238b1ab604610d
-72, 0x10f68d7eb0d19e19
-73, 0xc13654f8b8f3d9b7
-74, 0x112c76bf71ad04a9
-75, 0x4b9d965062e9a0fd
-76, 0xe0b13496fec19aa8
-77, 0x999493dababe73c8
-78, 0x87104db5240f12fb
-79, 0x8fc8dff016c96a13
-80, 0x3eff4853e8b167a8
-81, 0x438b6f5c3d10b85d
-82, 0xc2f94a0707d949f5
-83, 0x87981b13beefb01f
-84, 0x1718db5072923bb2
-85, 0xbe7ae4310234b5f1
-86, 0x3ad4306f2b2b3b47
-87, 0x9de166baaf152f81
-88, 0xebca2cf057a00802
-89, 0x99bfd19b5e0a87b2
-90, 0x5ae7b3ab9d2623c0
-91, 0x8de5811587a53d2e
-92, 0x629a57a87d068ee4
-93, 0xfd80a82607740601
-94, 0x5758bfc8610d0b8b
-95, 0x8f0c00fab932c1f5
-96, 0x3d49bd296a34582d
-97, 0xc99c1bb8319ce526
-98, 0x1dd5ba47ac1443ba
-99, 0xb5a40a33c9ca1cf9
-100, 0xa1025156b711394c
-101, 0xdb3ef94ee8bc71a4
-102, 0x6d3292123ffa9bc9
-103, 0x4b9683ebf2f98d1f
-104, 0x4d1a4709b547bfe7
-105, 0x3623a9c4054355b1
-106, 0xed15f8852d329b4d
-107, 0x60ef76852f40e346
-108, 0xe64c2bfc6d0ef2dc
-109, 0xf286f874cfb68ee2
-110, 0xb1b07a7ca9268329
-111, 0xf618a9bfe00b7cdd
-112, 0x54a40c4f52cab527
-113, 0x5007a4d41eaf0af1
-114, 0x6fa7f2210a7b7f3a
-115, 0x7b448faa473ad765
-116, 0x901b6276232cb3c2
-117, 0xd69d06b85d118dfd
-118, 0xf8fb03c5dfef937a
-119, 0x5a53e96d5ebc4689
-120, 0xe24e81bbd9772b3c
-121, 0xa996ed94405e1811
-122, 0x7d8712833a4cbd96
-123, 0xd8b81a509f392481
-124, 0x76b52a270551424b
-125, 0x4c854325eaa4ef23
-126, 0xc8823e5a74757b2f
-127, 0x9ac8deb0aa215a3f
-128, 0x89641160b3eeafdd
-129, 0x17781aba3d908856
-130, 0xd12e5f215de0a3b4
-131, 0xd94cd412b8bef057
-132, 0x40e85ebd5844b9e8
-133, 0xa581cf7ef62e70a2
-134, 0x74953df639f8a9a2
-135, 0xaa92c9804434caa6
-136, 0xf186398542a15448
-137, 0xa0888e1233d64da3
-138, 0x277d14f22bc64c91
-139, 0x2851b3b5fc49ad5
-140, 0x68182666788909
-141, 0x5ea068625e49839
-142, 0x63bac5a5d225e8db
-143, 0x2dd9db0ad24aff05
-144, 0x3f637e71528ad6ad
-145, 0xe3b7ba911c4fe47
-146, 0xe4bcf50c8ada7ab6
-147, 0x4470ffb01cd6980c
-148, 0x377cfdbe8e810731
-149, 0xdb33ff37954849c7
-150, 0xb622ead14010ad64
-151, 0x6c44d65c7a81a5cb
-152, 0xd99a3fca5a5d9fce
-153, 0x24e7360e1ee2efd4
-154, 0xbd927a3fb576d81
-155, 0x1ea3f2b7c909ffb7
-156, 0x48aedb2bec244a7e
-157, 0xc17d9539cf53a5f7
-158, 0xe4ea45fcf4de590b
-159, 0xe1d863ebb77cb7de
-160, 0x8ecf0bc8d88fefe4
-161, 0xa881cef3b3209e05
-162, 0x8f34a14a6978afb6
-163, 0xed4e2e5e1f4966fe
-164, 0xede897e11cbe230d
-165, 0xd344af5e50042d6
-166, 0xb2739594ba906c81
-167, 0x83c0bbde6d95b632
-168, 0x6b7ae9d1c4af98b2
-169, 0xc4b8f6816eae17f
-170, 0xf8e3a6bf7855bd3b
-171, 0x9f64ff72d6357488
-172, 0x50b6a304f9543f58
-173, 0x330e8281e591cc6e
-174, 0x15dfdd5af7b421e3
-175, 0x8d3224e62524222c
-176, 0x90d89d139a75b44f
-177, 0xf6efb68e15639dce
-178, 0x98cf64777861f844
-179, 0xa031e78e4b3a7b3a
-180, 0xa3647dbd85c538eb
-181, 0x73656c8c77d9c56
-182, 0x88840b683d4fdb72
-183, 0x3b84749774eac55
-184, 0xb9b753a86ec15b39
-185, 0x31ab026ace06b686
-186, 0x4fd37ef5b5b1d284
-187, 0x7cc6c46fb114a76
-188, 0x463ff22f392dbd4c
-189, 0x188c3718f2068889
-190, 0x769892f97d895302
-191, 0x9838246e76757b6f
-192, 0x546a68f394c391ee
-193, 0xc9e32a7d2a7fb559
-194, 0xd84ac91984217239
-195, 0x82ef273042519aaf
-196, 0x79650a2c9bf2a812
-197, 0xb7aa6dc1c23eaecb
-198, 0x60326b9e25b055d
-199, 0x6b17c296feac0e6a
-200, 0x7813f405baa0d85
-201, 0xb9d52400dcb399d2
-202, 0xfb588178727e0012
-203, 0x448763bafa3d9095
-204, 0xd63fd1757e94e19f
-205, 0x2bc98e05f296e73
-206, 0x9e05ff0a641889cb
-207, 0x1e1716c76c9a8990
-208, 0x9e2f67e555f5389
-209, 0x430a8a612033934b
-210, 0xd49a74a4d8743bf
-211, 0x7b08085a0b4aee34
-212, 0x2c0482984960e7c1
-213, 0xae26bcde5d8fe8fa
-214, 0x8f40022b951f98c9
-215, 0xcc59b599dd0383a6
-216, 0xb6833d7a5e00c373
-217, 0x3e025759aba46bdb
-218, 0x2558a3dd775dee09
-219, 0xdcd8370368d091a8
-220, 0x9e55348e5734fa9f
-221, 0x1061a08056830eea
-222, 0xdca96b36adc5ed23
-223, 0x8563d7d016fe5d7b
-224, 0xa3fb6b79b0095ee3
-225, 0xb887cd180ae6f882
-226, 0x670e10186fda74a9
-227, 0xa25f08a01b69032e
-228, 0x5d90bfde7e21c0c8
-229, 0xb1b154f328250786
-230, 0xe0050135775487f3
-231, 0xbd7001fa00656593
-232, 0xcb6136e259180b69
-233, 0xf7480387c0872215
-234, 0x2e478a3efc5a7ec4
-235, 0xeb1cad9cb7d82f45
-236, 0x5d4c127c6c060ca3
-237, 0x1f9efe7a0bc11db5
-238, 0x59b9712ac8f24207
-239, 0xb94edcfe7b8e7ded
-240, 0x474b672b27aef61b
-241, 0xc2150760d3da0859
-242, 0x1146d26c90b6becb
-243, 0x52926b0e9e820413
-244, 0x24f2b065f78bdaa5
-245, 0xf94b5372ca68e5e
-246, 0xfdf3e645313db1fa
-247, 0x181af7ab689d2ec7
-248, 0x1e275b8f25520a3
-249, 0x1f287f3ff3d55dc8
-250, 0xa035801d4747cae9
-251, 0xba6ed878f55ebd
-252, 0x74d6598302a5c786
-253, 0xe92ce6198f39ded4
-254, 0x7b811ab7cda273c9
-255, 0x9d17fb60483addd4
-256, 0xf2b457f77ba326f
-257, 0x32e5956d2a580c90
-258, 0xcba559493cdd2b6
-259, 0x59276c178ca0e7a6
-260, 0x509681deb2f0160b
-261, 0x1bc2df48eb8f2a3a
-262, 0xbe7f17f92c808cd8
-263, 0xebbcd3a312ab80b7
-264, 0xef85e7595c591a83
-265, 0x914028c61432c620
-266, 0x7d8f67244eb3ea9e
-267, 0xa0512684d8ca4355
-268, 0x5a12209ada976a9c
-269, 0xfa0cf430c33df55c
-270, 0xd514dc8064688736
-271, 0xc5020a78e10201f7
-272, 0x9df7e30707f4591b
-273, 0xbc41eeb3c45f4ba2
-274, 0x2b5605d64a470e5d
-275, 0x77753b9a125af99a
-276, 0x7ba925c3af8e2a4
-277, 0x46c1dadcd05c1165
-278, 0xcb64cd52411f993
-279, 0xa6c3c1f065f7c758
-280, 0xad68088813a0068a
-281, 0x6dd039e4b9d4631a
-282, 0x528f220f2f54270e
-283, 0xfe565ea36805959e
-284, 0x3f2edbdc64385933
-285, 0xf0ea2fe07768bf3a
-286, 0xd120fe046bfafc74
-287, 0x85c1b029a6d56aa1
-288, 0xb03c986da026593d
-289, 0xd126fed2a4ca68a7
-290, 0x7e63d8216bc42201
-291, 0xadbfd88dcf50e179
-292, 0x6c1c1308ee42ca66
-293, 0xf5415a024cbf5458
-294, 0x4e50d4d388352815
-295, 0x38949c203a1a34ab
-296, 0x3a35d5ff38274f23
-297, 0xc96c009af2982c00
-298, 0x581691437bf0b1e7
-299, 0x793d1a61f0b0dcf8
-300, 0xa36a3b1c3e39c61e
-301, 0xff2938c1b78db0fc
-302, 0x1e82a7fc6b7c4725
-303, 0xd91883febcf4672e
-304, 0x22c55d5c95f1d985
-305, 0x3fc97236c50bfce1
-306, 0x28a3e0c7a4380bcb
-307, 0x2c072113ce5f2570
-308, 0x9c816b6af1d912a3
-309, 0x83698f6af8e41daa
-310, 0xa7b1b189d398eae5
-311, 0xb5b44ce05dd0867e
-312, 0x5ceaebf68b501f84
-313, 0xdf384c2545db9168
-314, 0xa75eae42ad85396f
-315, 0x88273ff551afa924
-316, 0xda2c47046eabd9f0
-317, 0x18d83b83988fa9bb
-318, 0xeed5ad076674a6ac
-319, 0x28d969bd36a0d5e8
-320, 0x9259eebb564cfd98
-321, 0xdc2e175377ffcd6a
-322, 0xcdb19c84396bc51d
-323, 0xeaa3a7674b5e5da8
-324, 0x9bd54f94110b36a
-325, 0x88d96179c0a35528
-326, 0xea1536654ceee668
-327, 0xdd1cc9d40ad3ea60
-328, 0xe9106bddc3221293
-329, 0xe096d5b5acd9ff46
-330, 0x4cb27170156f9265
-331, 0xd0d3e5b9edadb2bb
-332, 0xf75347484f2af9b4
-333, 0x6170333a4e6885d5
-334, 0x99a50b6b702b80ba
-335, 0x10629a67c9781899
-336, 0x374a33743030da9d
-337, 0x289fdbd0bc89f257
-338, 0xa67c56d1bc5dc5dc
-339, 0x38e90cd1dd6d64f2
-340, 0xcc5ed5dc4955655e
-341, 0x723d33bae999723d
-342, 0x46af17f0c981605a
-343, 0xd1d3a915f899b0ff
-344, 0x9a60c9bee03dcb43
-345, 0x11753a29a1d9201
-346, 0x491c99adde4e0a73
-347, 0x634437d6dc4388ea
-348, 0x5f4cf58d810069e0
-349, 0x8d950ed29ac1703d
-350, 0xa6330099182b17e3
-351, 0xfc9bf9a5cd4ea35d
-352, 0x4560dc5769ff741b
-353, 0x374a9ff29ee966ba
-354, 0x16a9bd5c9214e40d
-355, 0x46fdfb2899af3e80
-356, 0xe2eff8b6ad57da07
-357, 0xa67c709690485024
-358, 0x87551c8907b62ead
-359, 0xde03a75e08382365
-360, 0x6744ad2be09ed2c1
-361, 0xb34ec9f71efb1f48
-362, 0x4fb71847ea9a525a
-363, 0x10ffcd51ebb2f5b9
-364, 0x489431753bfacc7b
-365, 0x7a9cc00b29aa7802
-366, 0x8017011d2285ce9d
-367, 0xd54d90e061d61b87
-368, 0xa41a40e4a81526a
-369, 0x47b5ba075adc3b4c
-370, 0xb8cbbc5498cc428b
-371, 0x6165fcf1ef4795b
-372, 0x57926a7aebb26866
-373, 0x226ec9794dd0714f
-374, 0x2759cca87ce9a2ed
-375, 0xb41a74ac376c84d3
-376, 0x3e8101b52e2518a6
-377, 0xc8f18bb165e3db1d
-378, 0x187f3ef2ff1093d2
-379, 0xb5c1069cdbe57e46
-380, 0xd3f342f3104a5902
-381, 0xf32e59c6c7b8458
-382, 0xfdb39f79b5b1f574
-383, 0x526dce2fc3115682
-384, 0x1a2e8128c0180ae
-385, 0x5eead6a0e587e4c6
-386, 0x450e7d15d282c580
-387, 0x931a6cd04be42fe5
-388, 0xb4d321f03fb71277
-389, 0x32479d856fd9bdfa
-390, 0xa28dc713e419022a
-391, 0x6c8dcea6b5adbb14
-392, 0x4ae7b8d58ef7aa3d
-393, 0x49903e3fbd56493e
-394, 0x5238b0c9ee856f3b
-395, 0x77deab4c733cb2
-396, 0xea5d74aec663c8dc
-397, 0x899afbc707b0899
-398, 0x56a9418f18219182
-399, 0xb42801a6445d852a
-400, 0xd8462e581c7cd53b
-401, 0x802701332acff0c8
-402, 0x309618a5c049ddaf
-403, 0x66f6d281cd986fa
-404, 0x53f089859dd3e861
-405, 0x497078aabbed67cd
-406, 0x9cdc9d89a2e1cc9
-407, 0x871b1721c6f463c4
-408, 0xe9f8872d8f113d84
-409, 0x48e03acc1ff301b
-410, 0x79d5f73993eb02ef
-411, 0x5ac76f9f2329e39b
-412, 0x878c2c8d84a9643a
-413, 0xd1d0786d40a7391d
-414, 0xf024ad81eea63787
-415, 0x9f96b1146e5354b3
-416, 0xa85fd9a5bc0fc195
-417, 0xafd0522c28edfd2f
-418, 0x6c1aa508159a1fcd
-419, 0x873f632373719c87
-420, 0x5db129eaa27ff3d1
-421, 0xd81037e5b49f399d
-422, 0xa40a347abfc43a81
-423, 0x314452aabf5a95b1
-424, 0x6f8642230a3edee8
-425, 0x2aaa01f7cc73fb09
-426, 0xa0ebf15ee345343
-427, 0x19fddca117f16f35
-428, 0x111be87b23ca2143
-429, 0x46de5fd13663c896
-430, 0x2dacbe0fca5e4efe
-431, 0xd534f9dce19043c7
-432, 0x7a548f9a35a6759b
-433, 0x3c6f046dd15b6fe3
-434, 0x1f7a17cbfc090519
-435, 0xd8a282357a83d2ce
-436, 0x96a8a5cfb5be2843
-437, 0xce416b54c95c6006
-438, 0xcda9d6127cb716cb
-439, 0xb062a607b35aef78
-440, 0x141e913718707191
-441, 0xef829605cf4aa346
-442, 0xb9555c4c76d6a7c4
-443, 0xd41bd4a1593170ca
-444, 0x2e00143ad3d88b4d
-445, 0x1afa722d16ac1d47
-446, 0xa22530a5d53159c8
-447, 0x17f76921e5633a50
-448, 0x8e3ed4b11072799f
-449, 0xedb6ace0cb513a05
-450, 0x8dbf3d235449385e
-451, 0xd01fb688b01b798f
-452, 0x8e3aa7aa93ab0436
-453, 0x8b18ef4c8cc20636
-454, 0xf40181de15f5029a
-455, 0xfcf54366f31c924b
-456, 0x7b16e64f5c3a1d71
-457, 0x6b5f96df49784c48
-458, 0xcb5914727615bb07
-459, 0xf228f7b32ec2d237
-460, 0x37c51a8a1b854a84
-461, 0x63484491b02c7fac
-462, 0x526a9f0a571e170a
-463, 0xeb8d59e7fbbe583a
-464, 0x4fd1fa3bd32b8c84
-465, 0x825ba1ed08b31e1f
-466, 0x644d2cadd8ddeeb2
-467, 0x3874924732d3c6d7
-468, 0xd2679fee287a403a
-469, 0x17ddb27712b6cdb9
-470, 0xcce6bed3fa81f460
-471, 0x8a2df0f2ccb3f028
-472, 0x85d166e4456aae72
-473, 0x5dc4ce3fab56777
-474, 0x555b2c69b6eabb7
-475, 0x873bc152fdb3717d
-476, 0x5670068eb7d52805
-477, 0x7f776ca61f79e219
-478, 0xa8b51d2bd8c8c939
-479, 0x50345828de969faa
-480, 0xbefa083bfbd71b60
-481, 0x883809b883dffdca
-482, 0x49ccf930ea76fce8
-483, 0x97cc45c4c2dcf12b
-484, 0x4d3aef2e2a4a450b
-485, 0xc7ed768e40efd44d
-486, 0x5530c69ecdc47b2c
-487, 0x2fbb8ad65b3e777a
-488, 0x45234e14d9fd969d
-489, 0xb6a758912ec87c9d
-490, 0xb35f335efeac2d3b
-491, 0x21efc82b1e65a1cf
-492, 0x897db9fe20a2702f
-493, 0x444042b714793c27
-494, 0x37356cc844e57cb7
-495, 0x602ecce617309266
-496, 0x4ea323a5d93363b7
-497, 0x2c2f0344303d7067
-498, 0x983de14baf7a9234
-499, 0xc4edde0900601361
-500, 0x12574e754cf862a8
-501, 0x82eb774465a3a83b
-502, 0x115fd6ada32ab10
-503, 0xce23f43213ea118a
-504, 0x912e289389130f18
-505, 0x977464bbb2fc0cd9
-506, 0xeb944201e2747c79
-507, 0xa41dae77205e05ee
-508, 0x66c91981aba16d08
-509, 0xbd4aefbeb385af57
-510, 0xd7c7d36c0ec75862
-511, 0x492e43720ebee40c
-512, 0xf44861d4636833df
-513, 0xb9fb92c7203e2a1a
-514, 0xd75f7f48e860938b
-515, 0x8235d433d3e773f8
-516, 0x36cc65bb70a32774
-517, 0x3898d9516512bffa
-518, 0x4f5c36707161dc35
-519, 0xa35e3d81512f0a8e
-520, 0x4ae50933ef7bd3b9
-521, 0x641dc03f71dc81f3
-522, 0xc6002d833e1d768e
-523, 0x6c7d94f79b7a1956
-524, 0x4027405ac3c6e666
-525, 0xab69f022928e86d8
-526, 0x90272e57839563ab
-527, 0x56e78769f743d98
-528, 0xb5c7931145b93a39
-529, 0x253fbe3201939650
-530, 0x5325825cbe56c3a9
-531, 0x159aa2be6163c7bf
-532, 0x56b8d5a5ed375c9
-533, 0xbd4b45a7cce10f56
-534, 0x2f799de5fd80339e
-535, 0x40232bd30ebb82d2
-536, 0xc10e2198616b20a6
-537, 0x6a13ecc0b52813f
-538, 0xfafd5d5b466ee59e
-539, 0x810cbf398208d400
-540, 0x7137dc6f08e5b6d3
-541, 0xfe59d9caf7564d0c
-542, 0x3117cae7c6ee6927
-543, 0x89e83cf15785a430
-544, 0x386b6daed57236e1
-545, 0xc2e6fb38df98a4dc
-546, 0x496513da22e1e53e
-547, 0x57efdf29edd94aab
-548, 0x3433ac46ce163ef3
-549, 0x296565c39cba14f3
-550, 0x1ce89ad8ff370a6f
-551, 0xcb12c5a7db52fd27
-552, 0x8125373ad475530a
-553, 0x75ed8dda02fd5bbc
-554, 0xaf2c279596340f93
-555, 0x18c7f80478479a56
-556, 0x14edf2ed871d9c41
-557, 0xf35731f0b8d26e4a
-558, 0x2cace2d1996272bd
-559, 0x84c3b017f5b12bb8
-560, 0x441c286a303c81c8
-561, 0x92a7c594c92b2353
-562, 0xb175a7a7e0cab31f
-563, 0x501d7003cb9e530d
-564, 0x1e9d3dea32bb5d6
-565, 0x60756fd6e4b239d2
-566, 0xf979b4c7ddf4bb22
-567, 0x1e5c0ba3d2797a7a
-568, 0x94590d4209c70c70
-569, 0xc5dbc6ef6fd8c203
-570, 0x46a0eb4fc61727f2
-571, 0xe3ddaa7f4033fcb0
-572, 0x4fc177555a6b2f9b
-573, 0xce0f6ab675596a18
-574, 0xe11a08478844ecec
-575, 0x47054780433de44
-576, 0x89a3be9609dc2a34
-577, 0x9ea612c49a4c170f
-578, 0x8212e9db2df9ca7d
-579, 0xdf1cedac92affa7c
-580, 0xc21b0ff068580e5a
-581, 0x49168be340b1ade
-582, 0xce3a5fd54225a6a9
-583, 0x80ecff24ec6cdb9f
-584, 0xd14429e950a1a21e
-585, 0xc66a1ad3cad8f9a6
-586, 0xcc76bdca3ded453c
-587, 0x748165a5cb8b6bd
-588, 0xcc77eb3966db7c5d
-589, 0xbaceadcc1db342d6
-590, 0x33b42e3dc005fc38
-591, 0x43b5661eead65675
-592, 0x356821fd43c46e5
-593, 0x4efdd2444e0c5ffa
-594, 0xf84ce60e2c0de959
-595, 0x14a4b1dd26583f04
-596, 0x6ffb885f5fe18b87
-597, 0x8233b6a95b1af132
-598, 0x7e2e9c449dd06b71
-599, 0x736bc96174cd4d97
-600, 0x86591ab3ab385777
-601, 0xb7696e3909a91039
-602, 0xda363e1a90c99d9c
-603, 0x793cd7e1855b9a43
-604, 0xa9dbce0ccacd24c2
-605, 0x5d9a1d9b06fcf2f2
-606, 0xa7db7fd7c2b50d55
-607, 0x13c85aaefd37bf77
-608, 0xcba5689a383aa436
-609, 0x7dcbc3e297d2bd31
-610, 0x9860da13006164e8
-611, 0xda3be955750ba8a6
-612, 0x57f6a78ac6d2cb3
-613, 0x861ed21955702cef
-614, 0x3cfdfb6fa0763186
-615, 0xd075f803b072f140
-616, 0x6b1622638e94a714
-617, 0x6f4b177c0213a295
-618, 0x26c113226bbfa72
-619, 0xbcb962e03d008ba7
-620, 0x1e50555d6e75d9b9
-621, 0xd67082f15ff0086
-622, 0x20766d0fc6bd729b
-623, 0xeea24b2ecc4db639
-624, 0x3136637be559ec83
-625, 0xd3f2c641faccfcf8
-626, 0xe43f5bfe95bfb2c2
-627, 0xbc801108984335e3
-628, 0x19ff6b0c435e06a1
-629, 0x7b8f28c44eb5195d
-630, 0x375460c52c467757
-631, 0x534f4697a2a2f0d3
-632, 0xbd1aed6c1a94e586
-633, 0x9dec33a59dd000e1
-634, 0x4611fc38e6902126
-635, 0x1296da2fca821b09
-636, 0xce4684ac8861a6b7
-637, 0x16bdaa7a0563d3c8
-638, 0x22a6a8b6de1fcd10
-639, 0xeed5c457b2d2a399
-640, 0xb66c697c0e328f69
-641, 0xe678d6d573b2dc21
-642, 0xd0a78328399774d2
-643, 0x7fee339fadd44eaa
-644, 0x32c2da48753c8818
-645, 0x691f87af10bc6f5c
-646, 0xe382722ac6ebdbb3
-647, 0x28bb87557931a39f
-648, 0xc3aba948d7d22fa6
-649, 0x3ce7016f24e2f50b
-650, 0x863b408ab8161d28
-651, 0x1e3d2d6746c16b31
-652, 0xe5a21dc5843a37d6
-653, 0x8ecb559ea375c81d
-654, 0xff2681b83a599f98
-655, 0xcd9893140d02b725
-656, 0x80294d390a4e1a08
-657, 0x254166d362613f84
-658, 0xd2c336ba5b4ae618
-659, 0xef79a05286b75aaf
-660, 0x704140e00e02ea9f
-661, 0xa2623b124bb92365
-662, 0x2225846e393c249b
-663, 0x95676d7c7aae81a3
-664, 0xe0cbe12ba194b3d9
-665, 0xda8ca3d800ea6152
-666, 0x8b2c2f63db05c887
-667, 0xf14012751ef435e9
-668, 0x33820fbd9a06d78
-669, 0xf37375a008192ae8
-670, 0xaa2c34f4b405589e
-671, 0xd26bbda155ac158b
-672, 0x418b108b101ea70d
-673, 0xb9648a82ca0617d7
-674, 0xae6e2213c8c0d3e3
-675, 0xda7335c158d64615
-676, 0x78f175a4a89cdf5b
-677, 0xac7a07b66a84f751
-678, 0x266019228d3bdb87
-679, 0x3a7798913c66d5a
-680, 0x2aa9c173879dc048
-681, 0x67453dc96c3642da
-682, 0xbe9ea095f8333cda
-683, 0x10998be0d5702361
-684, 0x77a4e1af57a6b02e
-685, 0x66356334d32ab0fe
-686, 0x2df9585cb5ea1b34
-687, 0x51159b44acaa000f
-688, 0xbc433d2fbb8a4953
-689, 0x5a533a3838335feb
-690, 0xd57ffb6f839fc89d
-691, 0xe7cd85b8d026e706
-692, 0xdd4acea5a81530e7
-693, 0xd7af04b51606fa0f
-694, 0xe31e683c116deb37
-695, 0x4e2adf78e2a88fd1
-696, 0xc58b907a61dee8f
-697, 0x673e1a4b00b0a2de
-698, 0x36b639fa8091f63
-699, 0x7782c303339e2f0a
-700, 0xfd84e0fb7774b0be
-701, 0x2a6ac41e094d6e25
-702, 0xcf221a0187c8ca32
-703, 0x4e457ef8a6a528dd
-704, 0x9a7714c8913ac3a2
-705, 0x5a6513aaec56ddf0
-706, 0x254fc4d74dc56a5
-707, 0x93e1bd37d16ee5f2
-708, 0xd1a16a2aa652c2ce
-709, 0xa66ab34e5262848
-710, 0x5e6f429f482e4a2d
-711, 0x198eeff9e36608ec
-712, 0x3bea433a42228c7b
-713, 0x1a85a30f51e1ad8
-714, 0xe80b6a4fdb0d0482
-715, 0xc3e8d0c13f8879e
-716, 0xbaa3c52bb9413a89
-717, 0xc2d4614798d79e2e
-718, 0xbbd3f6abc551b6a3
-719, 0x282e112e6bdf2de8
-720, 0x615cc8613f4d4518
-721, 0x53b2627138d76555
-722, 0x1b19126726fd77a1
-723, 0x915c0a108cd2d357
-724, 0x1061822da93d9907
-725, 0xe79aee77f55dc17a
-726, 0x7b367a3165fbeba7
-727, 0x1894d6a0059bc074
-728, 0x876235ba0475437c
-729, 0x2b8f64a5357907dd
-730, 0xadabbbf775f4c3a2
-731, 0xf70d7e73e0914757
-732, 0x50c1494071662c91
-733, 0xae3cc90ade2512c8
-734, 0xd73f9d2b66333aa8
-735, 0x46342e130d23dc94
-736, 0x6c8307abda3d568a
-737, 0x235d9a334f4eae0c
-738, 0x33d0ccce19e66c93
-739, 0xd83559cfbc7acb8
-740, 0x430f65543bfcfad6
-741, 0x5dbe2eb34c5b25cd
-742, 0xdcad606d1b515392
-743, 0x6376bc62812519c9
-744, 0xf292cdcbab076b52
-745, 0x5b6669c53c3e9b1
-746, 0xbd5a95d4d51f18ec
-747, 0xf71d40c0b07b0a16
-748, 0xa51966e8052a050d
-749, 0x7fd18ced5be2d350
-750, 0x82727df4050382b7
-751, 0x7c10a4e48f664caa
-752, 0x3712f2d7d2e6bdba
-753, 0x2535b833ad6b4ef6
-754, 0x420577375164ff95
-755, 0x68c40b08f579888f
-756, 0x8922d2a586203dcd
-757, 0xf317b95e3aff246a
-758, 0xbbd1c166e380207d
-759, 0x9303601189dfdda1
-760, 0xef342abd93377a47
-761, 0x499773d085e7de1a
-762, 0xd204bb687ac202ea
-763, 0x19ffb5b90619622a
-764, 0xc59bff0531dfbe98
-765, 0x8c6d480a717445db
-766, 0x8c3c030ca187e2f4
-767, 0x53f0740df18d7b6a
-768, 0x1a5eed54662e3c6e
-769, 0xbb29a94e32f03c3c
-770, 0xdb0df407c4bbc009
-771, 0x6c0a9f4598ac0ba8
-772, 0x2e0ac7251648f892
-773, 0xb4555f7c1e3fe8ac
-774, 0x2cd8ce106d8e441d
-775, 0x608e38e439a239d5
-776, 0x1bb66d4c2a2ca5a8
-777, 0xc32ec47253591fa6
-778, 0xd3974f6f2b8b038a
-779, 0xdcbfd9eb4a9b1626
-780, 0x8589b3e6fc1ba06b
-781, 0x81f34f1da9f27f9a
-782, 0xd3bdd7496dcc21bd
-783, 0x7c963559e1c47305
-784, 0x5817e571d2fcc113
-785, 0x4f35fea60a0582c8
-786, 0xb851f167a0bda1c2
-787, 0xf57e13249380eddb
-788, 0x570e69bf38151a56
-789, 0x117feac919f19d69
-790, 0x49ce46af93025c96
-791, 0x4220f6c18e8e1f9a
-792, 0xf082699d8fd5070b
-793, 0xccd31756abff1928
-794, 0xbf9d4ab46de14d1
-795, 0xf2e0be2c2bbbc823
-796, 0x6e9b495ef22563ed
-797, 0x9a609cdcff6e3152
-798, 0xbbd2e5dafc83fcd4
-799, 0xac153055d6c5770c
-800, 0x312bbcdd6b681016
-801, 0x3ed60c24fd5a2f3a
-802, 0xc7f3b2948dcdf5c9
-803, 0x8cc631df1fac1c1d
-804, 0x77f4aab62a657d61
-805, 0x2f43e30323829573
-806, 0x5b7d20ab0ef901b6
-807, 0x7fa99ec817785705
-808, 0x5c95cf94241f1e3c
-809, 0xafa2d371f8f579e1
-810, 0xe4c314c4017e2e77
-811, 0xf672b575a585c0b3
-812, 0x6600a50a45d6ecdb
-813, 0xe06c0d7edb63485b
-814, 0xf1b45b82f4d0e8f1
-815, 0x41581d87cc1b759b
-816, 0x8807b689eddf602e
-817, 0x9e11d2949076d4c0
-818, 0x9f3b430319e48bb1
-819, 0xb27fa37d89396b64
-820, 0xd930a0cc6723c8b0
-821, 0x935fe6e9c7a57eaf
-822, 0x184f5dba2f19591a
-823, 0x513f86165d0adb73
-824, 0x4f2cd09cb85aef51
-825, 0xda66728c1901a11c
-826, 0x2445b8938b33db42
-827, 0x98fd86e4b89be5e8
-828, 0x2f752d8769747705
-829, 0x2cb9b42b98ce0c0a
-830, 0xf3314e0e0c57d31b
-831, 0xf9c382d55868b2df
-832, 0x83264a41539ec2c6
-833, 0xa2b3674f0adc1d0f
-834, 0x2dd7ad1d92001e7e
-835, 0xee210f6047a94713
-836, 0x71a18140729bbcfa
-837, 0x415058c01e01384b
-838, 0x6cc2e2079c9de72a
-839, 0x8561a9093d2b5d72
-840, 0xd6c276d566895b2
-841, 0x57cb2804836f4867
-842, 0x78becdfda7fd91d6
-843, 0x4046a94c8377a3
-844, 0xadaaaa0d558d261a
-845, 0x56ef182050db8865
-846, 0xbc28289519f6ebe5
-847, 0xbe7b95e4334540fe
-848, 0x384b9838c105f8c8
-849, 0xecfb823fc8815c7e
-850, 0xafdbbb2bfa8bdff8
-851, 0xed33653dbeb638b8
-852, 0xf4164289a7a6dc1
-853, 0x6e5cc51c2a3a8b20
-854, 0xdd59a99d16116f34
-855, 0xd48f95ba89787b5
-856, 0xacf9753586b8be7d
-857, 0xc0430da7c73bf373
-858, 0x25320aec467ee989
-859, 0x5108e8f4be4f8d8
-860, 0x69b1c7d23ff502c1
-861, 0x7c08bd62caea3313
-862, 0x4285d5b8ce1d19fc
-863, 0xbe03dc19cc3be0ad
-864, 0x182cdb615e4d4147
-865, 0xf75270e6096d5d1a
-866, 0x467b7ac524d17060
-867, 0xb0960b398a111ec3
-868, 0x126c099178f50090
-869, 0x19980d353ddb289d
-870, 0xd4b394e2c0305403
-871, 0x5972d7c748938602
-872, 0x276461c9da39bec4
-873, 0x6b3a2046d6ebdce3
-874, 0x4c55d74597c27388
-875, 0x363bf469f4f673be
-876, 0x9b26d4e69d36f584
-877, 0x21d441f573e56b6f
-878, 0xc29509f2a1e9c4c8
-879, 0x5178088ff6e62d5e
-880, 0x902f8ecd57128a7
-881, 0x479fddd275330bae
-882, 0xf56ac8b6f6364526
-883, 0x4904060a896d759f
-884, 0x1c0f1f4e800bbfe6
-885, 0x9b03bcb77880240d
-886, 0x2f35904d9867379d
-887, 0xf88a05a4dd6928e7
-888, 0xb5341282b6781021
-889, 0x225910a217522b71
-890, 0xa76bac3bf3675285
-891, 0xf19973940d9a57d
-892, 0x9f6ef608ed4291d6
-893, 0xec63cdbf5911fb10
-894, 0x8a359dd4ec3b41ec
-895, 0x8373d0d4e6af7261
-896, 0xfc6a14169335e7d5
-897, 0xf06ff499b6856cda
-898, 0x71f5ce76943ec9e8
-899, 0x9417034d7879b92b
-900, 0xfa0e3c78f47c0276
-901, 0xea9ebf817a3e3b93
-902, 0x7c08ff3d42e19a10
-903, 0x8697e5798f9bab52
-904, 0x10eb4dab88e4ce59
-905, 0xbd11bc073298b46c
-906, 0xf46483b5fea2427b
-907, 0xafed38960dd33a59
-908, 0xf7a00b0413eb47f6
-909, 0x4233464f10e7666c
-910, 0x7ce6db32b60aba3a
-911, 0xf9ae9414469308da
-912, 0xf5c4e8e04c008924
-913, 0xb89c735c89bdafde
-914, 0x8b815ec319546463
-915, 0xdd57dedbf1fa66e
-916, 0xdc0bba0705548598
-917, 0x1ed685fb6c966b2f
-918, 0xd9afc3ac4319d72a
-919, 0xed7c7e9407e71351
-920, 0x585b44a509258719
-921, 0xdf9eac3020de19aa
-922, 0x102102d94b983d57
-923, 0x85dbeaa806a02e79
-924, 0x4bacf4194786b961
-925, 0x32bf2fed8ab9b611
-926, 0xce94384eb215dd1f
-927, 0xfd1da2a7795c4801
-928, 0x149b31c0a14f7d02
-929, 0x4e01962d69248840
-930, 0x41d509a1c742473c
-931, 0x46105403c2b4e56d
-932, 0xe6fca820341c56e4
-933, 0xf1982bf03572ac79
-934, 0x9f99e2fb3cc2715e
-935, 0x6e3bd2ca3d50faf2
-936, 0xd0aea8d0fee1014
-937, 0xda0ededd067cc72b
-938, 0x56c42899c5af28b7
-939, 0x8c4883568ff28ba2
-940, 0xad9019516b75c1d3
-941, 0x3aa1b33682aaf348
-942, 0x31187b962cf65f58
-943, 0x7f2cc27ce4c8459a
-944, 0xb75ee9bbf97014c3
-945, 0x8eb8f42b9a9c3024
-946, 0x5b7dcf683a3c14c5
-947, 0xa258b18ccb7cb3c4
-948, 0x7587bc7015c145f5
-949, 0x7536427aff38edd3
-950, 0x437b33489ef425b7
-951, 0x22febd7e96538bfd
-952, 0x9fefcc49d567b35e
-953, 0xfd756268183d7d6d
-954, 0x480df3a7112b2eea
-955, 0xfd02a24b1eed9e6a
-956, 0xcb3b6c96d65ab879
-957, 0x19f8b328f365f6c8
-958, 0x7d5d20e6328ef6cd
-959, 0x8d74057415768152
-960, 0xcba11867467079a2
-961, 0xf86138cf35f091fb
-962, 0xdb3204b36a02eb61
-963, 0x8974a7786d5f6894
-964, 0xc8445cca1175a023
-965, 0x1523bfeb2f088c15
-966, 0x4e39bb650d7c4de0
-967, 0x91c9e9ff5b823702
-968, 0x7c3a6850a7c143e7
-969, 0x131999c480253f47
-970, 0x3ac336af37f6a4e7
-971, 0xb057ae911b406d5a
-972, 0xde0b70c5f9d5be60
-973, 0x93fd54e75618a86a
-974, 0x3955e207acb1f65c
-975, 0xa33450c2890b0b61
-976, 0xc6294720e971cd52
-977, 0x89cb13a5b1364169
-978, 0xa6fbc61118b44104
-979, 0xba0651279f93958b
-980, 0x6995c30cf06ed3dd
-981, 0xd75cd3472c5f86a9
-982, 0xb18d90ce11dfe2ad
-983, 0xd69200ae86d53222
-984, 0xe73fc25107e53e90
-985, 0xc1edc96f67bcb096
-986, 0x587cc0fc53992abe
-987, 0x2139d74bc6f3edff
-988, 0x1b4609bbfa08b543
-989, 0x564e5d7acb190539
-990, 0x1099ce214921efbf
-991, 0x7764cd537ccb1b55
-992, 0x4232db7dbdad3998
-993, 0x54c970b3ca338f24
-994, 0xf28c8f460244de6a
-995, 0xbd37dcd3829c5a4b
-996, 0xefbfe21ef1ab13ae
-997, 0x6df8dfc0a865d4a3
-998, 0x5e65a5bfa3f4d555
-999, 0xf6affb932cc9f3f2
diff --git a/_randomgen/randomgen/tests/data/xoroshiro128-testset-2.csv b/_randomgen/randomgen/tests/data/xoroshiro128-testset-2.csv
deleted file mode 100644
index 6c513476b..000000000
--- a/_randomgen/randomgen/tests/data/xoroshiro128-testset-2.csv
+++ /dev/null
@@ -1,1001 +0,0 @@
-seed, 0x0
-0, 0x509946a41cd733a3
-1, 0x885667b1934bfa
-2, 0x1061f9ad258fd5d5
-3, 0x3f8be44897a4317c
-4, 0x60da683bea50e6ab
-5, 0xd6b52f5379de1de0
-6, 0x2608bc9fedc5b750
-7, 0xb9fac9c7ec9de02a
-8, 0xc1942c64262d8742
-9, 0xc2c334fa4c2214b4
-10, 0xe53cfba26ba5ce93
-11, 0xf01f0c9d5398a979
-12, 0x1bfa2ef194eeb86d
-13, 0xc9df57572868239
-14, 0x728e35871474105a
-15, 0xdc7b1e93de9e112a
-16, 0xc4d930cafb32002b
-17, 0xf18b0bd68577e055
-18, 0x4929ceed7e690239
-19, 0x3b7a547b356b29d8
-20, 0x660f1cebb7affd72
-21, 0xf850e6052cc5f5c3
-22, 0x931512b017c71f1
-23, 0x8d88b7af3b8731e7
-24, 0x3050de537e8e84e0
-25, 0xc917230b8bd3d552
-26, 0xf58da0814356b478
-27, 0xcfc06b804972be32
-28, 0xe3892682eff28645
-29, 0x55bc734a03ca4fa6
-30, 0xe2f7700a020152b9
-31, 0xcba5a308a8d40969
-32, 0x928b63592b6b2f55
-33, 0xa372b4e0293d90c1
-34, 0xd73e00b1c0fdbb6
-35, 0x43c712d398019cad
-36, 0x295d994760c6501b
-37, 0xe94236abdd256f1d
-38, 0xed4566687d847ec0
-39, 0xd3a838dfcbcb5df1
-40, 0xf4ac54b3d79aae61
-41, 0xcabd8f089de74dc8
-42, 0xd58e132a2cd64b6d
-43, 0x4eb8bc55f8993dd2
-44, 0x8e4ee152013579ca
-45, 0x1aa7c7e058e02a75
-46, 0x5038184ea8f1dfbe
-47, 0xa9af7da9879e99ed
-48, 0x267567fe1128a585
-49, 0x3a3d637084865189
-50, 0x35179207577f3a88
-51, 0xc323e40ec505a4a7
-52, 0xd2ff171e3203c51f
-53, 0xf524706a4db15f35
-54, 0xbff297a90126dd1c
-55, 0xec7517f97c47dbf7
-56, 0xf56604fd9a8a7f3e
-57, 0x2a63c3bb1635de13
-58, 0x9c22f64a9b9acfc
-59, 0x6fc94c63f4e1b311
-60, 0x955820d474d00924
-61, 0x5a4d25256934ab74
-62, 0x95ea0cf2b73da09e
-63, 0x8e21647894c89a8d
-64, 0xa6ffd9037f6627ae
-65, 0xca5d03082b0007fd
-66, 0x2ee116ac7bdd65ce
-67, 0xa9e6e172042fa80e
-68, 0x4763c82d6e7c3d8d
-69, 0x325169a3ff49a8fe
-70, 0xe0be054ea126080c
-71, 0x4ccc1794542607ba
-72, 0x58c480dddafc29d4
-73, 0xedab421340a8d4d
-74, 0xd28d8f3c2ab241cc
-75, 0xb2a89fabba9566c3
-76, 0x1b12fc8b30a80b49
-77, 0x65e178e0065959ef
-78, 0x4adc931b03e25f55
-79, 0xefb7b250b854feff
-80, 0xe024be0a14bf3da2
-81, 0x60c2285324a5a642
-82, 0x280dfcde16655ff
-83, 0x18c2cbf4c0ba9bb0
-84, 0xcbeea642049c68c
-85, 0xa1b19b0b60833fa9
-86, 0x10f72a22ef81e27f
-87, 0x5cc3db165dde75b2
-88, 0x947a3c40223e6bd0
-89, 0x5d469f487abb870f
-90, 0x1b1f4fea711c039d
-91, 0x63b22d9e9616b06c
-92, 0x689aa9d9808ffa7c
-93, 0x2164f59dcc5f3e03
-94, 0xbdfc99e1f2c1193b
-95, 0xdc9e85a6a6b8f61e
-96, 0x11ad0ede8657e961
-97, 0x73a69427c838bafa
-98, 0xfa4b98f49849cc62
-99, 0x5ccb852e4e18aad9
-100, 0xae102d1e06ad3569
-101, 0x4be41a104f8d8463
-102, 0x723f50acab4314fc
-103, 0xd3b7b694a6bb6c38
-104, 0x8bfd1fbedfb8f092
-105, 0x3d5d9aea4d80f37f
-106, 0x28cd19af64bfc553
-107, 0xceba7e81e5ec8edc
-108, 0xd8c0513fca3ca719
-109, 0xfefef4fd2a0896f0
-110, 0x1aa7320084a97662
-111, 0xf63c0bb8fdae24ea
-112, 0x497a5e9d0a13c0d5
-113, 0x37fabc943a07639
-114, 0xb5be9f1d19096cc1
-115, 0x4385acd2ed447c52
-116, 0x8e146c6874b731fd
-117, 0x553c3c72c9e05a64
-118, 0xb9cca017a8d4be34
-119, 0x8f8e09bbd56564ef
-120, 0xcf6da1a96df67f67
-121, 0x5882a27646d3189e
-122, 0x1554cc47896d846f
-123, 0x105600be06e72171
-124, 0x95a04162a7ec3791
-125, 0xadeb00515f0d6be0
-126, 0x22ed3d1ca5ebc008
-127, 0x312629837e981334
-128, 0xca916e4cef410dd2
-129, 0x18556a16bdff0e5
-130, 0xfab80417581d6927
-131, 0x40f7ecce118881b5
-132, 0x6a12fe0d0c5f3ca2
-133, 0xd3e2ba4104a669f5
-134, 0xdeb3d714e1fd921b
-135, 0x32cc61dc107d240a
-136, 0x1a79efae30dbd860
-137, 0xebd6029e65fcefa9
-138, 0x94289c890d17c9b4
-139, 0xd91bbe374cb9f243
-140, 0x3352bdd3eccaa300
-141, 0x5cc9a4bf3127b238
-142, 0xebd9f454d96adb59
-143, 0xd5f61261bb7089ff
-144, 0xa743f21be20ce5f2
-145, 0x3d2a78d45bfb1da9
-146, 0x9ebbad453112c987
-147, 0xff48b7b97e3f597b
-148, 0x2b57be29ae160a9f
-149, 0x90df488fada68e76
-150, 0x785b4250ae46cc0
-151, 0x1c4fdcb6c66db255
-152, 0x3567f33a12172e96
-153, 0xc173a5f010dbe48c
-154, 0x859eac8b59dd2bc0
-155, 0x532809d8c8b5378b
-156, 0x656f93a00428ed0e
-157, 0xd4ee7e2199473a09
-158, 0x9bc701f16ecf35a4
-159, 0xcea39cb296d32304
-160, 0x43fbb2333d392310
-161, 0xc9d66a1062247d
-162, 0x271a83a113c42b2f
-163, 0xee17f7585ab05734
-164, 0x5e98cde55d0b8ae9
-165, 0x488cc07f036165b3
-166, 0xd78481d7416f6724
-167, 0x681436c7434b6260
-168, 0xc53bd2c997a04ce5
-169, 0x61b5951080b80e26
-170, 0x48f285546280fec1
-171, 0x87ff976327bf74ce
-172, 0xc10c08c9bc8b05ee
-173, 0xa62f879a73bf12a2
-174, 0x31d7cbb6f9c1acf
-175, 0x3e522645e518ee29
-176, 0xb85967a95e811cf8
-177, 0x99f8643751545edd
-178, 0x3f962076f920dd9
-179, 0xc92abe52da7ed89c
-180, 0xc1fe02e7dba484c9
-181, 0x7904149975239b19
-182, 0x5bfaad7ac409b74b
-183, 0xb915e6eba7685946
-184, 0x8b2291b29fd71d66
-185, 0xe57e5709ad0bd967
-186, 0x3fe55bb3338f0f1d
-187, 0xf41f8f7a981c05d6
-188, 0x80d3d9160712aa45
-189, 0x2da872bdd8bbffe7
-190, 0x6698441241fe0a4e
-191, 0x4870fc969dc6676c
-192, 0xd420fc68814fe867
-193, 0x6aa0500b9d8bacb5
-194, 0x55078d31633dcd47
-195, 0x6d758a56c80bd405
-196, 0x122149ae571cb397
-197, 0x22d1134c99ac507b
-198, 0xe7d9e27ae05a47d1
-199, 0xd18a73dc45e5a290
-200, 0xb5bc1909e08803e2
-201, 0xe9a1e3ee93f2e109
-202, 0xf040a8f79841c101
-203, 0x9a705a608899152d
-204, 0x4f7783a0dab4290f
-205, 0x11c5bad24bb23af3
-206, 0x58545a19a22fb881
-207, 0xeeaf5ab573077828
-208, 0x9e036466fd858142
-209, 0xef70bf26fdd6bc94
-210, 0xcc3a2971056cb9f7
-211, 0xef120c86e84d643f
-212, 0xa499226ab42f6d7a
-213, 0xa85cae4acfa0a29d
-214, 0xc8280371e2c25d89
-215, 0x246336f7e63ac4e6
-216, 0x76561f7777c7b915
-217, 0x704f83c71583f0b8
-218, 0x489db8592a76cd3b
-219, 0x268d584af17550c3
-220, 0x350e989762428fe
-221, 0x6857adc12d13f1bb
-222, 0xde62c7d9517260e2
-223, 0xc1f37ee8baac988e
-224, 0x714732f71cdd5374
-225, 0x56f01487bfa58c5
-226, 0x5163b23d41d95f14
-227, 0x745150434b747a38
-228, 0xdcf7cd6c6b14b1b
-229, 0xd853cc6bc2580f81
-230, 0x693562e66b579775
-231, 0x8f15d6369dbe6678
-232, 0x464c1791098ad19d
-233, 0xeeba6610f16ac2b9
-234, 0x3b307cc3c5bf5859
-235, 0x7e82177c4dcb75e0
-236, 0xae5978c33dd3e817
-237, 0xec3c0128360b0b2
-238, 0x2c325b630e904749
-239, 0x237ff1d19b4e7ead
-240, 0x3c82e47b67a33956
-241, 0xf38b46203355a168
-242, 0x4df09cfda8d5774c
-243, 0x4b06980b33ad6226
-244, 0x7afc1e940df03034
-245, 0xaa093355c596ecb7
-246, 0xbbb5100165d1445f
-247, 0x599c0f30608a36d
-248, 0xd35999534e29986d
-249, 0xd0cae757abc97c36
-250, 0x9ec9f2c24bbe3b70
-251, 0x76b96e93352c2960
-252, 0x4dd3acf1c01ae06c
-253, 0x71ebb829cb09312a
-254, 0x3234f4c24cdbb897
-255, 0x9b0930936363bc6
-256, 0x2ede98b9aacc3672
-257, 0x7388672bfb4c92c8
-258, 0x53011e6a80763bca
-259, 0x69eb6ca56f23129a
-260, 0x59d98a723f847ad5
-261, 0x234af2de04ba218
-262, 0x589b028bf0830748
-263, 0x525da4a281c641e5
-264, 0x47ceb48568778c5f
-265, 0xa2d73af3a884d972
-266, 0xdc6fab52b39bfe68
-267, 0x7f1a5e5ea6139484
-268, 0x70571bee802a1fa1
-269, 0x489f1d18d9c61c4a
-270, 0xd781c38aa8aafbc
-271, 0x5aa610ad2539aa57
-272, 0xd71a2a69974ae4a0
-273, 0xe4479465870487bb
-274, 0xf714dacd7fc4475b
-275, 0x5cb9c32e10d39249
-276, 0x746650ada73de1a3
-277, 0xbdd059359907cd53
-278, 0x38352adeaf41c72a
-279, 0x330a370593019b35
-280, 0xc75ff9305bdaf3c3
-281, 0xc64723389b0bd56e
-282, 0xbafccbf3fae88f31
-283, 0x3fc2c4b1d35da1fc
-284, 0xd9414b4382f59e69
-285, 0xec31e6d32a58f7e1
-286, 0x4763fb9ad6cadb98
-287, 0x5e9817762a380e57
-288, 0x9a670c79b387ff5b
-289, 0x467beb71ab258339
-290, 0x23cafda15a336768
-291, 0xe42ebf79e2d170e0
-292, 0xced716e4bbfe75ea
-293, 0x1057597f33a23633
-294, 0x563d3fb59a847744
-295, 0x1a3f85cf84ea5e0a
-296, 0x7630855876b41b32
-297, 0xb59e3eecb52851b2
-298, 0x26aed463d3769fd2
-299, 0x530d3898b8d043c6
-300, 0x28fa6b7cdc76ae94
-301, 0x99591569d7464343
-302, 0xa46da7d9e275579a
-303, 0x30f6e5979a92bcfe
-304, 0xaf345d66f7e756d6
-305, 0xdfad061213a92b33
-306, 0x2843134719ff646
-307, 0xbc8699b6d3f04313
-308, 0xacb08fbaeaedce6
-309, 0xe8fd50dc65724bc7
-310, 0x956d0436e93242fd
-311, 0xa9c3e3eee8a80b9
-312, 0x9bf71b03710af171
-313, 0xbd61bd65edf3d9ad
-314, 0x531b6865fc4f810d
-315, 0x58ca69e7da1ea2f2
-316, 0x4946f99ec03e8adf
-317, 0x3b9d4f48b1eb484b
-318, 0x605be28093b8144a
-319, 0xa7a4c6c8f2ade7af
-320, 0x729f97c8057a21f6
-321, 0xc97d9d778a4e2932
-322, 0xc173b6c3d0c5168f
-323, 0xd400c6f451c927fa
-324, 0x611d96eb9e70ecdf
-325, 0x3ad1a1709255cd86
-326, 0xf85d02d9fd5678eb
-327, 0x495bd5eb58af79ab
-328, 0x977bc4282d97381e
-329, 0x9a5b1811cde9f133
-330, 0x49b2b9d26ba0977b
-331, 0xf6c4e846b99bb87a
-332, 0xc399d5f8661b0c8
-333, 0xaf9a91415ddeb79f
-334, 0xb93df0259e6f3c5e
-335, 0x80ad075b109611b5
-336, 0xf3004a806f25186b
-337, 0x89a86842ef14b472
-338, 0xba53e5942ca79cdd
-339, 0xc4cd49237032e3a0
-340, 0xb39700c89fc1109d
-341, 0xc35fd5106aa40bf7
-342, 0xa0ff3091df2010c7
-343, 0xd4970cd890097774
-344, 0x39e7db9319a17976
-345, 0x56306e9316a184b7
-346, 0xe4d218267f28a145
-347, 0xbaa24a30caf53ebe
-348, 0xf4811ee0d51ce11d
-349, 0xccb9ece4a25b129d
-350, 0x132b2d1c4f092d60
-351, 0x7d5e7a59f14dd113
-352, 0x8ed30762f02d3098
-353, 0x8a92bb806bf9a4c0
-354, 0xd1957618db32f95
-355, 0x3ae37701b1db294a
-356, 0xc29e705f675713ad
-357, 0x3d12dc6fc3dcc569
-358, 0x7bc096e1e2ca2e43
-359, 0xf58f4f816e71b16d
-360, 0x23e6f93c7d0f1050
-361, 0xacaf403b80890da3
-362, 0x7a5e19bf92de04ec
-363, 0x72b3638076a857e8
-364, 0xb87601882acb1f3d
-365, 0xb51d157f2576ac70
-366, 0x7ef0c2f1ae02af0f
-367, 0xd519f6224fb2866
-368, 0xe00a80d729843eab
-369, 0x3c95b55c523d0871
-370, 0x81dcfef1772a151f
-371, 0xa5b20337760a602d
-372, 0xf36049e3e0f98eac
-373, 0x21bc3e0f1083016a
-374, 0xd8f295098597530f
-375, 0x78a2582906003e78
-376, 0x1c5cf0f434493262
-377, 0x2228d56b7da9cc80
-378, 0xc3d7eaedd0f36349
-379, 0xc9ca575c3b6dfe54
-380, 0xb5f03d2d974c91b3
-381, 0xb2f7ce70c56a865c
-382, 0x98f33d64e66602ec
-383, 0x559904911cb8b69c
-384, 0x19c426ae3d196913
-385, 0x818fcd24869feeec
-386, 0xf4c52f4b00f4295e
-387, 0xbdb808d5fe34cb3f
-388, 0x5014922c0ca80ee9
-389, 0x9f7e4c8068fb96d2
-390, 0xec99128e620a2df8
-391, 0xfcbb4fc594857a59
-392, 0x6aebf62bc7c79e4f
-393, 0xde8cba80e35ed831
-394, 0x55bb9ced0fcb6fd7
-395, 0xbe7534a18c050ef7
-396, 0xed2e6d1767c7ed5c
-397, 0xc88e18ac1064dd88
-398, 0xf71fbae1105d8324
-399, 0xb4431f0a4b807ea4
-400, 0x78de56556e1272d7
-401, 0x34d3e7e84ceed376
-402, 0x72f0ca866b3b182b
-403, 0x4747a9b5faaa6dfe
-404, 0x5a0f85d879e90288
-405, 0xbecbea458ec061f1
-406, 0x5e0bcff71b1911e3
-407, 0xc2e32dc60548a6ca
-408, 0xfa76a9a3d449b8c2
-409, 0x81303b7e225dea8b
-410, 0x4aa42b413ca5c63c
-411, 0x4d7372d31df5b70d
-412, 0x2a408f03bb0499d1
-413, 0xd75529b610d56d9c
-414, 0xa6e9d1356654ffbd
-415, 0xe10bdb510c440754
-416, 0x8fce6a25abf05e69
-417, 0x21aaf272093d6081
-418, 0xcc18cf69f0f0b2bd
-419, 0xbb4e0a1cda31a035
-420, 0x70128e6522fe238d
-421, 0xaaeae87b79d223da
-422, 0x6882e6705d12bc8f
-423, 0x8e110abf1ccb274e
-424, 0xb7ebac3cfca55a39
-425, 0x909705e2a6e584ce
-426, 0x3b54f18e8f7708cf
-427, 0xcac28674d5caa98e
-428, 0xdde0e042ad4107a5
-429, 0xfc2ca3a740f903ac
-430, 0x9aae84ca64051770
-431, 0x858a0d2d879442e
-432, 0x75b3e7d29e357b39
-433, 0x9f6f5487d5ec5ac1
-434, 0xfd95986f2765eed4
-435, 0x2899b60770693140
-436, 0xb8ab9650b7300ee8
-437, 0xaa772209ef643b16
-438, 0x9c98fb4b5946fc61
-439, 0x6f614d64e4a38b84
-440, 0xbe0099b53347a13f
-441, 0xe8d05eabf7db8a0e
-442, 0x4c849670c59692d5
-443, 0x421d2e32838ebba6
-444, 0x1fb1f7427466dd6b
-445, 0xd79d9987fd12fa15
-446, 0xc195d5fedaa613c1
-447, 0xfecdf6c6fb6c4924
-448, 0xd8536233459d6d65
-449, 0xaed30f22454f593c
-450, 0x14d427078bb818c1
-451, 0xf7235f42e291617a
-452, 0xb1fc436bdb2efb83
-453, 0x21cc3fd0fb82e07b
-454, 0x2df968f572e077bb
-455, 0xe1b76c513528f8c3
-456, 0x955681442083db83
-457, 0x2e009197f295008c
-458, 0x2c258d6b1935587a
-459, 0xd10fda2d14ce8e70
-460, 0xd21cdc7f5db09825
-461, 0xe39168a7b3a080cc
-462, 0xc82c2a353a812026
-463, 0x6adc63d4bb7f26b0
-464, 0x5d2acdd2deaed807
-465, 0x47c39719b79aee01
-466, 0x5b6351daac993e69
-467, 0x1e2d2cf25d029df
-468, 0x671c43218ccc62b
-469, 0x783093122682b9c8
-470, 0x8055e091219d2263
-471, 0xa6e7f6bc43717757
-472, 0x91855fe232480a87
-473, 0x554030e74824042
-474, 0xd0c14f8ff34b1a30
-475, 0x13aa852fdea8bca8
-476, 0x27ed292b1a4fa598
-477, 0x3e56548b7095af08
-478, 0x47432aa82a4bfcfc
-479, 0xadddde35537dc4c8
-480, 0xadb0d103d29faa1f
-481, 0x14818cb71d4cdaf9
-482, 0x31507bcc3d46a5d
-483, 0x7407577173399611
-484, 0xac03706bbe4da972
-485, 0x4efb0cae8499469c
-486, 0xc6d4bcbc4396a20b
-487, 0xd7581757d38762c3
-488, 0x6308e217f7e69120
-489, 0x6931392a2fcf3756
-490, 0xb5a5b36744f09886
-491, 0x8da8292d0bf2ed08
-492, 0x13e0aa8d5a24bd3d
-493, 0x1131dbe7a2b97139
-494, 0x2098efd4a1e7108c
-495, 0x9c470e15e690e574
-496, 0xe60e3aeb65560eb8
-497, 0x4ae5444669ffc65f
-498, 0x911fc7e6820923b8
-499, 0x25b3fbb125c1ae19
-500, 0xa8b6c812471f37f0
-501, 0xe6d9aec89655113e
-502, 0x59d24e18c48dd532
-503, 0xc9b320756e0d6c35
-504, 0xb70a2316319c8e2a
-505, 0x328a0b16ff39152c
-506, 0xc2088e4c8a3298
-507, 0x10bce05ac0971264
-508, 0xe26b5b7655fd4ad5
-509, 0xfb1d818177211481
-510, 0x98d83c41a7196f86
-511, 0x45801b77072aace8
-512, 0x563268328ebfb870
-513, 0x4d26e320a51123fa
-514, 0xf531c63a3438f527
-515, 0xd94525fda256c193
-516, 0x883de65d7957b025
-517, 0x7be095e05de1599b
-518, 0xd27792977b3a11dd
-519, 0xfd179d3c4a5f06fe
-520, 0xfbe066b302e09da2
-521, 0xb841424fbd2c7249
-522, 0x566dc3e3f2345fc1
-523, 0x14e8cfa0ee0ab392
-524, 0xf1e11e1841884ad7
-525, 0xc895b028b3b7df26
-526, 0x70f727baee1dc909
-527, 0xc2f4bcd3f8c7905a
-528, 0x6d294a99118f9f47
-529, 0x18a723a0ddcf902e
-530, 0xac36efa0258143c4
-531, 0xc558a61c40490895
-532, 0x430341fd196a18e7
-533, 0x27fea5b52f4178c7
-534, 0xd0d628d45d51e088
-535, 0xc016cdc47158510a
-536, 0x925a6cdd446f0555
-537, 0x5e5a30a2f1d9bd08
-538, 0x918ad9cea082da5b
-539, 0x23bb26bfaa0e79d8
-540, 0xf667bd79197706ca
-541, 0x9ae3d6e8290fa1d5
-542, 0x20a15e60e0007e64
-543, 0x7d88beb1713a320b
-544, 0x2d8b1728e392a6c3
-545, 0xb4cc0ae2c22afad3
-546, 0x749fe3524435e61f
-547, 0x137bc8f57e7060a3
-548, 0x8070ee0def7571b
-549, 0x2d5cacc36c121329
-550, 0x8408aeea38281006
-551, 0xc05e54af2206ce49
-552, 0xd547b24a26ebd3c2
-553, 0xc66d83645d95b57d
-554, 0x8f4187e81ac31f25
-555, 0xe2878187a7ffa7e2
-556, 0xf7802760e1a8b9e9
-557, 0xd7f135ce1d83b5d
-558, 0x4e2d5eee2bbad34c
-559, 0x9b73503bcada0bcc
-560, 0xc3fb41b2fdd2b56e
-561, 0xc5958ed587dca76f
-562, 0x1029b90ef3fa1e44
-563, 0xeb52236cba057b2f
-564, 0xd411396dfa523c6d
-565, 0xccb05527609c690f
-566, 0xe73e27fd2c6295e0
-567, 0x363628b4f2a7bd3a
-568, 0x443bcaaab9b67c72
-569, 0x1142926c0ff02a91
-570, 0x6d7fe0d6fbcb7265
-571, 0xea31775a5191e8a2
-572, 0xc44a633ed2339375
-573, 0x261bfdb83fc7a23b
-574, 0x4cb967260869e0e7
-575, 0xf3b7134ffac658c0
-576, 0x97b266a6b95e219a
-577, 0x18a5be54082669aa
-578, 0x9adbdbfe1ad667c
-579, 0x6fd02995faae35b0
-580, 0x9e62832c534ef39f
-581, 0xb89e8229d7a85aec
-582, 0xa46c8670446a0539
-583, 0x6960eeea3b3a4c70
-584, 0x27901b708cbb4f97
-585, 0xde1abdbbffa9cf6c
-586, 0xcadb304d56e1ad33
-587, 0x579b5110955d30c9
-588, 0x77b57f59d61ebdbb
-589, 0x900adb153a8037c0
-590, 0x1f5200f1f8be5a4a
-591, 0xc491a76e1cefe1f1
-592, 0x6724370243b5d0c9
-593, 0x6cc8e3b9fbb98c87
-594, 0xca1722c5183b2b57
-595, 0xe9a61a3f20c59fec
-596, 0x91723ba5418ed1b5
-597, 0x4299a43bd28daf49
-598, 0x8dc266e15f1f32b1
-599, 0x91a22c16ad09703b
-600, 0xfe36e6cd32ebd06c
-601, 0x30c0d9f9a60a11ae
-602, 0xfad538e8bf1a0f03
-603, 0x47c2a0261dc808f6
-604, 0x9148743d0cdc81a
-605, 0x17c3f4257197c037
-606, 0xdc8b06b5220c01a7
-607, 0xf9ad586eb09e30f4
-608, 0x702600c123b400c6
-609, 0x9218eef469b0db7e
-610, 0xce16c9bac0969bb4
-611, 0xa758408742a37457
-612, 0x2eb094509e812e4a
-613, 0x28440b87ce1e0e21
-614, 0xab48eb01ee07e56a
-615, 0x85e69345a3649100
-616, 0x517927d4a415e569
-617, 0xd02de9b703206f93
-618, 0x5cae9cf67b9d62a9
-619, 0x8b0e1e0c2623250d
-620, 0xce893eb84c72510b
-621, 0xd97bdcd1e00f8c3d
-622, 0x106c1721a5e8546
-623, 0xb4fc0554e61e059a
-624, 0x5fad1bc4c54c3136
-625, 0xd09dc05514ba4ad3
-626, 0xc934e2153df00372
-627, 0x3cff94fa0fa2967a
-628, 0x4e7ca152c113e934
-629, 0xc5ccf2350fb82ffc
-630, 0x10aa453d349b70df
-631, 0xf8270070253de14
-632, 0x412b14bc12ef4538
-633, 0xaf83140fc27938c2
-634, 0xf342d1c8c97c7e74
-635, 0x693b4b07b79cfdc2
-636, 0xe859c76fde1be4aa
-637, 0x90ac4aa1c012a971
-638, 0xeca381124c9bf23b
-639, 0x1f544598d356ab2f
-640, 0xbcd78485f20f8339
-641, 0x6b8eb4bf854dcdf5
-642, 0xdb231419a9323609
-643, 0xb2f0167ca051a61a
-644, 0x9806e89e8d3ebd15
-645, 0x69ce87797a14c206
-646, 0x143ecc33c23e61f5
-647, 0x3be37d8fbcfc396f
-648, 0x2a4336f50c851387
-649, 0xe665ed14a40c6400
-650, 0xc229fc93c03a6a24
-651, 0xb7f27088c74af74
-652, 0x8a8da4fbf5fb90a6
-653, 0x8c54684d5db6a600
-654, 0x11ef0e952c8ad4ec
-655, 0x928a518e677b87a6
-656, 0x6b6c8ebe9db7253e
-657, 0x9feecd8eaf8a8101
-658, 0x4f270f30f3ad2d0b
-659, 0x23798146bff58d75
-660, 0x7d9134e4005b9246
-661, 0x18b5eb6833bb921e
-662, 0xff0ef41f6c734814
-663, 0x388b18f678774f4e
-664, 0xa51646467be5785e
-665, 0x9f96bbe291c9361
-666, 0xce39cac00148c7b1
-667, 0x69d41ab8914f944f
-668, 0x579ca60b75dbf4e5
-669, 0x352f2b89c968d81
-670, 0x181d45a561c05553
-671, 0x5a6aeaa048d6a494
-672, 0xd7938433b99408ca
-673, 0x13bd6696806f0800
-674, 0x2ca8e35e87037dbb
-675, 0x70d9d33ef79088a1
-676, 0xcdcb45940c9ba3e7
-677, 0x2546f21a69a29dc
-678, 0xc674c5afa1f9abdf
-679, 0x856dfa52fdff93b5
-680, 0x614b66daa02187bb
-681, 0x55ce165aee2e205e
-682, 0xf34d6cf856f941e3
-683, 0xa4a0ec51b4f25b25
-684, 0x83e6bf89bfe39762
-685, 0xb559b9a68edf3381
-686, 0x9259778b54c31479
-687, 0x4e4e21e81138cacd
-688, 0xbbb990cd25c2fb2d
-689, 0x38a85ad84d9e31e
-690, 0x5b4f4081ffba398d
-691, 0xfb12eb300dada910
-692, 0x6975328f3eebcb34
-693, 0x3db017218a6478f0
-694, 0x9397aca7eaa5de14
-695, 0x6f7318d0d9ffed6
-696, 0x40cf276103c34010
-697, 0xeacef8ae2095ec8a
-698, 0x80f7593e9ddaa152
-699, 0x8d4bc01f7e5c6520
-700, 0xbbc9606e7518e199
-701, 0xfe71ef90abbaca29
-702, 0x528edfe3e467ed43
-703, 0x52b322c36f60627d
-704, 0x9946be5ea3beac73
-705, 0x890745d71a02c404
-706, 0x5570d1bde8bb7993
-707, 0x563fceeff8466dcc
-708, 0x62a9ca23db6e4d62
-709, 0x89d6038410c92e8
-710, 0x16bc3b1ea7b90a89
-711, 0x12ff3e9c30d8dde6
-712, 0xe78cb53e8dd40a77
-713, 0x643722181b85d5a
-714, 0x73e26524635d78e3
-715, 0x941ccfc41d47c53b
-716, 0xadbedec82c31e57c
-717, 0x2addd39e7a36aad6
-718, 0xe64d81fa432bb65d
-719, 0x1e2e63d01399ca82
-720, 0x760a3c0edbbef3a6
-721, 0x801131e88419a79c
-722, 0xa2a75136213dbb6
-723, 0x83e576905753c3ff
-724, 0xdbbdab8007c4ea0
-725, 0xbb73b13358898c2d
-726, 0x5818372d8fe036f7
-727, 0x3aa052cd647e29d1
-728, 0x235219635ff4abb6
-729, 0xe24e07311fa76b65
-730, 0x4967574b62c3efb8
-731, 0xb04b4c210022e795
-732, 0x3d48e77713ef3fda
-733, 0xf4ec1050775fd3b1
-734, 0x38953c604d35190d
-735, 0xf731a6450c1e23fe
-736, 0xac66ae73ecc6b9dd
-737, 0x442e2bcbca5bbaa8
-738, 0xa74a741bd02570bf
-739, 0xa85473cbf3b4c45e
-740, 0x24d43199c69cdda
-741, 0x59f78fa87f895d36
-742, 0x78f5513621dc1813
-743, 0x226c2606635698c9
-744, 0xea39babbad3df384
-745, 0x2f178b076f08f80d
-746, 0xaee482470bd9acb5
-747, 0x48571d8c4235c1f6
-748, 0x6569395eec2df1d7
-749, 0xa9b7408c1d67a372
-750, 0x3b9c5ba01aecae9d
-751, 0xb047b26325765767
-752, 0x9bb1968c8b6149d4
-753, 0xbba4038fdd341986
-754, 0xc1d23b5b89beaa88
-755, 0xaa9a341db334c8ac
-756, 0xaa9337dd1fddf923
-757, 0x9fdf160ed939d68b
-758, 0xbf48cdd432d0f148
-759, 0x2a01743f1f7b581b
-760, 0xb68d5c631e9fb70a
-761, 0xe9ab844ec026cc7b
-762, 0x1fabd46f0d5266f0
-763, 0x29e53ae817eec5b
-764, 0xeffbebc07500ad4d
-765, 0x432ae3b596c1589b
-766, 0x48d44f3895d6dc23
-767, 0xcc3a5576e24ec2bf
-768, 0xc8f4a042462e95d9
-769, 0x24c12cd6ef57b6
-770, 0xa7896ae26675f69
-771, 0xb98a1790d429c90b
-772, 0x71f7ac96dea8ffb6
-773, 0x7878c64cad319f72
-774, 0x65586d63156e1a05
-775, 0xa70ef198e61e2a11
-776, 0xf5a84f622d490449
-777, 0x7789e1c1927e82c6
-778, 0xfe053fdbb586b8fd
-779, 0x59a94b735df951c3
-780, 0xdf5e72909ff2bfbd
-781, 0x34dc2bd8876a92e5
-782, 0x7e408900bfa3b282
-783, 0x844176cb62d5008b
-784, 0x7406e9e156cddc9c
-785, 0x6a6d87de33056193
-786, 0x20c388365359e4c
-787, 0xdbda2eee6499be64
-788, 0x3574cf8bc4840b47
-789, 0xc2d904ac50e44ee3
-790, 0xb9edf042b0d96102
-791, 0x2ac087f3922dd11e
-792, 0xeaf244df29c2a8ae
-793, 0xb4243528d8d0649c
-794, 0xed67e39d9217e6cd
-795, 0xcbdcd1620727437
-796, 0xcc00dec8485d0dfb
-797, 0x2e5411679d89f548
-798, 0xdd355c299c05131e
-799, 0x6fc81e1e9beb2c8
-800, 0x205ac04eedc0085c
-801, 0x8bf73a08c5240640
-802, 0xec6f8daf06673cae
-803, 0x6e29f78f0a59638e
-804, 0x8c530fd613aeccda
-805, 0x58b99ce19626ee04
-806, 0xb16f71c11f209bb9
-807, 0xea1d7ee0e82f9146
-808, 0x5641482551d357fa
-809, 0x13fb8eff6efa4b89
-810, 0xca4bdfac87e46ce0
-811, 0x9e2babf08f33b6ad
-812, 0x482633792e270729
-813, 0xd5c17bce83e146e9
-814, 0xf8df8169c7ff4df6
-815, 0xad974ea8b3bb7e7d
-816, 0x8ad356322d6c0a26
-817, 0x5ba5a24cff70d235
-818, 0xb604ea125e469d44
-819, 0xecb90d0ca42445d9
-820, 0x9c499d3f441e6eb3
-821, 0x2aed9e67fc701d26
-822, 0xb3476334028bed9
-823, 0xba079723415a89fd
-824, 0x8684b0e124ebd181
-825, 0x6effee2741402b37
-826, 0x15e734115d68f8a4
-827, 0xafc15b8a9fa93205
-828, 0x9749e35360fcd91
-829, 0x8ffbf6ba4b02bacd
-830, 0x2f107b6a820f44ba
-831, 0x230cdb06c5f7422b
-832, 0x2149918883f7c858
-833, 0x3e8eb9dbfb832b71
-834, 0x871f0b4369d3dbc3
-835, 0x3553e06132e55fa4
-836, 0x1ec19fd1ce7a5823
-837, 0xf3908fc23446b3a2
-838, 0xe300b55305c8d7f3
-839, 0x61e4ab3372dce7dc
-840, 0xb50f68be3632604f
-841, 0xd6d2993fa6d155b9
-842, 0xf9c8d0fed0c90246
-843, 0xdd1b49530387141f
-844, 0xd1db9818546e095c
-845, 0xb91885ccff43ee8c
-846, 0x8d704dca3b7fdb63
-847, 0x8309c9077939df4
-848, 0x6536739d7ae608f7
-849, 0xdab8a503cb9b94a6
-850, 0xc504248b8f69f733
-851, 0xb0ccfb81eb67e3e4
-852, 0x45ac4f949c418493
-853, 0x7763a70137c01376
-854, 0x7f08d6362b17c470
-855, 0xb190bb422946ad46
-856, 0xdafe7dfcb0d71320
-857, 0xec415ea4c54398f5
-858, 0x5955b81204c5657c
-859, 0xff1f983c56d6d7cb
-860, 0xb25b4a0de0bf393d
-861, 0x3a90222bef45f3fc
-862, 0xf0eb0903e3695f44
-863, 0x405ecabf26817b33
-864, 0xccf01a062f2351eb
-865, 0xa62a5f63e31545b1
-866, 0x673d1baf237668d3
-867, 0xd15db3cddfb0a161
-868, 0xa8adebfc9b5351f6
-869, 0xc297fae49f0b2d08
-870, 0xe5ed1156ab569225
-871, 0xf4aa4bab70aa8c11
-872, 0x8e32dd1eb44c6363
-873, 0xc7aa250f1492e86d
-874, 0xc645795d705914cf
-875, 0xfdd8a48c0fb81c53
-876, 0x6ad1401f539799fe
-877, 0xa157e71b6bdd4254
-878, 0x4cc09814465a6c9e
-879, 0xed1f66bd824e39ec
-880, 0x6b74f7f6f2d4c16b
-881, 0xa3391c0100010ae4
-882, 0xe0f384530c0e7eb
-883, 0xf6aeb9f0d64c7159
-884, 0x3d7f6bd980e07a17
-885, 0x8b4e1bd3e782ea4e
-886, 0x7b005009d95b7d38
-887, 0xf43f001d5e7326c0
-888, 0x16600ff7361a1721
-889, 0x13778aceafd72087
-890, 0x85d3359c37907c58
-891, 0x7374f768c968d0f
-892, 0x2373d89b9b8f9e9a
-893, 0x21a3fe7e4dc5cc35
-894, 0xb02abcad4f4ae60
-895, 0xb9eb579582666e3b
-896, 0x9c12186973b91695
-897, 0x1bd25ac6911295e7
-898, 0x9f5a90e0fc16ffa2
-899, 0xe3e8f10ce7fbb9e1
-900, 0x5867e566887d2d16
-901, 0xd569aaf2ffead057
-902, 0x678359b93dfd07f1
-903, 0x9fb73a4f1b777d94
-904, 0x5c6b0bcc70df3a54
-905, 0x66fd71a67ed5e59d
-906, 0x62f21a6fe936b212
-907, 0x86922151e4b251c4
-908, 0xbfdee56cdeabe8bd
-909, 0xbe3bc7c4c2380ffc
-910, 0xd09ebebb0e786d49
-911, 0x4951a83005aa22de
-912, 0xc1b7da6cf08630c4
-913, 0x8b294b5fef04b0af
-914, 0xaca7a47f7fda4d5f
-915, 0x70bbddc64b4b1a91
-916, 0xad306a764087085c
-917, 0x19b9f11c14adb74a
-918, 0xbf1a7d2c83fbbbe
-919, 0xb78da8a53fa857
-920, 0x5b614c5060a543b7
-921, 0xb6f32557404d475f
-922, 0x9fc53dfe5281f084
-923, 0x43ad9d302c10a475
-924, 0xa4575be2c10fbc13
-925, 0xe58c4c02d5b2bc8a
-926, 0xaa838a3e5a16bb55
-927, 0x95c39373858011e1
-928, 0x17a6be18c1801fa
-929, 0x835e6c3d99898c27
-930, 0x9af26334bd726505
-931, 0x7addf56712a22afb
-932, 0xf619281f6d4d37d0
-933, 0x310c6b1e29ca7eaa
-934, 0xe8106bbe1ea0f3c9
-935, 0xc89add421cfe7bb9
-936, 0xe01b7a6885180236
-937, 0xda8cd608ee0eee61
-938, 0x3bb2f5f40a8f4880
-939, 0xd434cddc85946350
-940, 0x6390806f8d1465f
-941, 0x2a5f0150c8362cf3
-942, 0xcc6980e968b75f37
-943, 0xd86756899b2c95d2
-944, 0x95ab76b54f439605
-945, 0x1e0d6f6a99569ffc
-946, 0xd47b20b72c0f02e3
-947, 0xcd9fff1462fe8a25
-948, 0x71867c57f009bc8b
-949, 0x85238c818139a22b
-950, 0x58247991b6447ce7
-951, 0x3b41a627153bcc9f
-952, 0xa3ddf05f18153e13
-953, 0x21a3d47762fbdbe4
-954, 0x8ee55f20e5c5b14
-955, 0xc3ed8e23589b365f
-956, 0xbd12efde1b5e8afc
-957, 0x35b81175f738edc8
-958, 0x16b2627c28c952c0
-959, 0xb16a5009047b002b
-960, 0x5e4c769bd80bed26
-961, 0x96174863aa73bf6b
-962, 0xb3bfe6a2d7d05881
-963, 0x5c0a1757302c3fb6
-964, 0xfcc52e2da058ae67
-965, 0x12b26055c0ea26e8
-966, 0x87d8126b14b8417b
-967, 0xc87745c58eaa597f
-968, 0xb38b4b4b579ab55
-969, 0x559ece2bb0ca0b32
-970, 0xecbf6af7914a6435
-971, 0xd994b534e3f46c42
-972, 0x67301d5555cbaf1
-973, 0x4b2222c098aecb6a
-974, 0x2f1b7acadaa10ffc
-975, 0x4c48c65a542d56f4
-976, 0xf3fbde71409cd64c
-977, 0xb32e3ef1dc24a7cb
-978, 0x229321ce5bcd85
-979, 0xcad7e7dfee447d7a
-980, 0x7fddd28936d166a5
-981, 0x928bfb0027da2715
-982, 0x97b17752c6aaa82b
-983, 0x3eaca529c941d7c1
-984, 0x91937555520265e
-985, 0x8e7e5c3786ee3588
-986, 0x27162348b08a9aca
-987, 0x302165a3d76eab04
-988, 0x94111b7672c6bd95
-989, 0x7a471169035fc35a
-990, 0xe850ed94b0be86e1
-991, 0xf7a3721d6c85c1cc
-992, 0x6727a68e16268dfc
-993, 0x65433e82f0e19d29
-994, 0x6109fd616c977544
-995, 0x7068ef83a29cdc70
-996, 0xcef2deae0fccb574
-997, 0xee2a2ee021a6ad5a
-998, 0x5195005fba78706c
-999, 0x31364d630d333f34
diff --git a/_randomgen/randomgen/tests/test_legacy.py b/_randomgen/randomgen/tests/test_legacy.py
deleted file mode 100644
index 21c56946f..000000000
--- a/_randomgen/randomgen/tests/test_legacy.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import pickle
-
-from randomgen.legacy import LegacyGenerator
-
-
-def test_pickle():
- lg = LegacyGenerator()
- lg.random_sample(100)
- lg.standard_normal()
- lg2 = pickle.loads(pickle.dumps(lg))
- assert lg.standard_normal() == lg2.standard_normal()
- assert lg.random_sample() == lg2.random_sample()
-
-
-def test_weibull():
- lg = LegacyGenerator()
- assert lg.weibull(0.0) == 0.0
diff --git a/_randomgen/requirements.txt b/_randomgen/requirements.txt
deleted file mode 100644
index 0d7fe1ba8..000000000
--- a/_randomgen/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-numpy>=1.13
-cython>=0.26
-setuptools
-wheel \ No newline at end of file
diff --git a/_randomgen/setup.cfg b/_randomgen/setup.cfg
deleted file mode 100644
index b25ab5078..000000000
--- a/_randomgen/setup.cfg
+++ /dev/null
@@ -1,11 +0,0 @@
-[metadata]
-description-file = README.md
-license_file = LICENSE.md
-
-[versioneer]
-VCS = git
-style = pep440
-versionfile_source = randomgen/_version.py
-versionfile_build = randomgen/_version.py
-tag_prefix = v
-parentdir_prefix = randomgen-
diff --git a/_randomgen/versioneer.py b/_randomgen/versioneer.py
deleted file mode 100644
index 64fea1c89..000000000
--- a/_randomgen/versioneer.py
+++ /dev/null
@@ -1,1822 +0,0 @@
-
-# Version: 0.18
-
-"""The Versioneer - like a rocketeer, but for versions.
-
-The Versioneer
-==============
-
-* like a rocketeer, but for versions!
-* https://github.com/warner/python-versioneer
-* Brian Warner
-* License: Public Domain
-* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, 3.5, 3.6, and pypy
-* [![Latest Version]
-(https://pypip.in/version/versioneer/badge.svg?style=flat)
-](https://pypi.python.org/pypi/versioneer/)
-* [![Build Status]
-(https://travis-ci.org/warner/python-versioneer.png?branch=master)
-](https://travis-ci.org/warner/python-versioneer)
-
-This is a tool for managing a recorded version number in distutils-based
-python projects. The goal is to remove the tedious and error-prone "update
-the embedded version string" step from your release process. Making a new
-release should be as easy as recording a new tag in your version-control
-system, and maybe making new tarballs.
-
-
-## Quick Install
-
-* `pip install versioneer` to somewhere in your $PATH
-* add a `[versioneer]` section to your setup.cfg (see below)
-* run `versioneer install` in your source tree, commit the results
-
-## Version Identifiers
-
-Source trees come from a variety of places:
-
-* a version-control system checkout (mostly used by developers)
-* a nightly tarball, produced by build automation
-* a snapshot tarball, produced by a web-based VCS browser, like github's
- "tarball from tag" feature
-* a release tarball, produced by "setup.py sdist", distributed through PyPI
-
-Within each source tree, the version identifier (either a string or a number,
-this tool is format-agnostic) can come from a variety of places:
-
-* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
- about recent "tags" and an absolute revision-id
-* the name of the directory into which the tarball was unpacked
-* an expanded VCS keyword ($Id$, etc)
-* a `_version.py` created by some earlier build step
-
-For released software, the version identifier is closely related to a VCS
-tag. Some projects use tag names that include more than just the version
-string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
-needs to strip the tag prefix to extract the version identifier. For
-unreleased software (between tags), the version identifier should provide
-enough information to help developers recreate the same tree, while also
-giving them an idea of roughly how old the tree is (after version 1.2, before
-version 1.3). Many VCS systems can report a description that captures this,
-for example `git describe --tags --dirty --always` reports things like
-"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
-0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
-uncommitted changes).
-
-The version identifier is used for multiple purposes:
-
-* to allow the module to self-identify its version: `myproject.__version__`
-* to choose a name and prefix for a 'setup.py sdist' tarball
-
-## Theory of Operation
-
-Versioneer works by adding a special `_version.py` file into your source
-tree, where your `__init__.py` can import it. This `_version.py` knows how to
-dynamically ask the VCS tool for version information at import time.
-
-`_version.py` also contains `$Revision$` markers, and the installation
-process marks `_version.py` to have this marker rewritten with a tag name
-during the `git archive` command. As a result, generated tarballs will
-contain enough information to get the proper version.
-
-To allow `setup.py` to compute a version too, a `versioneer.py` is added to
-the top level of your source tree, next to `setup.py` and the `setup.cfg`
-that configures it. This overrides several distutils/setuptools commands to
-compute the version when invoked, and changes `setup.py build` and `setup.py
-sdist` to replace `_version.py` with a small static file that contains just
-the generated version data.
-
-## Installation
-
-See [INSTALL.md](./INSTALL.md) for detailed installation instructions.
-
-## Version-String Flavors
-
-Code which uses Versioneer can learn about its version string at runtime by
-importing `_version` from your main `__init__.py` file and running the
-`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
-import the top-level `versioneer.py` and run `get_versions()`.
-
-Both functions return a dictionary with different flavors of version
-information:
-
-* `['version']`: A condensed version string, rendered using the selected
- style. This is the most commonly used value for the project's version
- string. The default "pep440" style yields strings like `0.11`,
- `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
- below for alternative styles.
-
-* `['full-revisionid']`: detailed revision identifier. For Git, this is the
- full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
-
-* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the
- commit date in ISO 8601 format. This will be None if the date is not
- available.
-
-* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
- this is only accurate if run in a VCS checkout, otherwise it is likely to
- be False or None
-
-* `['error']`: if the version string could not be computed, this will be set
- to a string describing the problem, otherwise it will be None. It may be
- useful to throw an exception in setup.py if this is set, to avoid e.g.
- creating tarballs with a version string of "unknown".
-
-Some variants are more useful than others. Including `full-revisionid` in a
-bug report should allow developers to reconstruct the exact code being tested
-(or indicate the presence of local changes that should be shared with the
-developers). `version` is suitable for display in an "about" box or a CLI
-`--version` output: it can be easily compared against release notes and lists
-of bugs fixed in various releases.
-
-The installer adds the following text to your `__init__.py` to place a basic
-version in `YOURPROJECT.__version__`:
-
- from ._version import get_versions
- __version__ = get_versions()['version']
- del get_versions
-
-## Styles
-
-The setup.cfg `style=` configuration controls how the VCS information is
-rendered into a version string.
-
-The default style, "pep440", produces a PEP440-compliant string, equal to the
-un-prefixed tag name for actual releases, and containing an additional "local
-version" section with more detail for in-between builds. For Git, this is
-TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
---dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
-tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
-that this commit is two revisions ("+2") beyond the "0.11" tag. For released
-software (exactly equal to a known tag), the identifier will only contain the
-stripped tag, e.g. "0.11".
-
-Other styles are available. See [details.md](details.md) in the Versioneer
-source tree for descriptions.
-
-## Debugging
-
-Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
-to return a version of "0+unknown". To investigate the problem, run `setup.py
-version`, which will run the version-lookup code in a verbose mode, and will
-display the full contents of `get_versions()` (including the `error` string,
-which may help identify what went wrong).
-
-## Known Limitations
-
-Some situations are known to cause problems for Versioneer. This details the
-most significant ones. More can be found on Github
-[issues page](https://github.com/warner/python-versioneer/issues).
-
-### Subprojects
-
-Versioneer has limited support for source trees in which `setup.py` is not in
-the root directory (e.g. `setup.py` and `.git/` are *not* siblings). There are
-two common reasons why `setup.py` might not be in the root:
-
-* Source trees which contain multiple subprojects, such as
- [Buildbot](https://github.com/buildbot/buildbot), which contains both
- "master" and "slave" subprojects, each with their own `setup.py`,
- `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI
- distributions (and upload multiple independently-installable tarballs).
-* Source trees whose main purpose is to contain a C library, but which also
- provide bindings to Python (and perhaps other languages) in subdirectories.
-
-Versioneer will look for `.git` in parent directories, and most operations
-should get the right version string. However `pip` and `setuptools` have bugs
-and implementation details which frequently cause `pip install .` from a
-subproject directory to fail to find a correct version string (so it usually
-defaults to `0+unknown`).
-
-`pip install --editable .` should work correctly. `setup.py install` might
-work too.
-
-Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in
-some later version.
-
-[Bug #38](https://github.com/warner/python-versioneer/issues/38) is tracking
-this issue. The discussion in
-[PR #61](https://github.com/warner/python-versioneer/pull/61) describes the
-issue from the Versioneer side in more detail.
-[pip PR#3176](https://github.com/pypa/pip/pull/3176) and
-[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve
-pip to let Versioneer work correctly.
-
-Versioneer-0.16 and earlier only looked for a `.git` directory next to the
-`setup.cfg`, so subprojects were completely unsupported with those releases.
-
-### Editable installs with setuptools <= 18.5
-
-`setup.py develop` and `pip install --editable .` allow you to install a
-project into a virtualenv once, then continue editing the source code (and
-test) without re-installing after every change.
-
-"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a
-convenient way to specify executable scripts that should be installed along
-with the python package.
-
-These both work as expected when using modern setuptools. When using
-setuptools-18.5 or earlier, however, certain operations will cause
-`pkg_resources.DistributionNotFound` errors when running the entrypoint
-script, which must be resolved by re-installing the package. This happens
-when the install happens with one version, then the egg_info data is
-regenerated while a different version is checked out. Many setup.py commands
-cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into
-a different virtualenv), so this can be surprising.
-
-[Bug #83](https://github.com/warner/python-versioneer/issues/83) describes
-this one, but upgrading to a newer version of setuptools should probably
-resolve it.
-
-### Unicode version strings
-
-While Versioneer works (and is continually tested) with both Python 2 and
-Python 3, it is not entirely consistent with bytes-vs-unicode distinctions.
-Newer releases probably generate unicode version strings on py2. It's not
-clear that this is wrong, but it may be surprising for applications when they
-write these strings to a network connection or include them in bytes-oriented
-APIs like cryptographic checksums.
-
-[Bug #71](https://github.com/warner/python-versioneer/issues/71) investigates
-this question.
-
-
-## Updating Versioneer
-
-To upgrade your project to a new release of Versioneer, do the following:
-
-* install the new Versioneer (`pip install -U versioneer` or equivalent)
-* edit `setup.cfg`, if necessary, to include any new configuration settings
- indicated by the release notes. See [UPGRADING](./UPGRADING.md) for details.
-* re-run `versioneer install` in your source tree, to replace
- `SRC/_version.py`
-* commit any changed files
-
-## Future Directions
-
-This tool is designed to make it easily extended to other version-control
-systems: all VCS-specific components are in separate directories like
-src/git/ . The top-level `versioneer.py` script is assembled from these
-components by running make-versioneer.py . In the future, make-versioneer.py
-will take a VCS name as an argument, and will construct a version of
-`versioneer.py` that is specific to the given VCS. It might also take the
-configuration arguments that are currently provided manually during
-installation by editing setup.py . Alternatively, it might go the other
-direction and include code from all supported VCS systems, reducing the
-number of intermediate scripts.
-
-
-## License
-
-To make Versioneer easier to embed, all its code is dedicated to the public
-domain. The `_version.py` that it creates is also in the public domain.
-Specifically, both are released under the Creative Commons "Public Domain
-Dedication" license (CC0-1.0), as described in
-https://creativecommons.org/publicdomain/zero/1.0/ .
-
-"""
-
-from __future__ import print_function
-try:
- import configparser
-except ImportError:
- import ConfigParser as configparser
-import errno
-import json
-import os
-import re
-import subprocess
-import sys
-
-
-class VersioneerConfig:
- """Container for Versioneer configuration parameters."""
-
-
-def get_root():
- """Get the project root directory.
-
- We require that all commands are run from the project root, i.e. the
- directory that contains setup.py, setup.cfg, and versioneer.py .
- """
- root = os.path.realpath(os.path.abspath(os.getcwd()))
- setup_py = os.path.join(root, "setup.py")
- versioneer_py = os.path.join(root, "versioneer.py")
- if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
- # allow 'python path/to/setup.py COMMAND'
- root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
- setup_py = os.path.join(root, "setup.py")
- versioneer_py = os.path.join(root, "versioneer.py")
- if not (os.path.exists(setup_py) or os.path.exists(versioneer_py)):
- err = ("Versioneer was unable to run the project root directory. "
- "Versioneer requires setup.py to be executed from "
- "its immediate directory (like 'python setup.py COMMAND'), "
- "or in a way that lets it use sys.argv[0] to find the root "
- "(like 'python path/to/setup.py COMMAND').")
- raise VersioneerBadRootError(err)
- try:
- # Certain runtime workflows (setup.py install/develop in a setuptools
- # tree) execute all dependencies in a single python process, so
- # "versioneer" may be imported multiple times, and python's shared
- # module-import table will cache the first one. So we can't use
- # os.path.dirname(__file__), as that will find whichever
- # versioneer.py was first imported, even in later projects.
- me = os.path.realpath(os.path.abspath(__file__))
- me_dir = os.path.normcase(os.path.splitext(me)[0])
- vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0])
- if me_dir != vsr_dir:
- print("Warning: build in %s is using versioneer.py from %s"
- % (os.path.dirname(me), versioneer_py))
- except NameError:
- pass
- return root
-
-
-def get_config_from_root(root):
- """Read the project setup.cfg file to determine Versioneer config."""
- # This might raise EnvironmentError (if setup.cfg is missing), or
- # configparser.NoSectionError (if it lacks a [versioneer] section), or
- # configparser.NoOptionError (if it lacks "VCS="). See the docstring at
- # the top of versioneer.py for instructions on writing your setup.cfg .
- setup_cfg = os.path.join(root, "setup.cfg")
- parser = configparser.SafeConfigParser()
- with open(setup_cfg, "r") as f:
- parser.readfp(f)
- VCS = parser.get("versioneer", "VCS") # mandatory
-
- def get(parser, name):
- if parser.has_option("versioneer", name):
- return parser.get("versioneer", name)
- return None
- cfg = VersioneerConfig()
- cfg.VCS = VCS
- cfg.style = get(parser, "style") or ""
- cfg.versionfile_source = get(parser, "versionfile_source")
- cfg.versionfile_build = get(parser, "versionfile_build")
- cfg.tag_prefix = get(parser, "tag_prefix")
- if cfg.tag_prefix in ("''", '""'):
- cfg.tag_prefix = ""
- cfg.parentdir_prefix = get(parser, "parentdir_prefix")
- cfg.verbose = get(parser, "verbose")
- return cfg
-
-
-class NotThisMethod(Exception):
- """Exception raised if a method is not valid for the current scenario."""
-
-
-# these dictionaries contain VCS-specific tools
-LONG_VERSION_PY = {}
-HANDLERS = {}
-
-
-def register_vcs_handler(vcs, method): # decorator
- """Decorator to mark a method as the handler for a particular VCS."""
- def decorate(f):
- """Store f in HANDLERS[vcs][method]."""
- if vcs not in HANDLERS:
- HANDLERS[vcs] = {}
- HANDLERS[vcs][method] = f
- return f
- return decorate
-
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
- env=None):
- """Call the given command(s)."""
- assert isinstance(commands, list)
- p = None
- for c in commands:
- try:
- dispcmd = str([c] + args)
- # remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, env=env,
- stdout=subprocess.PIPE,
- stderr=(subprocess.PIPE if hide_stderr
- else None))
- break
- except EnvironmentError:
- e = sys.exc_info()[1]
- if e.errno == errno.ENOENT:
- continue
- if verbose:
- print("unable to run %s" % dispcmd)
- print(e)
- return None, None
- else:
- if verbose:
- print("unable to find command, tried %s" % (commands,))
- return None, None
- stdout = p.communicate()[0].strip()
- if sys.version_info[0] >= 3:
- stdout = stdout.decode()
- if p.returncode != 0:
- if verbose:
- print("unable to run %s (error)" % dispcmd)
- print("stdout was %s" % stdout)
- return None, p.returncode
- return stdout, p.returncode
-
-
-LONG_VERSION_PY['git'] = '''
-# This file helps to compute a version number in source trees obtained from
-# git-archive tarball (such as those provided by githubs download-from-tag
-# feature). Distribution tarballs (built by setup.py sdist) and build
-# directories (produced by setup.py build) will contain a much shorter file
-# that just contains the computed version number.
-
-# This file is released into the public domain. Generated by
-# versioneer-0.18 (https://github.com/warner/python-versioneer)
-
-"""Git implementation of _version.py."""
-
-import errno
-import os
-import re
-import subprocess
-import sys
-
-
-def get_keywords():
- """Get the keywords needed to look up the version information."""
- # these strings will be replaced by git during git-archive.
- # setup.py/versioneer.py will grep for the variable names, so they must
- # each be defined on a line of their own. _version.py will just call
- # get_keywords().
- git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
- git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
- git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s"
- keywords = {"refnames": git_refnames, "full": git_full, "date": git_date}
- return keywords
-
-
-class VersioneerConfig:
- """Container for Versioneer configuration parameters."""
-
-
-def get_config():
- """Create, populate and return the VersioneerConfig() object."""
- # these strings are filled in when 'setup.py versioneer' creates
- # _version.py
- cfg = VersioneerConfig()
- cfg.VCS = "git"
- cfg.style = "%(STYLE)s"
- cfg.tag_prefix = "%(TAG_PREFIX)s"
- cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
- cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
- cfg.verbose = False
- return cfg
-
-
-class NotThisMethod(Exception):
- """Exception raised if a method is not valid for the current scenario."""
-
-
-LONG_VERSION_PY = {}
-HANDLERS = {}
-
-
-def register_vcs_handler(vcs, method): # decorator
- """Decorator to mark a method as the handler for a particular VCS."""
- def decorate(f):
- """Store f in HANDLERS[vcs][method]."""
- if vcs not in HANDLERS:
- HANDLERS[vcs] = {}
- HANDLERS[vcs][method] = f
- return f
- return decorate
-
-
-def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False,
- env=None):
- """Call the given command(s)."""
- assert isinstance(commands, list)
- p = None
- for c in commands:
- try:
- dispcmd = str([c] + args)
- # remember shell=False, so use git.cmd on windows, not just git
- p = subprocess.Popen([c] + args, cwd=cwd, env=env,
- stdout=subprocess.PIPE,
- stderr=(subprocess.PIPE if hide_stderr
- else None))
- break
- except EnvironmentError:
- e = sys.exc_info()[1]
- if e.errno == errno.ENOENT:
- continue
- if verbose:
- print("unable to run %%s" %% dispcmd)
- print(e)
- return None, None
- else:
- if verbose:
- print("unable to find command, tried %%s" %% (commands,))
- return None, None
- stdout = p.communicate()[0].strip()
- if sys.version_info[0] >= 3:
- stdout = stdout.decode()
- if p.returncode != 0:
- if verbose:
- print("unable to run %%s (error)" %% dispcmd)
- print("stdout was %%s" %% stdout)
- return None, p.returncode
- return stdout, p.returncode
-
-
-def versions_from_parentdir(parentdir_prefix, root, verbose):
- """Try to determine the version from the parent directory name.
-
- Source tarballs conventionally unpack into a directory that includes both
- the project name and a version string. We will also support searching up
- two directory levels for an appropriately named parent directory
- """
- rootdirs = []
-
- for i in range(3):
- dirname = os.path.basename(root)
- if dirname.startswith(parentdir_prefix):
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None, "date": None}
- else:
- rootdirs.append(root)
- root = os.path.dirname(root) # up a level
-
- if verbose:
- print("Tried directories %%s but none started with prefix %%s" %%
- (str(rootdirs), parentdir_prefix))
- raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-
-
-@register_vcs_handler("git", "get_keywords")
-def git_get_keywords(versionfile_abs):
- """Extract version information from the given file."""
- # the code embedded in _version.py can just fetch the value of these
- # keywords. When used from setup.py, we don't want to import _version.py,
- # so we do it with a regexp instead. This function is not used from
- # _version.py.
- keywords = {}
- try:
- f = open(versionfile_abs, "r")
- for line in f.readlines():
- if line.strip().startswith("git_refnames ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["refnames"] = mo.group(1)
- if line.strip().startswith("git_full ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["full"] = mo.group(1)
- if line.strip().startswith("git_date ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["date"] = mo.group(1)
- f.close()
- except EnvironmentError:
- pass
- return keywords
-
-
-@register_vcs_handler("git", "keywords")
-def git_versions_from_keywords(keywords, tag_prefix, verbose):
- """Get version information from git keywords."""
- if not keywords:
- raise NotThisMethod("no keywords at all, weird")
- date = keywords.get("date")
- if date is not None:
- # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant
- # datestamp. However we prefer "%%ci" (which expands to an "ISO-8601
- # -like" string, which we must then edit to make compliant), because
- # it's been around since git-1.5.3, and it's too difficult to
- # discover which version we're using, or to work around using an
- # older one.
- date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
- refnames = keywords["refnames"].strip()
- if refnames.startswith("$Format"):
- if verbose:
- print("keywords are unexpanded, not using")
- raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
- refs = set([r.strip() for r in refnames.strip("()").split(",")])
- # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
- # just "foo-1.0". If we see a "tag: " prefix, prefer those.
- TAG = "tag: "
- tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
- if not tags:
- # Either we're using git < 1.8.3, or there really are no tags. We use
- # a heuristic: assume all version tags have a digit. The old git %%d
- # expansion behaves like git log --decorate=short and strips out the
- # refs/heads/ and refs/tags/ prefixes that would let us distinguish
- # between branches and tags. By ignoring refnames without digits, we
- # filter out many common branch names like "release" and
- # "stabilization", as well as "HEAD" and "master".
- tags = set([r for r in refs if re.search(r'\d', r)])
- if verbose:
- print("discarding '%%s', no digits" %% ",".join(refs - tags))
- if verbose:
- print("likely tags: %%s" %% ",".join(sorted(tags)))
- for ref in sorted(tags):
- # sorting will prefer e.g. "2.0" over "2.0rc1"
- if ref.startswith(tag_prefix):
- r = ref[len(tag_prefix):]
- if verbose:
- print("picking %%s" %% r)
- return {"version": r,
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None,
- "date": date}
- # no suitable tags, so version is "0+unknown", but full hex is still there
- if verbose:
- print("no suitable tags, using unknown + full revision id")
- return {"version": "0+unknown",
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags", "date": None}
-
-
-@register_vcs_handler("git", "pieces_from_vcs")
-def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
- """Get version from 'git describe' in the root of the source tree.
-
- This only gets called if the git-archive 'subst' keywords were *not*
- expanded, and _version.py hasn't already been rewritten with a short
- version string, meaning we're inside a checked out source tree.
- """
- GITS = ["git"]
- if sys.platform == "win32":
- GITS = ["git.cmd", "git.exe"]
-
- out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
- hide_stderr=True)
- if rc != 0:
- if verbose:
- print("Directory %%s not under git control" %% root)
- raise NotThisMethod("'git rev-parse --git-dir' returned error")
-
- # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
- # if there isn't one, this yields HEX[-dirty] (no NUM)
- describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
- "--always", "--long",
- "--match", "%%s*" %% tag_prefix],
- cwd=root)
- # --long was added in git-1.5.5
- if describe_out is None:
- raise NotThisMethod("'git describe' failed")
- describe_out = describe_out.strip()
- full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
- if full_out is None:
- raise NotThisMethod("'git rev-parse' failed")
- full_out = full_out.strip()
-
- pieces = {}
- pieces["long"] = full_out
- pieces["short"] = full_out[:7] # maybe improved later
- pieces["error"] = None
-
- # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
- # TAG might have hyphens.
- git_describe = describe_out
-
- # look for -dirty suffix
- dirty = git_describe.endswith("-dirty")
- pieces["dirty"] = dirty
- if dirty:
- git_describe = git_describe[:git_describe.rindex("-dirty")]
-
- # now we have TAG-NUM-gHEX or HEX
-
- if "-" in git_describe:
- # TAG-NUM-gHEX
- mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
- if not mo:
- # unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%%s'"
- %% describe_out)
- return pieces
-
- # tag
- full_tag = mo.group(1)
- if not full_tag.startswith(tag_prefix):
- if verbose:
- fmt = "tag '%%s' doesn't start with prefix '%%s'"
- print(fmt %% (full_tag, tag_prefix))
- pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
- %% (full_tag, tag_prefix))
- return pieces
- pieces["closest-tag"] = full_tag[len(tag_prefix):]
-
- # distance: number of commits since tag
- pieces["distance"] = int(mo.group(2))
-
- # commit: short hex revision ID
- pieces["short"] = mo.group(3)
-
- else:
- # HEX: no tags
- pieces["closest-tag"] = None
- count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
- cwd=root)
- pieces["distance"] = int(count_out) # total number of commits
-
- # commit date: see ISO-8601 comment in git_versions_from_keywords()
- date = run_command(GITS, ["show", "-s", "--format=%%ci", "HEAD"],
- cwd=root)[0].strip()
- pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
-
- return pieces
-
-
-def plus_or_dot(pieces):
- """Return a + if we don't already have one, else return a ."""
- if "+" in pieces.get("closest-tag", ""):
- return "."
- return "+"
-
-
-def render_pep440(pieces):
- """Build up version string, with post-release "local version identifier".
-
- Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
- get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
-
- Exceptions:
- 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += plus_or_dot(pieces)
- rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- else:
- # exception #1
- rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
- pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- return rendered
-
-
-def render_pep440_pre(pieces):
- """TAG[.post.devDISTANCE] -- No -dirty.
-
- Exceptions:
- 1: no tags. 0.post.devDISTANCE
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += ".post.dev%%d" %% pieces["distance"]
- else:
- # exception #1
- rendered = "0.post.dev%%d" %% pieces["distance"]
- return rendered
-
-
-def render_pep440_post(pieces):
- """TAG[.postDISTANCE[.dev0]+gHEX] .
-
- The ".dev0" means dirty. Note that .dev0 sorts backwards
- (a dirty tree will appear "older" than the corresponding clean one),
- but you shouldn't be releasing software with -dirty anyways.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += plus_or_dot(pieces)
- rendered += "g%%s" %% pieces["short"]
- else:
- # exception #1
- rendered = "0.post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += "+g%%s" %% pieces["short"]
- return rendered
-
-
-def render_pep440_old(pieces):
- """TAG[.postDISTANCE[.dev0]] .
-
- The ".dev0" means dirty.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- else:
- # exception #1
- rendered = "0.post%%d" %% pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- return rendered
-
-
-def render_git_describe(pieces):
- """TAG[-DISTANCE-gHEX][-dirty].
-
- Like 'git describe --tags --dirty --always'.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
-
-
-def render_git_describe_long(pieces):
- """TAG-DISTANCE-gHEX[-dirty].
-
- Like 'git describe --tags --dirty --always --long'.
- The distance/hash is unconditional.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
-
-
-def render(pieces, style):
- """Render the given version pieces into the requested style."""
- if pieces["error"]:
- return {"version": "unknown",
- "full-revisionid": pieces.get("long"),
- "dirty": None,
- "error": pieces["error"],
- "date": None}
-
- if not style or style == "default":
- style = "pep440" # the default
-
- if style == "pep440":
- rendered = render_pep440(pieces)
- elif style == "pep440-pre":
- rendered = render_pep440_pre(pieces)
- elif style == "pep440-post":
- rendered = render_pep440_post(pieces)
- elif style == "pep440-old":
- rendered = render_pep440_old(pieces)
- elif style == "git-describe":
- rendered = render_git_describe(pieces)
- elif style == "git-describe-long":
- rendered = render_git_describe_long(pieces)
- else:
- raise ValueError("unknown style '%%s'" %% style)
-
- return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None,
- "date": pieces.get("date")}
-
-
-def get_versions():
- """Get version information or return default if unable to do so."""
- # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
- # __file__, we can work backwards from there to the root. Some
- # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
- # case we can only use expanded keywords.
-
- cfg = get_config()
- verbose = cfg.verbose
-
- try:
- return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
- verbose)
- except NotThisMethod:
- pass
-
- try:
- root = os.path.realpath(__file__)
- # versionfile_source is the relative path from the top of the source
- # tree (where the .git directory might live) to this file. Invert
- # this to find the root from __file__.
- for i in cfg.versionfile_source.split('/'):
- root = os.path.dirname(root)
- except NameError:
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None,
- "error": "unable to find root of source tree",
- "date": None}
-
- try:
- pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
- return render(pieces, cfg.style)
- except NotThisMethod:
- pass
-
- try:
- if cfg.parentdir_prefix:
- return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
- except NotThisMethod:
- pass
-
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None,
- "error": "unable to compute version", "date": None}
-'''
-
-
-@register_vcs_handler("git", "get_keywords")
-def git_get_keywords(versionfile_abs):
- """Extract version information from the given file."""
- # the code embedded in _version.py can just fetch the value of these
- # keywords. When used from setup.py, we don't want to import _version.py,
- # so we do it with a regexp instead. This function is not used from
- # _version.py.
- keywords = {}
- try:
- f = open(versionfile_abs, "r")
- for line in f.readlines():
- if line.strip().startswith("git_refnames ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["refnames"] = mo.group(1)
- if line.strip().startswith("git_full ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["full"] = mo.group(1)
- if line.strip().startswith("git_date ="):
- mo = re.search(r'=\s*"(.*)"', line)
- if mo:
- keywords["date"] = mo.group(1)
- f.close()
- except EnvironmentError:
- pass
- return keywords
-
-
-@register_vcs_handler("git", "keywords")
-def git_versions_from_keywords(keywords, tag_prefix, verbose):
- """Get version information from git keywords."""
- if not keywords:
- raise NotThisMethod("no keywords at all, weird")
- date = keywords.get("date")
- if date is not None:
- # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant
- # datestamp. However we prefer "%ci" (which expands to an "ISO-8601
- # -like" string, which we must then edit to make compliant), because
- # it's been around since git-1.5.3, and it's too difficult to
- # discover which version we're using, or to work around using an
- # older one.
- date = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
- refnames = keywords["refnames"].strip()
- if refnames.startswith("$Format"):
- if verbose:
- print("keywords are unexpanded, not using")
- raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
- refs = set([r.strip() for r in refnames.strip("()").split(",")])
- # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
- # just "foo-1.0". If we see a "tag: " prefix, prefer those.
- TAG = "tag: "
- tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
- if not tags:
- # Either we're using git < 1.8.3, or there really are no tags. We use
- # a heuristic: assume all version tags have a digit. The old git %d
- # expansion behaves like git log --decorate=short and strips out the
- # refs/heads/ and refs/tags/ prefixes that would let us distinguish
- # between branches and tags. By ignoring refnames without digits, we
- # filter out many common branch names like "release" and
- # "stabilization", as well as "HEAD" and "master".
- tags = set([r for r in refs if re.search(r'\d', r)])
- if verbose:
- print("discarding '%s', no digits" % ",".join(refs - tags))
- if verbose:
- print("likely tags: %s" % ",".join(sorted(tags)))
- for ref in sorted(tags):
- # sorting will prefer e.g. "2.0" over "2.0rc1"
- if ref.startswith(tag_prefix):
- r = ref[len(tag_prefix):]
- if verbose:
- print("picking %s" % r)
- return {"version": r,
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": None,
- "date": date}
- # no suitable tags, so version is "0+unknown", but full hex is still there
- if verbose:
- print("no suitable tags, using unknown + full revision id")
- return {"version": "0+unknown",
- "full-revisionid": keywords["full"].strip(),
- "dirty": False, "error": "no suitable tags", "date": None}
-
-
-@register_vcs_handler("git", "pieces_from_vcs")
-def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
- """Get version from 'git describe' in the root of the source tree.
-
- This only gets called if the git-archive 'subst' keywords were *not*
- expanded, and _version.py hasn't already been rewritten with a short
- version string, meaning we're inside a checked out source tree.
- """
- GITS = ["git"]
- if sys.platform == "win32":
- GITS = ["git.cmd", "git.exe"]
-
- out, rc = run_command(GITS, ["rev-parse", "--git-dir"], cwd=root,
- hide_stderr=True)
- if rc != 0:
- if verbose:
- print("Directory %s not under git control" % root)
- raise NotThisMethod("'git rev-parse --git-dir' returned error")
-
- # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
- # if there isn't one, this yields HEX[-dirty] (no NUM)
- describe_out, rc = run_command(GITS, ["describe", "--tags", "--dirty",
- "--always", "--long",
- "--match", "%s*" % tag_prefix],
- cwd=root)
- # --long was added in git-1.5.5
- if describe_out is None:
- raise NotThisMethod("'git describe' failed")
- describe_out = describe_out.strip()
- full_out, rc = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
- if full_out is None:
- raise NotThisMethod("'git rev-parse' failed")
- full_out = full_out.strip()
-
- pieces = {}
- pieces["long"] = full_out
- pieces["short"] = full_out[:7] # maybe improved later
- pieces["error"] = None
-
- # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
- # TAG might have hyphens.
- git_describe = describe_out
-
- # look for -dirty suffix
- dirty = git_describe.endswith("-dirty")
- pieces["dirty"] = dirty
- if dirty:
- git_describe = git_describe[:git_describe.rindex("-dirty")]
-
- # now we have TAG-NUM-gHEX or HEX
-
- if "-" in git_describe:
- # TAG-NUM-gHEX
- mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
- if not mo:
- # unparseable. Maybe git-describe is misbehaving?
- pieces["error"] = ("unable to parse git-describe output: '%s'"
- % describe_out)
- return pieces
-
- # tag
- full_tag = mo.group(1)
- if not full_tag.startswith(tag_prefix):
- if verbose:
- fmt = "tag '%s' doesn't start with prefix '%s'"
- print(fmt % (full_tag, tag_prefix))
- pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
- % (full_tag, tag_prefix))
- return pieces
- pieces["closest-tag"] = full_tag[len(tag_prefix):]
-
- # distance: number of commits since tag
- pieces["distance"] = int(mo.group(2))
-
- # commit: short hex revision ID
- pieces["short"] = mo.group(3)
-
- else:
- # HEX: no tags
- pieces["closest-tag"] = None
- count_out, rc = run_command(GITS, ["rev-list", "HEAD", "--count"],
- cwd=root)
- pieces["distance"] = int(count_out) # total number of commits
-
- # commit date: see ISO-8601 comment in git_versions_from_keywords()
- date = run_command(GITS, ["show", "-s", "--format=%ci", "HEAD"],
- cwd=root)[0].strip()
- pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1)
-
- return pieces
-
-
-def do_vcs_install(manifest_in, versionfile_source, ipy):
- """Git-specific installation logic for Versioneer.
-
- For Git, this means creating/changing .gitattributes to mark _version.py
- for export-subst keyword substitution.
- """
- GITS = ["git"]
- if sys.platform == "win32":
- GITS = ["git.cmd", "git.exe"]
- files = [manifest_in, versionfile_source]
- if ipy:
- files.append(ipy)
- try:
- me = __file__
- if me.endswith(".pyc") or me.endswith(".pyo"):
- me = os.path.splitext(me)[0] + ".py"
- versioneer_file = os.path.relpath(me)
- except NameError:
- versioneer_file = "versioneer.py"
- files.append(versioneer_file)
- present = False
- try:
- f = open(".gitattributes", "r")
- for line in f.readlines():
- if line.strip().startswith(versionfile_source):
- if "export-subst" in line.strip().split()[1:]:
- present = True
- f.close()
- except EnvironmentError:
- pass
- if not present:
- f = open(".gitattributes", "a+")
- f.write("%s export-subst\n" % versionfile_source)
- f.close()
- files.append(".gitattributes")
- run_command(GITS, ["add", "--"] + files)
-
-
-def versions_from_parentdir(parentdir_prefix, root, verbose):
- """Try to determine the version from the parent directory name.
-
- Source tarballs conventionally unpack into a directory that includes both
- the project name and a version string. We will also support searching up
- two directory levels for an appropriately named parent directory
- """
- rootdirs = []
-
- for i in range(3):
- dirname = os.path.basename(root)
- if dirname.startswith(parentdir_prefix):
- return {"version": dirname[len(parentdir_prefix):],
- "full-revisionid": None,
- "dirty": False, "error": None, "date": None}
- else:
- rootdirs.append(root)
- root = os.path.dirname(root) # up a level
-
- if verbose:
- print("Tried directories %s but none started with prefix %s" %
- (str(rootdirs), parentdir_prefix))
- raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
-
-
-SHORT_VERSION_PY = """
-# This file was generated by 'versioneer.py' (0.18) from
-# revision-control system data, or from the parent directory name of an
-# unpacked source archive. Distribution tarballs contain a pre-generated copy
-# of this file.
-
-import json
-
-version_json = '''
-%s
-''' # END VERSION_JSON
-
-
-def get_versions():
- return json.loads(version_json)
-"""
-
-
-def versions_from_file(filename):
- """Try to determine the version from _version.py if present."""
- try:
- with open(filename) as f:
- contents = f.read()
- except EnvironmentError:
- raise NotThisMethod("unable to read _version.py")
- mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
- contents, re.M | re.S)
- if not mo:
- mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON",
- contents, re.M | re.S)
- if not mo:
- raise NotThisMethod("no version_json in _version.py")
- return json.loads(mo.group(1))
-
-
-def write_to_version_file(filename, versions):
- """Write the given version number to the given _version.py file."""
- os.unlink(filename)
- contents = json.dumps(versions, sort_keys=True,
- indent=1, separators=(",", ": "))
- with open(filename, "w") as f:
- f.write(SHORT_VERSION_PY % contents)
-
- print("set %s to '%s'" % (filename, versions["version"]))
-
-
-def plus_or_dot(pieces):
- """Return a + if we don't already have one, else return a ."""
- if "+" in pieces.get("closest-tag", ""):
- return "."
- return "+"
-
-
-def render_pep440(pieces):
- """Build up version string, with post-release "local version identifier".
-
- Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
- get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
-
- Exceptions:
- 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += plus_or_dot(pieces)
- rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- else:
- # exception #1
- rendered = "0+untagged.%d.g%s" % (pieces["distance"],
- pieces["short"])
- if pieces["dirty"]:
- rendered += ".dirty"
- return rendered
-
-
-def render_pep440_pre(pieces):
- """TAG[.post.devDISTANCE] -- No -dirty.
-
- Exceptions:
- 1: no tags. 0.post.devDISTANCE
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += ".post.dev%d" % pieces["distance"]
- else:
- # exception #1
- rendered = "0.post.dev%d" % pieces["distance"]
- return rendered
-
-
-def render_pep440_post(pieces):
- """TAG[.postDISTANCE[.dev0]+gHEX] .
-
- The ".dev0" means dirty. Note that .dev0 sorts backwards
- (a dirty tree will appear "older" than the corresponding clean one),
- but you shouldn't be releasing software with -dirty anyways.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += plus_or_dot(pieces)
- rendered += "g%s" % pieces["short"]
- else:
- # exception #1
- rendered = "0.post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- rendered += "+g%s" % pieces["short"]
- return rendered
-
-
-def render_pep440_old(pieces):
- """TAG[.postDISTANCE[.dev0]] .
-
- The ".dev0" means dirty.
-
- Exceptions:
- 1: no tags. 0.postDISTANCE[.dev0]
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"] or pieces["dirty"]:
- rendered += ".post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- else:
- # exception #1
- rendered = "0.post%d" % pieces["distance"]
- if pieces["dirty"]:
- rendered += ".dev0"
- return rendered
-
-
-def render_git_describe(pieces):
- """TAG[-DISTANCE-gHEX][-dirty].
-
- Like 'git describe --tags --dirty --always'.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- if pieces["distance"]:
- rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
-
-
-def render_git_describe_long(pieces):
- """TAG-DISTANCE-gHEX[-dirty].
-
- Like 'git describe --tags --dirty --always --long'.
- The distance/hash is unconditional.
-
- Exceptions:
- 1: no tags. HEX[-dirty] (note: no 'g' prefix)
- """
- if pieces["closest-tag"]:
- rendered = pieces["closest-tag"]
- rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
- else:
- # exception #1
- rendered = pieces["short"]
- if pieces["dirty"]:
- rendered += "-dirty"
- return rendered
-
-
-def render(pieces, style):
- """Render the given version pieces into the requested style."""
- if pieces["error"]:
- return {"version": "unknown",
- "full-revisionid": pieces.get("long"),
- "dirty": None,
- "error": pieces["error"],
- "date": None}
-
- if not style or style == "default":
- style = "pep440" # the default
-
- if style == "pep440":
- rendered = render_pep440(pieces)
- elif style == "pep440-pre":
- rendered = render_pep440_pre(pieces)
- elif style == "pep440-post":
- rendered = render_pep440_post(pieces)
- elif style == "pep440-old":
- rendered = render_pep440_old(pieces)
- elif style == "git-describe":
- rendered = render_git_describe(pieces)
- elif style == "git-describe-long":
- rendered = render_git_describe_long(pieces)
- else:
- raise ValueError("unknown style '%s'" % style)
-
- return {"version": rendered, "full-revisionid": pieces["long"],
- "dirty": pieces["dirty"], "error": None,
- "date": pieces.get("date")}
-
-
-class VersioneerBadRootError(Exception):
- """The project root directory is unknown or missing key files."""
-
-
-def get_versions(verbose=False):
- """Get the project version from whatever source is available.
-
- Returns dict with two keys: 'version' and 'full'.
- """
- if "versioneer" in sys.modules:
- # see the discussion in cmdclass.py:get_cmdclass()
- del sys.modules["versioneer"]
-
- root = get_root()
- cfg = get_config_from_root(root)
-
- assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
- handlers = HANDLERS.get(cfg.VCS)
- assert handlers, "unrecognized VCS '%s'" % cfg.VCS
- verbose = verbose or cfg.verbose
- assert cfg.versionfile_source is not None, \
- "please set versioneer.versionfile_source"
- assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
-
- versionfile_abs = os.path.join(root, cfg.versionfile_source)
-
- # extract version from first of: _version.py, VCS command (e.g. 'git
- # describe'), parentdir. This is meant to work for developers using a
- # source checkout, for users of a tarball created by 'setup.py sdist',
- # and for users of a tarball/zipball created by 'git archive' or github's
- # download-from-tag feature or the equivalent in other VCSes.
-
- get_keywords_f = handlers.get("get_keywords")
- from_keywords_f = handlers.get("keywords")
- if get_keywords_f and from_keywords_f:
- try:
- keywords = get_keywords_f(versionfile_abs)
- ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
- if verbose:
- print("got version from expanded keyword %s" % ver)
- return ver
- except NotThisMethod:
- pass
-
- try:
- ver = versions_from_file(versionfile_abs)
- if verbose:
- print("got version from file %s %s" % (versionfile_abs, ver))
- return ver
- except NotThisMethod:
- pass
-
- from_vcs_f = handlers.get("pieces_from_vcs")
- if from_vcs_f:
- try:
- pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
- ver = render(pieces, cfg.style)
- if verbose:
- print("got version from VCS %s" % ver)
- return ver
- except NotThisMethod:
- pass
-
- try:
- if cfg.parentdir_prefix:
- ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
- if verbose:
- print("got version from parentdir %s" % ver)
- return ver
- except NotThisMethod:
- pass
-
- if verbose:
- print("unable to compute version")
-
- return {"version": "0+unknown", "full-revisionid": None,
- "dirty": None, "error": "unable to compute version",
- "date": None}
-
-
-def get_version():
- """Get the short version string for this project."""
- return get_versions()["version"]
-
-
-def get_cmdclass():
- """Get the custom setuptools/distutils subclasses used by Versioneer."""
- if "versioneer" in sys.modules:
- del sys.modules["versioneer"]
- # this fixes the "python setup.py develop" case (also 'install' and
- # 'easy_install .'), in which subdependencies of the main project are
- # built (using setup.py bdist_egg) in the same python process. Assume
- # a main project A and a dependency B, which use different versions
- # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
- # sys.modules by the time B's setup.py is executed, causing B to run
- # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
- sandbox that restores sys.modules to its pre-build state, so the
- # parent is protected against the child's "import versioneer". By
- # removing ourselves from sys.modules here, before the child build
- # happens, we protect the child from the parent's versioneer too.
- # Also see https://github.com/warner/python-versioneer/issues/52
-
- cmds = {}
-
- # we add "version" to both distutils and setuptools
- from distutils.core import Command
-
- class cmd_version(Command):
- description = "report generated version string"
- user_options = []
- boolean_options = []
-
- def initialize_options(self):
- pass
-
- def finalize_options(self):
- pass
-
- def run(self):
- vers = get_versions(verbose=True)
- print("Version: %s" % vers["version"])
- print(" full-revisionid: %s" % vers.get("full-revisionid"))
- print(" dirty: %s" % vers.get("dirty"))
- print(" date: %s" % vers.get("date"))
- if vers["error"]:
- print(" error: %s" % vers["error"])
- cmds["version"] = cmd_version
-
- # we override "build_py" in both distutils and setuptools
- #
- # most invocation pathways end up running build_py:
- # distutils/build -> build_py
- # distutils/install -> distutils/build ->..
- # setuptools/bdist_wheel -> distutils/install ->..
- # setuptools/bdist_egg -> distutils/install_lib -> build_py
- # setuptools/install -> bdist_egg ->..
- # setuptools/develop -> ?
- # pip install:
- # copies source tree to a tempdir before running egg_info/etc
- # if .git isn't copied too, 'git describe' will fail
- # then does setup.py bdist_wheel, or sometimes setup.py install
- # setup.py egg_info -> ?
-
- # we override different "build_py" commands for both environments
- if "setuptools" in sys.modules:
- from setuptools.command.build_py import build_py as _build_py
- else:
- from distutils.command.build_py import build_py as _build_py
-
- class cmd_build_py(_build_py):
- def run(self):
- root = get_root()
- cfg = get_config_from_root(root)
- versions = get_versions()
- _build_py.run(self)
- # now locate _version.py in the new build/ directory and replace
- # it with an updated value
- if cfg.versionfile_build:
- target_versionfile = os.path.join(self.build_lib,
- cfg.versionfile_build)
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile, versions)
- cmds["build_py"] = cmd_build_py
-
- if "cx_Freeze" in sys.modules: # cx_freeze enabled?
- from cx_Freeze.dist import build_exe as _build_exe
- # nczeczulin reports that py2exe won't like the pep440-style string
- # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g.
- # setup(console=[{
- # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION
- # "product_version": versioneer.get_version(),
- # ...
-
- class cmd_build_exe(_build_exe):
- def run(self):
- root = get_root()
- cfg = get_config_from_root(root)
- versions = get_versions()
- target_versionfile = cfg.versionfile_source
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile, versions)
-
- _build_exe.run(self)
- os.unlink(target_versionfile)
- with open(cfg.versionfile_source, "w") as f:
- LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG %
- {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
- cmds["build_exe"] = cmd_build_exe
- del cmds["build_py"]
-
- if 'py2exe' in sys.modules: # py2exe enabled?
- try:
- from py2exe.distutils_buildexe import py2exe as _py2exe # py3
- except ImportError:
- from py2exe.build_exe import py2exe as _py2exe # py2
-
- class cmd_py2exe(_py2exe):
- def run(self):
- root = get_root()
- cfg = get_config_from_root(root)
- versions = get_versions()
- target_versionfile = cfg.versionfile_source
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile, versions)
-
- _py2exe.run(self)
- os.unlink(target_versionfile)
- with open(cfg.versionfile_source, "w") as f:
- LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG %
- {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
- cmds["py2exe"] = cmd_py2exe
-
- # we override different "sdist" commands for both environments
- if "setuptools" in sys.modules:
- from setuptools.command.sdist import sdist as _sdist
- else:
- from distutils.command.sdist import sdist as _sdist
-
- class cmd_sdist(_sdist):
- def run(self):
- versions = get_versions()
- self._versioneer_generated_versions = versions
- # unless we update this, the command will keep using the old
- # version
- self.distribution.metadata.version = versions["version"]
- return _sdist.run(self)
-
- def make_release_tree(self, base_dir, files):
- root = get_root()
- cfg = get_config_from_root(root)
- _sdist.make_release_tree(self, base_dir, files)
- # now locate _version.py in the new base_dir directory
- # (remembering that it may be a hardlink) and replace it with an
- # updated value
- target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
- print("UPDATING %s" % target_versionfile)
- write_to_version_file(target_versionfile,
- self._versioneer_generated_versions)
- cmds["sdist"] = cmd_sdist
-
- return cmds
-
-
-CONFIG_ERROR = """
-setup.cfg is missing the necessary Versioneer configuration. You need
-a section like:
-
- [versioneer]
- VCS = git
- style = pep440
- versionfile_source = src/myproject/_version.py
- versionfile_build = myproject/_version.py
- tag_prefix =
- parentdir_prefix = myproject-
-
-You will also need to edit your setup.py to use the results:
-
- import versioneer
- setup(version=versioneer.get_version(),
- cmdclass=versioneer.get_cmdclass(), ...)
-
-Please read the docstring in ./versioneer.py for configuration instructions,
-edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
-"""
-
-SAMPLE_CONFIG = """
-# See the docstring in versioneer.py for instructions. Note that you must
-# re-run 'versioneer.py setup' after changing this section, and commit the
-# resulting files.
-
-[versioneer]
-#VCS = git
-#style = pep440
-#versionfile_source =
-#versionfile_build =
-#tag_prefix =
-#parentdir_prefix =
-
-"""
-
-INIT_PY_SNIPPET = """
-from ._version import get_versions
-__version__ = get_versions()['version']
-del get_versions
-"""
-
-
-def do_setup():
- """Main VCS-independent setup function for installing Versioneer."""
- root = get_root()
- try:
- cfg = get_config_from_root(root)
- except (EnvironmentError, configparser.NoSectionError,
- configparser.NoOptionError) as e:
- if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
- print("Adding sample versioneer config to setup.cfg",
- file=sys.stderr)
- with open(os.path.join(root, "setup.cfg"), "a") as f:
- f.write(SAMPLE_CONFIG)
- print(CONFIG_ERROR, file=sys.stderr)
- return 1
-
- print(" creating %s" % cfg.versionfile_source)
- with open(cfg.versionfile_source, "w") as f:
- LONG = LONG_VERSION_PY[cfg.VCS]
- f.write(LONG % {"DOLLAR": "$",
- "STYLE": cfg.style,
- "TAG_PREFIX": cfg.tag_prefix,
- "PARENTDIR_PREFIX": cfg.parentdir_prefix,
- "VERSIONFILE_SOURCE": cfg.versionfile_source,
- })
-
- ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
- "__init__.py")
- if os.path.exists(ipy):
- try:
- with open(ipy, "r") as f:
- old = f.read()
- except EnvironmentError:
- old = ""
- if INIT_PY_SNIPPET not in old:
- print(" appending to %s" % ipy)
- with open(ipy, "a") as f:
- f.write(INIT_PY_SNIPPET)
- else:
- print(" %s unmodified" % ipy)
- else:
- print(" %s doesn't exist, ok" % ipy)
- ipy = None
-
- # Make sure both the top-level "versioneer.py" and versionfile_source
- # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
- # they'll be copied into source distributions. Pip won't be able to
- # install the package without this.
- manifest_in = os.path.join(root, "MANIFEST.in")
- simple_includes = set()
- try:
- with open(manifest_in, "r") as f:
- for line in f:
- if line.startswith("include "):
- for include in line.split()[1:]:
- simple_includes.add(include)
- except EnvironmentError:
- pass
- # That doesn't cover everything MANIFEST.in can do
- # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
- # it might give some false negatives. Appending redundant 'include'
- # lines is safe, though.
- if "versioneer.py" not in simple_includes:
- print(" appending 'versioneer.py' to MANIFEST.in")
- with open(manifest_in, "a") as f:
- f.write("include versioneer.py\n")
- else:
- print(" 'versioneer.py' already in MANIFEST.in")
- if cfg.versionfile_source not in simple_includes:
- print(" appending versionfile_source ('%s') to MANIFEST.in" %
- cfg.versionfile_source)
- with open(manifest_in, "a") as f:
- f.write("include %s\n" % cfg.versionfile_source)
- else:
- print(" versionfile_source already in MANIFEST.in")
-
- # Make VCS-specific changes. For git, this means creating/changing
- # .gitattributes to mark _version.py for export-subst keyword
- # substitution.
- do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
- return 0
-
-
-def scan_setup_py():
- """Validate the contents of setup.py against Versioneer's expectations."""
- found = set()
- setters = False
- errors = 0
- with open("setup.py", "r") as f:
- for line in f.readlines():
- if "import versioneer" in line:
- found.add("import")
- if "versioneer.get_cmdclass()" in line:
- found.add("cmdclass")
- if "versioneer.get_version()" in line:
- found.add("get_version")
- if "versioneer.VCS" in line:
- setters = True
- if "versioneer.versionfile_source" in line:
- setters = True
- if len(found) != 3:
- print("")
- print("Your setup.py appears to be missing some important items")
- print("(but I might be wrong). Please make sure it has something")
- print("roughly like the following:")
- print("")
- print(" import versioneer")
- print(" setup( version=versioneer.get_version(),")
- print(" cmdclass=versioneer.get_cmdclass(), ...)")
- print("")
- errors += 1
- if setters:
- print("You should remove lines like 'versioneer.VCS = ' and")
- print("'versioneer.versionfile_source = ' . This configuration")
- print("now lives in setup.cfg, and should be removed from setup.py")
- print("")
- errors += 1
- return errors
-
-
-if __name__ == "__main__":
- cmd = sys.argv[1]
- if cmd == "setup":
- errors = do_setup()
- errors += scan_setup_py()
- if errors:
- sys.exit(1)
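
For reference, the render_* helpers removed above all consume the same pieces
dict (closest tag, commit distance since that tag, short hash, dirty flag) and
differ only in the version string they build from it. A minimal sketch of the
default pep440 style for a build that is both past the tag and dirty, using a
made-up pieces value:

    >>> pieces = {"closest-tag": "1.15", "distance": 3,
    ...           "short": "abc1234", "dirty": True}
    >>> version = pieces["closest-tag"]
    >>> version += "+%d.g%s" % (pieces["distance"], pieces["short"])
    >>> version += ".dirty" if pieces["dirty"] else ""
    >>> version
    '1.15+3.gabc1234.dirty'
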
diff --git a/doc/Makefile b/doc/Makefile
index 4db17b297..776f9b778 100644
--- a/doc/Makefile
+++ b/doc/Makefile
@@ -46,7 +46,8 @@ help:
@echo " upload USERNAME=... RELEASE=... to upload built docs to docs.scipy.org"
clean:
- -rm -rf build/* source/reference/generated
+ -rm -rf build/*
+ find . -name generated -type d -prune -exec rm -rf "{}" ";"
version-check:
ifeq "$(GITVER)" "Unknown"
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 072a3b44e..dec8fff05 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -19,11 +19,19 @@ needs_sphinx = '1.0'
sys.path.insert(0, os.path.abspath('../sphinxext'))
-extensions = ['sphinx.ext.autodoc', 'numpydoc',
- 'sphinx.ext.intersphinx', 'sphinx.ext.coverage',
- 'sphinx.ext.doctest', 'sphinx.ext.autosummary',
- 'sphinx.ext.graphviz', 'sphinx.ext.ifconfig',
- 'matplotlib.sphinxext.plot_directive']
+extensions = [
+ 'sphinx.ext.autodoc',
+ 'numpydoc',
+ 'sphinx.ext.intersphinx',
+ 'sphinx.ext.coverage',
+ 'sphinx.ext.doctest',
+ 'sphinx.ext.autosummary',
+ 'sphinx.ext.graphviz',
+ 'sphinx.ext.ifconfig',
+ 'matplotlib.sphinxext.plot_directive',
+ 'IPython.sphinxext.ipython_console_highlighting',
+ 'IPython.sphinxext.ipython_directive',
+]
if sphinx.__version__ >= "1.4":
extensions.append('sphinx.ext.imgmath')
@@ -234,7 +242,7 @@ numpydoc_use_plots = True
# -----------------------------------------------------------------------------
import glob
-autosummary_generate = glob.glob("reference/*.rst")
+autosummary_generate = True
# -----------------------------------------------------------------------------
# Coverage checker
@@ -355,3 +363,8 @@ def linkcode_resolve(domain, info):
else:
return "https://github.com/numpy/numpy/blob/v%s/numpy/%s%s" % (
numpy.__version__, fn, linespec)
+
+doctest_global_setup = '''
+import numpy as np
+from numpy.random import randomgen
+'''
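
The doctest_global_setup added here runs before every doctest block when the
docs are built with sphinx.ext.doctest, so the randomgen pages can assume np
and randomgen are already imported. A small illustrative sketch of what an
example on those pages can then rely on (the Xoroshiro128 seed is arbitrary):

    >>> import numpy as np
    >>> from numpy.random import randomgen
    >>> rg = randomgen.RandomGenerator(randomgen.Xoroshiro128(12345))
    >>> rg.random_sample(3).shape
    (3,)
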
diff --git a/_randomgen/doc/source/brng/dsfmt.rst b/doc/source/reference/randomgen/brng/dsfmt.rst
index bd660c938..f9de48d61 100644
--- a/_randomgen/doc/source/brng/dsfmt.rst
+++ b/doc/source/reference/randomgen/brng/dsfmt.rst
@@ -1,12 +1,13 @@
Double SIMD Mersenne Twister (dSFMT)
------------------------------------
-.. module:: randomgen.dsfmt
+.. module:: numpy.random.randomgen.dsfmt
-.. currentmodule:: randomgen.dsfmt
+.. currentmodule:: numpy.random.randomgen.dsfmt
.. autoclass:: DSFMT
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/index.rst b/doc/source/reference/randomgen/brng/index.rst
index aceecc792..aceecc792 100644
--- a/_randomgen/doc/source/brng/index.rst
+++ b/doc/source/reference/randomgen/brng/index.rst
diff --git a/_randomgen/doc/source/brng/mt19937.rst b/doc/source/reference/randomgen/brng/mt19937.rst
index 23f8e4594..7739e16ce 100644
--- a/_randomgen/doc/source/brng/mt19937.rst
+++ b/doc/source/reference/randomgen/brng/mt19937.rst
@@ -1,11 +1,12 @@
Mersenne Twister (MT19937)
--------------------------
-.. module:: randomgen.mt19937
+.. module:: numpy.random.randomgen.mt19937
-.. currentmodule:: randomgen.mt19937
+.. currentmodule:: numpy.random.randomgen.mt19937
.. autoclass:: MT19937
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/pcg32.rst b/doc/source/reference/randomgen/brng/pcg32.rst
index 1854b4c68..aaf3929e8 100644
--- a/_randomgen/doc/source/brng/pcg32.rst
+++ b/doc/source/reference/randomgen/brng/pcg32.rst
@@ -1,11 +1,12 @@
Parallel Congruent Generator (32-bit, PCG32)
--------------------------------------------
-.. module:: randomgen.pcg32
+.. module:: numpy.random.randomgen.pcg32
-.. currentmodule:: randomgen.pcg32
+.. currentmodule:: numpy.random.randomgen.pcg32
.. autoclass:: PCG32
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/pcg64.rst b/doc/source/reference/randomgen/brng/pcg64.rst
index 496825dc4..94e73e491 100644
--- a/_randomgen/doc/source/brng/pcg64.rst
+++ b/doc/source/reference/randomgen/brng/pcg64.rst
@@ -1,11 +1,12 @@
Parallel Congruent Generator (64-bit, PCG64)
--------------------------------------------
-.. module:: randomgen.pcg64
+.. module:: numpy.random.randomgen.pcg64
-.. currentmodule:: randomgen.pcg64
+.. currentmodule:: numpy.random.randomgen.pcg64
.. autoclass:: PCG64
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/philox.rst b/doc/source/reference/randomgen/brng/philox.rst
index c2ffc44eb..091c4d3e0 100644
--- a/_randomgen/doc/source/brng/philox.rst
+++ b/doc/source/reference/randomgen/brng/philox.rst
@@ -1,11 +1,12 @@
Philox Counter-based RNG
------------------------
-.. module:: randomgen.philox
+.. module:: numpy.random.randomgen.philox
-.. currentmodule:: randomgen.philox
+.. currentmodule:: numpy.random.randomgen.philox
.. autoclass:: Philox
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/threefry.rst b/doc/source/reference/randomgen/brng/threefry.rst
index 98141d648..4f5c56bae 100644
--- a/_randomgen/doc/source/brng/threefry.rst
+++ b/doc/source/reference/randomgen/brng/threefry.rst
@@ -1,11 +1,12 @@
ThreeFry Counter-based RNG
--------------------------
-.. module:: randomgen.threefry
+.. module:: numpy.random.randomgen.threefry
-.. currentmodule:: randomgen.threefry
+.. currentmodule:: numpy.random.randomgen.threefry
.. autoclass:: ThreeFry
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/threefry32.rst b/doc/source/reference/randomgen/brng/threefry32.rst
index 2869cbac8..bd85db4a7 100644
--- a/_randomgen/doc/source/brng/threefry32.rst
+++ b/doc/source/reference/randomgen/brng/threefry32.rst
@@ -1,11 +1,12 @@
ThreeFry32 Counter-based RNG
----------------------------
-.. module:: randomgen.threefry32
+.. module:: numpy.random.randomgen.threefry32
-.. currentmodule:: randomgen.threefry32
+.. currentmodule:: numpy.random.randomgen.threefry32
.. autoclass:: ThreeFry32
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/xoroshiro128.rst b/doc/source/reference/randomgen/brng/xoroshiro128.rst
index 3d6735c62..6796c4457 100644
--- a/_randomgen/doc/source/brng/xoroshiro128.rst
+++ b/doc/source/reference/randomgen/brng/xoroshiro128.rst
@@ -1,11 +1,12 @@
Xoroshiro128+
-------------
-.. module:: randomgen.xoroshiro128
+.. module:: numpy.random.randomgen.xoroshiro128
-.. currentmodule:: randomgen.xoroshiro128
+.. currentmodule:: numpy.random.randomgen.xoroshiro128
.. autoclass:: Xoroshiro128
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/xorshift1024.rst b/doc/source/reference/randomgen/brng/xorshift1024.rst
index 38b293dcd..64df7e050 100644
--- a/_randomgen/doc/source/brng/xorshift1024.rst
+++ b/doc/source/reference/randomgen/brng/xorshift1024.rst
@@ -1,11 +1,12 @@
Xorshift1024*φ
--------------
-.. module:: randomgen.xorshift1024
+.. module:: numpy.random.randomgen.xorshift1024
-.. currentmodule:: randomgen.xorshift1024
+.. currentmodule:: numpy.random.randomgen.xorshift1024
.. autoclass:: Xorshift1024
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/xoshiro256starstar.rst b/doc/source/reference/randomgen/brng/xoshiro256starstar.rst
index 903e76bbb..7603e6f1b 100644
--- a/_randomgen/doc/source/brng/xoshiro256starstar.rst
+++ b/doc/source/reference/randomgen/brng/xoshiro256starstar.rst
@@ -1,11 +1,12 @@
Xoshiro256**
------------
-.. module:: randomgen.xoshiro256starstar
+.. module:: numpy.random.randomgen.xoshiro256starstar
-.. currentmodule:: randomgen.xoshiro256starstar
+.. currentmodule:: numpy.random.randomgen.xoshiro256starstar
.. autoclass:: Xoshiro256StarStar
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/brng/xoshiro512starstar.rst b/doc/source/reference/randomgen/brng/xoshiro512starstar.rst
index 3501b2c9c..64f95f750 100644
--- a/_randomgen/doc/source/brng/xoshiro512starstar.rst
+++ b/doc/source/reference/randomgen/brng/xoshiro512starstar.rst
@@ -1,11 +1,12 @@
Xoshiro512**
------------
-.. module:: randomgen.xoshiro512starstar
+.. module:: numpy.random.randomgen.xoshiro512starstar
-.. currentmodule:: randomgen.xoshiro512starstar
+.. currentmodule:: numpy.random.randomgen.xoshiro512starstar
.. autoclass:: Xoshiro512StarStar
+ :exclude-members:
Seeding and State
=================
diff --git a/_randomgen/doc/source/change-log.rst b/doc/source/reference/randomgen/change-log.rst
index f791c8f54..f791c8f54 100644
--- a/_randomgen/doc/source/change-log.rst
+++ b/doc/source/reference/randomgen/change-log.rst
diff --git a/_randomgen/doc/source/entropy.rst b/doc/source/reference/randomgen/entropy.rst
index 3e50c892f..6814edfbe 100644
--- a/_randomgen/doc/source/entropy.rst
+++ b/doc/source/reference/randomgen/entropy.rst
@@ -1,6 +1,6 @@
System Entropy
==============
-.. module:: randomgen.entropy
+.. module:: numpy.random.randomgen.entropy
.. autofunction:: random_entropy
diff --git a/_randomgen/doc/source/extending.rst b/doc/source/reference/randomgen/extending.rst
index c9d987b59..c9d987b59 100644
--- a/_randomgen/doc/source/extending.rst
+++ b/doc/source/reference/randomgen/extending.rst
diff --git a/_randomgen/doc/source/generator.rst b/doc/source/reference/randomgen/generator.rst
index dba51eb6d..d59efd68c 100644
--- a/_randomgen/doc/source/generator.rst
+++ b/doc/source/reference/randomgen/generator.rst
@@ -12,10 +12,10 @@ distributions. The default basic RNG used by
changed by passing an instantiated basic RNG to
:class:`~randomgen.generator.RandomGenerator`.
-.. currentmodule:: randomgen.generator
+.. currentmodule:: numpy.random.randomgen.generator
-.. autoclass::
- RandomGenerator
+.. autoclass:: RandomGenerator
+ :exclude-members:
Seed and State Manipulation
===========================
@@ -37,8 +37,6 @@ Simple random data
~RandomGenerator.random_sample
~RandomGenerator.choice
~RandomGenerator.bytes
- ~RandomGenerator.random_uintegers
- ~RandomGenerator.random_raw
Permutations
============
@@ -56,7 +54,6 @@ Distributions
~RandomGenerator.beta
~RandomGenerator.binomial
~RandomGenerator.chisquare
- ~RandomGenerator.complex_normal
~RandomGenerator.dirichlet
~RandomGenerator.exponential
~RandomGenerator.f
@@ -88,4 +85,4 @@ Distributions
~RandomGenerator.vonmises
~RandomGenerator.wald
~RandomGenerator.weibull
- ~RandomGenerator.zipf \ No newline at end of file
+ ~RandomGenerator.zipf
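
As the text above notes, the default basic RNG can be replaced by passing an
instantiated basic RNG to RandomGenerator. A minimal sketch (the seed and
distribution parameters are arbitrary):

    >>> from numpy.random.randomgen import RandomGenerator, PCG64
    >>> rg = RandomGenerator(PCG64(2019))
    >>> rg.beta(2.0, 3.0, size=4).shape
    (4,)
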
diff --git a/_randomgen/doc/source/index.rst b/doc/source/reference/randomgen/index.rst
index eac8c1ef1..67d0441a2 100644
--- a/_randomgen/doc/source/index.rst
+++ b/doc/source/reference/randomgen/index.rst
@@ -1,9 +1,11 @@
-RandomGen
-=========
+Randomgen.RandomGen
+===================
This package contains replacements for the NumPy
:class:`~numpy.random.RandomState` object that allows the core random number
generator to be changed.
+.. currentmodule:: numpy.random.randomgen
+
Quick Start
-----------
@@ -187,14 +189,14 @@ Random Generator
.. toctree::
:maxdepth: 1
- Random Generation <generator>
+ generator
legacy
Basic Random Number Generators
------------------------------
.. toctree::
- :maxdepth: 3
+ :maxdepth: 1
Basic Random Number Generators <brng/index>
diff --git a/_randomgen/doc/source/legacy.rst b/doc/source/reference/randomgen/legacy.rst
index befd7abb5..7e87f871c 100644
--- a/_randomgen/doc/source/legacy.rst
+++ b/doc/source/reference/randomgen/legacy.rst
@@ -47,10 +47,10 @@ when accessing the state so that these extra values are saved.
lg.standard_exponential()
-.. currentmodule:: randomgen.legacy.legacy
+.. currentmodule:: numpy.random.randomgen.legacy
-.. autoclass::
- LegacyGenerator
+.. autoclass:: LegacyGenerator
+ :exclude-members:
Seeding and State
=================
@@ -58,7 +58,8 @@ Seeding and State
.. autosummary::
:toctree: generated/
- ~LegacyGenerator.state
+ ~LegacyGenerator.get_state
+ ~LegacyGenerator.set_state
Simple random data
==================
@@ -66,6 +67,19 @@ Simple random data
:toctree: generated/
~LegacyGenerator.randn
+ ~LegacyGenerator.randint
+ ~LegacyGenerator.random_integers
+ ~LegacyGenerator.random_sample
+ ~LegacyGenerator.choice
+ ~LegacyGenerator.bytes
+
+Permutations
+============
+.. autosummary::
+ :toctree: generated/
+
+ ~LegacyGenerator.shuffle
+ ~LegacyGenerator.permutation
Distributions
=============
@@ -93,3 +107,4 @@ Distributions
~LegacyGenerator.standard_t
~LegacyGenerator.wald
~LegacyGenerator.weibull
+ ~LegacyGenerator.zipf
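
The legacy page now lists get_state and set_state explicitly. A short
round-trip sketch, assuming LegacyGenerator accepts an instantiated basic RNG
in the same way RandomGenerator does:

    >>> import numpy as np
    >>> from numpy.random.randomgen import MT19937
    >>> from numpy.random.randomgen.legacy import LegacyGenerator
    >>> lg = LegacyGenerator(MT19937(12345))
    >>> saved = lg.get_state()
    >>> first = lg.randn(3)
    >>> lg.set_state(saved)
    >>> np.array_equal(first, lg.randn(3))
    True
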
diff --git a/_randomgen/doc/source/multithreading.rst b/doc/source/reference/randomgen/multithreading.rst
index 6efbcdbe7..6efbcdbe7 100644
--- a/_randomgen/doc/source/multithreading.rst
+++ b/doc/source/reference/randomgen/multithreading.rst
diff --git a/_randomgen/doc/source/new-or-different.rst b/doc/source/reference/randomgen/new-or-different.rst
index 6598c13fe..6598c13fe 100644
--- a/_randomgen/doc/source/new-or-different.rst
+++ b/doc/source/reference/randomgen/new-or-different.rst
diff --git a/_randomgen/doc/source/parallel.rst b/doc/source/reference/randomgen/parallel.rst
index df6f58d75..df6f58d75 100644
--- a/_randomgen/doc/source/parallel.rst
+++ b/doc/source/reference/randomgen/parallel.rst
diff --git a/_randomgen/doc/source/performance.py b/doc/source/reference/randomgen/performance.py
index 12cbbc5d3..12cbbc5d3 100644
--- a/_randomgen/doc/source/performance.py
+++ b/doc/source/reference/randomgen/performance.py
diff --git a/_randomgen/doc/source/performance.rst b/doc/source/reference/randomgen/performance.rst
index 2dfb32101..2dfb32101 100644
--- a/_randomgen/doc/source/performance.rst
+++ b/doc/source/reference/randomgen/performance.rst
diff --git a/_randomgen/doc/source/references.rst b/doc/source/reference/randomgen/references.rst
index 0dc99868f..0dc99868f 100644
--- a/_randomgen/doc/source/references.rst
+++ b/doc/source/reference/randomgen/references.rst
diff --git a/doc/source/reference/routines.rst b/doc/source/reference/routines.rst
index a9e80480b..0ed99cbda 100644
--- a/doc/source/reference/routines.rst
+++ b/doc/source/reference/routines.rst
@@ -42,6 +42,7 @@ indentation.
routines.padding
routines.polynomials
routines.random
+ randomgen/index
routines.set
routines.sort
routines.statistics
diff --git a/numpy/random/__init__.py b/numpy/random/__init__.py
index 965ab5ea9..53e947797 100644
--- a/numpy/random/__init__.py
+++ b/numpy/random/__init__.py
@@ -86,8 +86,6 @@ set_state Set state of generator.
"""
from __future__ import division, absolute_import, print_function
-import warnings
-
__all__ = [
'beta',
'binomial',
@@ -138,9 +136,8 @@ __all__ = [
'zipf'
]
-with warnings.catch_warnings():
- warnings.filterwarnings("ignore", message="numpy.ndarray size changed")
- from .mtrand import *
+from .randomgen import mtrand
+from .randomgen.mtrand import *
# Some aliases:
ranf = random = sample = random_sample
diff --git a/numpy/random/mtrand/distributions.c b/numpy/random/mtrand/distributions.c
index 2b1f5e7a9..1b410db57 100644
--- a/numpy/random/mtrand/distributions.c
+++ b/numpy/random/mtrand/distributions.c
@@ -447,6 +447,10 @@ long rk_binomial(rk_state *state, long n, double p)
{
double q;
+ if ((n == 0LL) || (p == 0.0f)) {
+ return 0;
+ }
+
if (p <= 0.5)
{
if (p*n <= 30.0)
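
The added guard short-circuits the degenerate cases before any sampling work
is done: with n == 0 or p == 0 a binomial draw can only be 0. A quick check
from Python (the results are deterministic, so no seed is needed):

    >>> import numpy as np
    >>> np.random.binomial(0, 0.9)
    0
    >>> np.random.binomial(10, 0.0)
    0
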
diff --git a/numpy/random/mtrand/initarray.c b/numpy/random/mtrand/initarray.c
index 21f1dc05a..beff78510 100644
--- a/numpy/random/mtrand/initarray.c
+++ b/numpy/random/mtrand/initarray.c
@@ -71,7 +71,6 @@
* http://www.math.keio.ac.jp/matumoto/emt.html
* email: matumoto@math.keio.ac.jp
*/
-#define NPY_NO_DEPRECATED_API NPY_API_VERSION
#include "initarray.h"
diff --git a/numpy/random/mtrand/numpy.pxd b/numpy/random/mtrand/numpy.pxd
index 1b4fe6c10..e146054b1 100644
--- a/numpy/random/mtrand/numpy.pxd
+++ b/numpy/random/mtrand/numpy.pxd
@@ -3,8 +3,6 @@
# :Author: Travis Oliphant
from cpython.exc cimport PyErr_Print
-cdef extern from "numpy/npy_no_deprecated_api.h": pass
-
cdef extern from "numpy/arrayobject.h":
cdef enum NPY_TYPES:
diff --git a/_randomgen/LICENSE.md b/numpy/random/randomgen/LICENSE.md
index e159d505e..caa665373 100644
--- a/_randomgen/LICENSE.md
+++ b/numpy/random/randomgen/LICENSE.md
@@ -1,4 +1,8 @@
-**Copyright (c) 2018 Kevin Sheppard. All rights reserved.**
+**This software is dual-licensed under The University of Illinois/NCSA
+Open Source License (NCSA) and The 3-Clause BSD License**
+
+# NCSA Open Source License
+**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
Developed by: Kevin Sheppard (<kevin.sheppard@economics.ox.ac.uk>,
<kevin.k.sheppard@gmail.com>)
@@ -30,11 +34,43 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
THE SOFTWARE.**
-## NumPy
-Many parts of this module have been derived from NumPy.
+# 3-Clause BSD License
+**Copyright (c) 2019 Kevin Sheppard. All rights reserved.**
-Copyright (c) 2005-2017, NumPy Developers.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+**THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.**
+
+# NumPy and Other Components
+
+Many parts of this module have been derived from NumPy. Other parts have been
+derived from original sources, often the algorithm's designer. The NumPy license
+is reproduced below. Component licenses are located with the component code.
+
+Copyright (c) 2005-2019, NumPy Developers.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -68,4 +104,5 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
## Components
Many of the components have their own license terms which are stored
-with the source for the component. \ No newline at end of file
+with the source for the component.
+
diff --git a/numpy/random/randomgen/__init__.py b/numpy/random/randomgen/__init__.py
new file mode 100644
index 000000000..1af2fc3b2
--- /dev/null
+++ b/numpy/random/randomgen/__init__.py
@@ -0,0 +1,21 @@
+from .dsfmt import DSFMT
+from .generator import RandomGenerator
+from .mt19937 import MT19937
+from .pcg32 import PCG32
+from .pcg64 import PCG64
+from .philox import Philox
+from .threefry import ThreeFry
+from .threefry32 import ThreeFry32
+from .xoroshiro128 import Xoroshiro128
+from .xorshift1024 import Xorshift1024
+from .xoshiro256starstar import Xoshiro256StarStar
+from .xoshiro512starstar import Xoshiro512StarStar
+from .mtrand import RandomState
+__all__ = ['RandomGenerator', 'DSFMT', 'MT19937', 'PCG64', 'PCG32', 'Philox',
+ 'ThreeFry', 'ThreeFry32', 'Xoroshiro128', 'Xorshift1024',
+ 'Xoshiro256StarStar', 'Xoshiro512StarStar', 'RandomState']
+
+#from ._version import get_versions
+
+#__version__ = get_versions()['version']
+#del get_versions
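
The new package __init__ simply re-exports the generator, the basic RNGs and
the mtrand-based RandomState, so everything listed in __all__ is importable
from a single location. A quick sketch:

    >>> import numpy.random.randomgen as randomgen
    >>> 'Xoshiro512StarStar' in randomgen.__all__
    True
    >>> rg = randomgen.RandomGenerator(randomgen.MT19937(0))
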
diff --git a/_randomgen/randomgen/pickle.py b/numpy/random/randomgen/_pickle.py
index 459f9c2fc..1c365b0c7 100644
--- a/_randomgen/randomgen/pickle.py
+++ b/numpy/random/randomgen/_pickle.py
@@ -8,19 +8,23 @@ from .threefry import ThreeFry
from .threefry32 import ThreeFry32
from .xoroshiro128 import Xoroshiro128
from .xorshift1024 import Xorshift1024
+from .xoshiro256starstar import Xoshiro256StarStar
+from .xoshiro512starstar import Xoshiro512StarStar
+from .mtrand import RandomState
+
+BasicRNGS = {'MT19937': MT19937,
+ 'DSFMT': DSFMT,
+ 'PCG32': PCG32,
+ 'PCG64': PCG64,
+ 'Philox': Philox,
+ 'ThreeFry': ThreeFry,
+ 'ThreeFry32': ThreeFry32,
+ 'Xorshift1024': Xorshift1024,
+ 'Xoroshiro128': Xoroshiro128,
+ 'Xoshiro256StarStar': Xoshiro256StarStar,
+ 'Xoshiro512StarStar': Xoshiro512StarStar,
+ }
-PRNGS = {'MT19937': MT19937,
- 'DSFMT': DSFMT,
- 'PCG32': PCG32,
- 'PCG64': PCG64,
- 'Philox': Philox,
- 'ThreeFry': ThreeFry,
- 'ThreeFry32': ThreeFry32,
- 'Xorshift1024': Xorshift1024,
- 'Xoroshiro128': Xoroshiro128,
- 'Xoshiro256StarStar': Xoshiro256StarStar,
- 'Xoshiro512StarStar': Xoshiro512StarStar,
- }
def __generator_ctor(brng_name='mt19937'):
"""
@@ -72,3 +76,28 @@ def __brng_ctor(brng_name='mt19937'):
raise ValueError(str(brng_name) + ' is not a known PRNG module.')
return brng()
+
+def __randomstate_ctor(brng_name='mt19937'):
+ """
+ Pickling helper function that returns a legacy RandomState-like object
+
+ Parameters
+ ----------
+ brng_name: str
+ String containing the core BasicRNG
+
+ Returns
+ -------
+ rs: RandomState
+ Legacy RandomState using the named core BasicRNG
+ """
+ try:
+ brng_name = brng_name.decode('ascii')
+ except AttributeError:
+ pass
+ if brng_name in BasicRNGS:
+ brng = BasicRNGS[brng_name]
+ else:
+ raise ValueError(str(brng_name) + ' is not a known BasicRNG module.')
+
+ return RandomState(brng())
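
__randomstate_ctor lets a pickled legacy RandomState rebuild itself around
whichever BasicRNG it was created with. A hedged round-trip sketch, assuming
RandomState accepts an instantiated basic RNG as the constructor above implies:

    >>> import pickle
    >>> from numpy.random.randomgen import RandomState, ThreeFry
    >>> rs = RandomState(ThreeFry(42))
    >>> clone = pickle.loads(pickle.dumps(rs))
    >>> rs.randint(0, 100) == clone.randint(0, 100)
    True
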
diff --git a/_randomgen/randomgen/_version.py b/numpy/random/randomgen/_version.py
index e400e3efb..e400e3efb 100644
--- a/_randomgen/randomgen/_version.py
+++ b/numpy/random/randomgen/_version.py
diff --git a/_randomgen/randomgen/bounded_integers.pxd.in b/numpy/random/randomgen/bounded_integers.pxd.in
index 5f723733c..4ab389fd9 100644
--- a/_randomgen/randomgen/bounded_integers.pxd.in
+++ b/numpy/random/randomgen/bounded_integers.pxd.in
@@ -1,23 +1,10 @@
-from __future__ import absolute_import
-
from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
int8_t, int16_t, int32_t, int64_t, intptr_t)
import numpy as np
-cimport numpy as np
+cimport numpy as np
ctypedef np.npy_bool bool_t
-from randomgen.common cimport brng_t
-
-_randint_types = {'bool': (0, 2),
- 'int8': (-2**7, 2**7),
- 'int16': (-2**15, 2**15),
- 'int32': (-2**31, 2**31),
- 'int64': (-2**63, 2**63),
- 'uint8': (0, 2**8),
- 'uint16': (0, 2**16),
- 'uint32': (0, 2**32),
- 'uint64': (0, 2**64)
- }
+from .common cimport brng_t
cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
"""Mask generator for use in bounded random numbers"""
diff --git a/_randomgen/randomgen/bounded_integers.pyx.in b/numpy/random/randomgen/bounded_integers.pyx.in
index eb8555b31..03068a8fd 100644
--- a/_randomgen/randomgen/bounded_integers.pyx.in
+++ b/numpy/random/randomgen/bounded_integers.pyx.in
@@ -1,14 +1,22 @@
#!python
#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True
-from __future__ import absolute_import
import numpy as np
cimport numpy as np
-from randomgen.distributions cimport *
+from .distributions cimport *
np.import_array()
+_randint_types = {'bool': (0, 2),
+ 'int8': (-2**7, 2**7),
+ 'int16': (-2**15, 2**15),
+ 'int32': (-2**31, 2**31),
+ 'int64': (-2**63, 2**63),
+ 'uint8': (0, 2**8),
+ 'uint16': (0, 2**16),
+ 'uint32': (0, 2**32),
+ 'uint64': (0, 2**64)}
{{
py:
type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000ULL'),
@@ -21,7 +29,6 @@ type_info = (('uint32', 'uint32', 'uint64', 'NPY_UINT64', 0, 0, 0, '0X100000000U
)}}
{{for nptype, utype, nptype_up, npctype, remaining, bitshift, lb, ub in type_info}}
{{ py: otype = nptype + '_' if nptype == 'bool' else nptype }}
-
cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object size,
bint use_masked,
brng_t *state, object lock):
@@ -42,7 +49,6 @@ cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object s
cdef np.broadcast it
cdef int buf_rem = 0
-
# Array path
low_arr = <np.ndarray>low
high_arr = <np.ndarray>high
@@ -83,7 +89,6 @@ cdef object _rand_{{nptype}}_broadcast(np.ndarray low, np.ndarray high, object s
np.PyArray_MultiIter_NEXT(it)
return out_arr
{{endfor}}
-
{{
py:
big_type_info = (('uint64', 'uint64', 'NPY_UINT64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
@@ -166,7 +171,6 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
return out_arr
{{endfor}}
-
{{
py:
type_info = (('uint64', 'uint64', '0x0ULL', '0xFFFFFFFFFFFFFFFFULL'),
@@ -241,8 +245,8 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
high_arr = <np.ndarray>np.array(high, copy=False)
low_ndim = np.PyArray_NDIM(low_arr)
high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim==1 and low_arr.size==1 and size is not None)) and
- (high_ndim == 0 or (high_ndim==1 and high_arr.size==1 and size is not None))):
+ if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
+ (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
low = int(low_arr)
high = int(high_arr)
# Subtract 1 since internal generator produces on closed interval [low, high]
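
_randint_types, moved here from the .pxd, records per dtype the half-open
bounds that randint accepts, while the comment above notes that the internal
generator works on a closed interval, hence the subtract-one. A tiny
illustration using two of the entries from the dict:

    >>> _randint_types = {'uint8': (0, 2**8), 'int16': (-2**15, 2**15)}
    >>> low, high = _randint_types['uint8']
    >>> low, high - 1      # inclusive range actually passed to the generator
    (0, 255)
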
diff --git a/_randomgen/randomgen/common.pxd b/numpy/random/randomgen/common.pxd
index 63a1b3f7d..f6748e5aa 100644
--- a/_randomgen/randomgen/common.pxd
+++ b/numpy/random/randomgen/common.pxd
@@ -1,13 +1,11 @@
#cython: language_level=3
-from __future__ import absolute_import
-
from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
int8_t, int16_t, int32_t, int64_t, intptr_t,
uintptr_t)
from libc.math cimport sqrt
-from randomgen.distributions cimport brng_t
+from .distributions cimport brng_t
import numpy as np
cimport numpy as np
@@ -18,6 +16,7 @@ cdef enum ConstraintType:
CONS_NONE
CONS_NON_NEGATIVE
CONS_POSITIVE
+ CONS_POSITIVE_NOT_NAN
CONS_BOUNDED_0_1
CONS_BOUNDED_0_1_NOTNAN
CONS_BOUNDED_GT_0_1
@@ -27,14 +26,18 @@ cdef enum ConstraintType:
ctypedef ConstraintType constraint_type
+cdef object benchmark(brng_t *brng, object lock, Py_ssize_t cnt, object method)
+cdef object random_raw(brng_t *brng, object lock, object size, object output)
+cdef object prepare_cffi(brng_t *brng)
+cdef object prepare_ctypes(brng_t *brng)
cdef int check_constraint(double val, object name, constraint_type cons) except -1
cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1
cdef extern from "src/aligned_malloc/aligned_malloc.h":
- cdef void *PyArray_realloc_aligned(void *p, size_t n);
- cdef void *PyArray_malloc_aligned(size_t n);
- cdef void *PyArray_calloc_aligned(size_t n, size_t s);
- cdef void PyArray_free_aligned(void *p);
+ cdef void *PyArray_realloc_aligned(void *p, size_t n)
+ cdef void *PyArray_malloc_aligned(size_t n)
+ cdef void *PyArray_calloc_aligned(size_t n, size_t s)
+ cdef void PyArray_free_aligned(void *p)
ctypedef double (*random_double_fill)(brng_t *state, np.npy_intp count, double* out) nogil
ctypedef double (*random_double_0)(void *state) nogil
@@ -96,14 +99,3 @@ cdef object discrete_broadcast_iii(void *func, void *state, object size, object
np.ndarray a_arr, object a_name, constraint_type a_constraint,
np.ndarray b_arr, object b_name, constraint_type b_constraint,
np.ndarray c_arr, object c_name, constraint_type c_constraint)
-
-cdef inline void compute_complex(double *rv_r, double *rv_i, double loc_r,
- double loc_i, double var_r, double var_i, double rho) nogil:
- cdef double scale_c, scale_i, scale_r
-
- scale_c = sqrt(1 - rho * rho)
- scale_r = sqrt(var_r)
- scale_i = sqrt(var_i)
-
- rv_i[0] = loc_i + scale_i * (rho * rv_r[0] + scale_c * rv_i[0])
- rv_r[0] = loc_r + scale_r * rv_r[0]
diff --git a/_randomgen/randomgen/common.pyx b/numpy/random/randomgen/common.pyx
index 41d3074f3..1f7cd40ca 100644
--- a/_randomgen/randomgen/common.pyx
+++ b/numpy/random/randomgen/common.pyx
@@ -1,14 +1,12 @@
#!python
#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
-from __future__ import absolute_import
-
from collections import namedtuple
from cpython cimport PyFloat_AsDouble
import sys
import numpy as np
cimport numpy as np
-from randomgen.common cimport *
+from .common cimport *
np.import_array()
@@ -16,6 +14,150 @@ interface = namedtuple('interface', ['state_address', 'state', 'next_uint64',
'next_uint32', 'next_double', 'brng'])
+cdef object benchmark(brng_t *brng, object lock, Py_ssize_t cnt, object method):
+ """Benchmark command used by BasicRNG"""
+ cdef Py_ssize_t i
+ if method==u'uint64':
+ with lock, nogil:
+ for i in range(cnt):
+ brng.next_uint64(brng.state)
+ elif method==u'double':
+ with lock, nogil:
+ for i in range(cnt):
+ brng.next_double(brng.state)
+ else:
+ raise ValueError('Unknown method')
+
+
+cdef object random_raw(brng_t *brng, object lock, object size, object output):
+ """
+ random_raw(self, size=None)
+
+ Return randoms as generated by the underlying PRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Setting this to False is
+ used for performance testing, since the values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ cdef np.ndarray randoms
+ cdef uint64_t *randoms_data
+ cdef Py_ssize_t i, n
+
+ if not output:
+ if size is None:
+ with lock:
+ brng.next_raw(brng.state)
+ return None
+ n = np.asarray(size).sum()
+ with lock, nogil:
+ for i in range(n):
+ brng.next_raw(brng.state)
+ return None
+
+ if size is None:
+ with lock:
+ return brng.next_raw(brng.state)
+
+ randoms = <np.ndarray>np.empty(size, np.uint64)
+ randoms_data = <uint64_t*>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ with lock, nogil:
+ for i in range(n):
+ randoms_data[i] = brng.next_raw(brng.state)
+ return randoms
+
+cdef object prepare_cffi(brng_t *brng):
+ """
+ Bundles the interfaces to interact with a Basic RNG using cffi
+
+ Parameters
+ ----------
+ brng : pointer
+ A pointer to a Basic RNG instance
+
+ Returns
+ -------
+ interface : namedtuple
+ The functions required to interface with the Basic RNG using cffi
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * brng - pointer to the Basic RNG struct
+ """
+ try:
+ import cffi
+ except ImportError:
+ raise ImportError('cffi cannot be imported.')
+
+ ffi = cffi.FFI()
+ _cffi = interface(<uintptr_t>brng.state,
+ ffi.cast('void *', <uintptr_t>brng.state),
+ ffi.cast('uint64_t (*)(void *)', <uintptr_t>brng.next_uint64),
+ ffi.cast('uint32_t (*)(void *)', <uintptr_t>brng.next_uint32),
+ ffi.cast('double (*)(void *)', <uintptr_t>brng.next_double),
+ ffi.cast('void *', <uintptr_t>brng))
+ return _cffi
+
+cdef object prepare_ctypes(brng_t *brng):
+ """
+ Bundles the interfaces to interact with a Basic RNG using ctypes
+
+ Parameters
+ ----------
+ brng : pointer
+ A pointer to a Basic RNG instance
+
+ Returns
+ -------
+ interface : namedtuple
+ The functions required to interface with the Basic RNG using ctypes:
+
+ * state_address - Memory address of the state struct
+ * state - pointer to the state struct
+ * next_uint64 - function pointer to produce 64 bit integers
+ * next_uint32 - function pointer to produce 32 bit integers
+ * next_double - function pointer to produce doubles
+ * brng - pointer to the Basic RNG struct
+ """
+ import ctypes
+
+ _ctypes = interface(<uintptr_t>brng.state,
+ ctypes.c_void_p(<uintptr_t>brng.state),
+ ctypes.cast(<uintptr_t>brng.next_uint64,
+ ctypes.CFUNCTYPE(ctypes.c_uint64,
+ ctypes.c_void_p)),
+ ctypes.cast(<uintptr_t>brng.next_uint32,
+ ctypes.CFUNCTYPE(ctypes.c_uint32,
+ ctypes.c_void_p)),
+ ctypes.cast(<uintptr_t>brng.next_double,
+ ctypes.CFUNCTYPE(ctypes.c_double,
+ ctypes.c_void_p)),
+ ctypes.c_void_p(<uintptr_t>brng))
+ return _ctypes
+
cdef double kahan_sum(double *darr, np.npy_intp n):
cdef double c, y, t, sum
cdef np.npy_intp i
@@ -29,6 +171,7 @@ cdef double kahan_sum(double *darr, np.npy_intp n):
return sum
cdef np.ndarray int_to_array(object value, object name, object bits, object uint_size):
+ """Convert a large integer to an array of unsigned integers"""
len = bits // uint_size
value = np.asarray(value)
if uint_size == 32:
@@ -67,8 +210,12 @@ cdef check_output(object out, object dtype, object size):
raise TypeError('Supplied output array has the wrong type. '
'Expected {0}, got {1}'.format(dtype, out_array.dtype))
if size is not None:
- # TODO: enable this !!! if tuple(size) != out_array.shape:
- raise ValueError('size and out cannot be simultaneously used')
+ try:
+ tup_size = tuple(size)
+ except TypeError:
+ tup_size = tuple([size])
+ if tup_size != out.shape:
+ raise ValueError('size must match out.shape when used together')
cdef object double_fill(void *func, brng_t *state, object size, object lock, object out):
@@ -148,59 +295,58 @@ cdef uint64_t MAXSIZE = <uint64_t>sys.maxsize
cdef int check_array_constraint(np.ndarray val, object name, constraint_type cons) except -1:
if cons == CONS_NON_NEGATIVE:
- if np.any(np.signbit(val)):
+ if np.any(np.logical_and(np.logical_not(np.isnan(val)), np.signbit(val))):
raise ValueError(name + " < 0")
- elif cons == CONS_POSITIVE:
- if np.any(np.less_equal(val, 0)):
+ elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
+ if cons == CONS_POSITIVE_NOT_NAN and np.any(np.isnan(val)):
+ raise ValueError(name + " must not be NaN")
+ elif np.any(np.less_equal(val, 0)):
raise ValueError(name + " <= 0")
- elif cons == CONS_BOUNDED_0_1 or cons == CONS_BOUNDED_0_1_NOTNAN:
- if np.any(np.less(val, 0)) or np.any(np.greater(val, 1)):
- raise ValueError(name + " < 0 or " + name + " > 1")
- if cons == CONS_BOUNDED_0_1_NOTNAN:
- if np.any(np.isnan(val)):
- raise ValueError(name + ' contains NaNs')
+ elif cons == CONS_BOUNDED_0_1:
+ if not np.all(np.greater_equal(val, 0)) or \
+ not np.all(np.less_equal(val, 1)):
+ raise ValueError("{0} < 0 , {0} > 1 or {0} contains NaNs".format(name))
elif cons == CONS_BOUNDED_GT_0_1:
- if np.any(np.less_equal(val, 0)) or np.any(np.greater(val, 1)):
- raise ValueError(name + " <= 0 or " + name + " > 1")
+ if not np.all(np.greater(val, 0)) or not np.all(np.less_equal(val, 1)):
+ raise ValueError("{0} <= 0 , {0} > 1 or {0} contains NaNs".format(name))
elif cons == CONS_GT_1:
- if np.any(np.less_equal(val, 1)):
- raise ValueError(name + " <= 1")
+ if not np.all(np.greater(val, 1)):
+ raise ValueError("{0} <= 1 or {0} contains NaNs".format(name))
elif cons == CONS_GTE_1:
- if np.any(np.less(val, 1)):
- raise ValueError(name + " < 1")
+ if not np.all(np.greater_equal(val, 1)):
+ raise ValueError("{0} < 1 or {0} contains NaNs".format(name))
elif cons == CONS_POISSON:
- if np.any(np.greater(val, POISSON_LAM_MAX)):
- raise ValueError(name + " value too large")
- if np.any(np.less(val, 0.0)):
- raise ValueError(name + " < 0")
+ if not np.all(np.less_equal(val, POISSON_LAM_MAX)):
+ raise ValueError("{0} value too large".format(name))
+ elif not np.all(np.greater_equal(val, 0.0)):
+ raise ValueError("{0} < 0 or {0} contains NaNs".format(name))
return 0
-
cdef int check_constraint(double val, object name, constraint_type cons) except -1:
+ cdef bint is_nan
if cons == CONS_NON_NEGATIVE:
- if np.signbit(val):
+ if not np.isnan(val) and np.signbit(val):
raise ValueError(name + " < 0")
- elif cons == CONS_POSITIVE:
- if val <= 0:
+ elif cons == CONS_POSITIVE or cons == CONS_POSITIVE_NOT_NAN:
+ if cons == CONS_POSITIVE_NOT_NAN and np.isnan(val):
+ raise ValueError(name + " must not be NaN")
+ elif val <= 0:
raise ValueError(name + " <= 0")
- elif cons == CONS_BOUNDED_0_1 or cons == CONS_BOUNDED_0_1_NOTNAN:
- if val < 0 or val > 1:
- raise ValueError(name + " < 0 or " + name + " > 1")
- if cons == CONS_BOUNDED_0_1_NOTNAN:
- if np.isnan(val):
- raise ValueError(name + ' contains NaNs')
+ elif cons == CONS_BOUNDED_0_1:
+ if not (val >= 0) or not (val <= 1):
+ raise ValueError("{0} < 0 , {0} > 1 or {0} is NaN".format(name))
elif cons == CONS_GT_1:
- if val <= 1:
- raise ValueError(name + " <= 1")
+ if not (val > 1):
+ raise ValueError("{0} <= 1 or {0} is NaN".format(name))
elif cons == CONS_GTE_1:
- if val < 1:
- raise ValueError(name + " < 1")
+ if not (val >= 1):
+ raise ValueError("{0} < 1 or {0} is NaN".format(name))
elif cons == CONS_POISSON:
- if val < 0:
- raise ValueError(name + " < 0")
- elif val > POISSON_LAM_MAX:
+ if not (val >= 0):
+ raise ValueError("{0} < 0 or {0} is NaN".format(name))
+ elif not (val <= POISSON_LAM_MAX):
raise ValueError(name + " value too large")
return 0
@@ -262,7 +408,6 @@ cdef object cont_broadcast_2(void *func, void *state, object size, object lock,
randoms = <np.ndarray>np.empty(it.shape, np.double)
# randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_DOUBLE)
-
randoms_data = <double *>np.PyArray_DATA(randoms)
n = np.PyArray_SIZE(randoms)
@@ -301,7 +446,7 @@ cdef object cont_broadcast_3(void *func, void *state, object size, object lock,
randoms = <np.ndarray>np.empty(size, np.double)
else:
it = np.PyArray_MultiIterNew3(a_arr, b_arr, c_arr)
- #randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_DOUBLE)
+ # randoms = np.PyArray_SimpleNew(it.nd, np.PyArray_DIMS(it), np.NPY_DOUBLE)
randoms = <np.ndarray>np.empty(it.shape, np.double)
randoms_data = <double *>np.PyArray_DATA(randoms)
@@ -386,11 +531,11 @@ cdef object cont(void *func, void *state, object size, object lock, int narg,
randoms = <np.ndarray>out
n = np.PyArray_SIZE(randoms)
- cdef double *randoms_data = <double *>np.PyArray_DATA(randoms)
- cdef random_double_0 f0;
- cdef random_double_1 f1;
- cdef random_double_2 f2;
- cdef random_double_3 f3;
+ cdef double *randoms_data = <double *>np.PyArray_DATA(randoms)
+ cdef random_double_0 f0
+ cdef random_double_1 f1
+ cdef random_double_2 f2
+ cdef random_double_3 f3
with lock, nogil:
if narg == 0:
@@ -430,7 +575,7 @@ cdef object discrete_broadcast_d(void *func, void *state, object size, object lo
if size is not None:
randoms = np.empty(size, np.int64)
else:
- #randoms = np.empty(np.shape(a_arr), np.double)
+ # randoms = np.empty(np.shape(a_arr), np.double)
randoms = np.PyArray_SimpleNew(np.PyArray_NDIM(a_arr), np.PyArray_DIMS(a_arr), np.NPY_INT64)
randoms_data = <int64_t *>np.PyArray_DATA(randoms)
@@ -490,7 +635,6 @@ cdef object discrete_broadcast_di(void *func, void *state, object size, object l
cdef random_uint_di f = (<random_uint_di>func)
cdef np.npy_intp i, n
-
if a_constraint != CONS_NONE:
check_array_constraint(a_arr, a_name, a_constraint)
@@ -558,7 +702,7 @@ cdef object discrete_broadcast_iii(void *func, void *state, object size, object
return randoms
cdef object discrete_broadcast_i(void *func, void *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint):
+ np.ndarray a_arr, object a_name, constraint_type a_constraint):
cdef np.ndarray randoms
cdef int64_t *randoms_data
cdef np.broadcast it
@@ -594,7 +738,7 @@ cdef object disc(void *func, void *state, object size, object lock,
object c, object c_name, constraint_type c_constraint):
cdef double _da = 0, _db = 0
- cdef int64_t _ia = 0, _ib = 0 , _ic = 0
+ cdef int64_t _ia = 0, _ib = 0, _ic = 0
cdef bint is_scalar = True
if narg_double > 0:
a_arr = <np.ndarray>np.PyArray_FROM_OTF(a, np.NPY_DOUBLE, np.NPY_ALIGNED)
@@ -612,7 +756,7 @@ cdef object disc(void *func, void *state, object size, object lock,
if narg_int64 > 1:
b_arr = <np.ndarray>np.PyArray_FROM_OTF(b, np.NPY_INT64, np.NPY_ALIGNED)
is_scalar = is_scalar and np.PyArray_NDIM(b_arr) == 0
- if narg_int64 > 2 :
+ if narg_int64 > 2:
c_arr = <np.ndarray>np.PyArray_FROM_OTF(c, np.NPY_INT64, np.NPY_ALIGNED)
is_scalar = is_scalar and np.PyArray_NDIM(c_arr) == 0
@@ -636,7 +780,6 @@ cdef object disc(void *func, void *state, object size, object lock,
else:
raise NotImplementedError("No vector path available")
-
if narg_double > 0:
_da = PyFloat_AsDouble(a)
if a_constraint != CONS_NONE and is_scalar:
@@ -659,7 +802,7 @@ cdef object disc(void *func, void *state, object size, object lock,
_ib = <int64_t>b
if b_constraint != CONS_NONE and is_scalar:
check_constraint(<double>_ib, b_name, b_constraint)
- if narg_int64 > 2 :
+ if narg_int64 > 2:
_ic = <int64_t>c
if c_constraint != CONS_NONE and is_scalar:
check_constraint(<double>_ic, c_name, c_constraint)
@@ -684,15 +827,15 @@ cdef object disc(void *func, void *state, object size, object lock,
cdef np.npy_intp i, n
cdef np.ndarray randoms = <np.ndarray>np.empty(size, np.int64)
cdef np.int64_t *randoms_data
- cdef random_uint_0 f0;
- cdef random_uint_d fd;
- cdef random_uint_dd fdd;
- cdef random_uint_di fdi;
- cdef random_uint_i fi;
- cdef random_uint_iii fiii;
+ cdef random_uint_0 f0
+ cdef random_uint_d fd
+ cdef random_uint_dd fdd
+ cdef random_uint_di fdi
+ cdef random_uint_i fi
+ cdef random_uint_iii fiii
n = np.PyArray_SIZE(randoms)
- randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
with lock, nogil:
if narg_int64 == 0:
@@ -726,8 +869,8 @@ cdef object disc(void *func, void *state, object size, object lock,
cdef object cont_broadcast_1_f(void *func, brng_t *state, object size, object lock,
- np.ndarray a_arr, object a_name, constraint_type a_constraint,
- object out):
+ np.ndarray a_arr, object a_name, constraint_type a_constraint,
+ object out):
cdef np.ndarray randoms
cdef float a_val
@@ -793,8 +936,8 @@ cdef object cont_f(void *func, brng_t *state, object size, object lock,
randoms = <np.ndarray>out
n = np.PyArray_SIZE(randoms)
- cdef float *randoms_data = <float *>np.PyArray_DATA(randoms)
- cdef random_float_1 f1 = <random_float_1>func;
+ cdef float *randoms_data = <float *>np.PyArray_DATA(randoms)
+ cdef random_float_1 f1 = <random_float_1>func
with lock, nogil:
for i in range(n):
@@ -803,4 +946,4 @@ cdef object cont_f(void *func, brng_t *state, object size, object lock,
if out is None:
return randoms
else:
- return out \ No newline at end of file
+ return out
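The disc helper above dispatches on how many double and int64 parameters the underlying C sampler takes, via the random_uint_* function-pointer typedefs. A rough Python analogue of that arity dispatch, for orientation only — the real code casts C function pointers and draws under nogil:

# Schematic sketch of the disc() dispatch; the names narg_double/narg_int64
# mirror the Cython arguments above, but this is not the actual implementation.
def disc_like(func, narg_double, narg_int64, a=None, b=None, c=None, size=1):
    draws = []
    for _ in range(size):
        if narg_double == 1 and narg_int64 == 0:
            draws.append(func(a))        # e.g. a (double) -> int64 sampler
        elif narg_double == 1 and narg_int64 == 1:
            draws.append(func(a, b))     # e.g. a (double, int64) -> int64 sampler
        elif narg_double == 0 and narg_int64 == 3:
            draws.append(func(a, b, c))  # e.g. a three-int64 sampler
        else:
            raise NotImplementedError("No scalar path for this signature")
    return draws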
diff --git a/_randomgen/randomgen/distributions.pxd b/numpy/random/randomgen/distributions.pxd
index 35d92db51..ddb7a84bf 100644
--- a/_randomgen/randomgen/distributions.pxd
+++ b/numpy/random/randomgen/distributions.pxd
@@ -3,7 +3,7 @@
from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
int8_t, int16_t, int32_t, int64_t, intptr_t)
import numpy as np
-cimport numpy as np
+cimport numpy as np
cdef extern from "src/distributions/distributions.h":
@@ -80,13 +80,13 @@ cdef extern from "src/distributions/distributions.h":
double random_rayleigh(brng_t *brng_state, double mode) nogil
double random_standard_t(brng_t *brng_state, double df) nogil
double random_noncentral_chisquare(brng_t *brng_state, double df,
- double nonc) nogil
+ double nonc) nogil
double random_noncentral_f(brng_t *brng_state, double dfnum,
- double dfden, double nonc) nogil
+ double dfden, double nonc) nogil
double random_wald(brng_t *brng_state, double mean, double scale) nogil
double random_vonmises(brng_t *brng_state, double mu, double kappa) nogil
double random_triangular(brng_t *brng_state, double left, double mode,
- double right) nogil
+ double right) nogil
int64_t random_poisson(brng_t *brng_state, double lam) nogil
int64_t random_negative_binomial(brng_t *brng_state, double n, double p) nogil
diff --git a/_randomgen/randomgen/dsfmt.pyx b/numpy/random/randomgen/dsfmt.pyx
index ee8ef270d..9a7199e85 100644
--- a/_randomgen/randomgen/dsfmt.pyx
+++ b/numpy/random/randomgen/dsfmt.pyx
@@ -1,39 +1,40 @@
-from __future__ import absolute_import
-
import operator
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy
np.import_array()
DEF DSFMT_MEXP = 19937
-DEF DSFMT_N = 191 # ((DSFMT_MEXP - 128) / 104 + 1)
-DEF DSFMT_N_PLUS_1 = 192 # DSFMT_N + 1
+DEF DSFMT_N = 191 # ((DSFMT_MEXP - 128) / 104 + 1)
+DEF DSFMT_N_PLUS_1 = 192 # DSFMT_N + 1
DEF DSFMT_N64 = DSFMT_N * 2
cdef extern from "src/dsfmt/dSFMT.h":
union W128_T:
- uint64_t u[2];
- uint32_t u32[4];
- double d[2];
+ uint64_t u[2]
+ uint32_t u32[4]
+ double d[2]
- ctypedef W128_T w128_t;
+ ctypedef W128_T w128_t
struct DSFMT_T:
- w128_t status[DSFMT_N_PLUS_1];
- int idx;
+ w128_t status[DSFMT_N_PLUS_1]
+ int idx
- ctypedef DSFMT_T dsfmt_t;
+ ctypedef DSFMT_T dsfmt_t
struct s_dsfmt_state:
dsfmt_t *state
@@ -52,7 +53,7 @@ cdef extern from "src/dsfmt/dSFMT.h":
void dsfmt_init_gen_rand(dsfmt_t *dsfmt, uint32_t seed)
void dsfmt_init_by_array(dsfmt_t *dsfmt, uint32_t init_key[], int key_length)
- void dsfmt_jump(dsfmt_state *state);
+ void dsfmt_jump(dsfmt_state *state)
cdef uint64_t dsfmt_uint64(void* st) nogil:
return dsfmt_next64(<dsfmt_state *>st)
@@ -101,8 +102,8 @@ cdef class DSFMT:
generators should be initialized with the same seed to ensure that the
segments come from the same sequence.
- >>> from randomgen.entropy import random_entropy
- >>> from randomgen import RandomGenerator, DSFMT
+ >>> from numpy.random.randomgen.entropy import random_entropy
+ >>> from numpy.random.randomgen import RandomGenerator, DSFMT
>>> seed = random_entropy()
>>> rs = [RandomGenerator(DSFMT(seed)) for _ in range(10)]
# Advance rs[i] by i jumps
@@ -138,12 +139,13 @@ cdef class DSFMT:
Jump Ahead Algorithm for Linear Recurrences in a Polynomial Space",
Sequences and Their Applications - SETA, 290--298, 2008.
"""
- cdef dsfmt_state *rng_state
+ cdef dsfmt_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef public object _cffi
cdef public object _ctypes
cdef public object _generator
+ cdef public object lock
def __init__(self, seed=None):
self.rng_state = <dsfmt_state *>malloc(sizeof(dsfmt_state))
@@ -152,6 +154,8 @@ cdef class DSFMT:
self.rng_state.buffer_loc = DSFMT_N64
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed)
+ self.lock = Lock()
+
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &dsfmt_uint64
self._brng.next_uint32 = &dsfmt_uint32
@@ -172,7 +176,8 @@ cdef class DSFMT:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -185,17 +190,39 @@ cdef class DSFMT:
cdef _reset_state_variables(self):
self.rng_state.buffer_loc = DSFMT_N64
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. When False, the values
+ are drawn but not returned; useful for performance testing.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(self._brng, self.lock, size, output)
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None):
"""
@@ -289,14 +316,14 @@ cdef class DSFMT:
for j in range(2):
state[loc] = self.rng_state.state.status[i].u[j]
loc += 1
- buffered_uniforms = np.empty(DSFMT_N64,dtype=np.double)
+ buffered_uniforms = np.empty(DSFMT_N64, dtype=np.double)
for i in range(DSFMT_N64):
buffered_uniforms[i] = self.rng_state.buffered_uniforms[i]
return {'brng': self.__class__.__name__,
- 'state': {'state':np.asarray(state),
- 'idx':self.rng_state.state.idx},
+ 'state': {'state': np.asarray(state),
+ 'idx': self.rng_state.state.idx},
'buffer_loc': self.rng_state.buffer_loc,
- 'buffered_uniforms':np.asarray(buffered_uniforms)}
+ 'buffered_uniforms': np.asarray(buffered_uniforms)}
@state.setter
def state(self, value):
@@ -321,12 +348,12 @@ cdef class DSFMT:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -335,25 +362,10 @@ cdef class DSFMT:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&dsfmt_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&dsfmt_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&dsfmt_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -374,19 +386,8 @@ cdef class DSFMT:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -395,10 +396,10 @@ cdef class DSFMT:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator used this instance as the basic RNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator
\ No newline at end of file
+ return self._generator
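With random_raw, the shared lock, and the generator property now living on the basic RNG itself, typical use of DSFMT looks roughly like the sketch below (paths assume the numpy.random.randomgen layout introduced by this diff):

from numpy.random.randomgen import DSFMT

brng = DSFMT(12345)
raw = brng.random_raw(4)      # four uint64 draws straight from the PRNG
rg = brng.generator           # RandomGenerator sharing this basic RNG
vals = rg.random_sample(3)    # doubles in [0, 1)
state = brng.state            # state dict used by pickling via __reduce__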
diff --git a/_randomgen/randomgen/entropy.pyx b/numpy/random/randomgen/entropy.pyx
index d7822bfdf..0e429e9f2 100644
--- a/_randomgen/randomgen/entropy.pyx
+++ b/numpy/random/randomgen/entropy.pyx
@@ -1,7 +1,3 @@
-from __future__ import absolute_import
-
-import operator
-
cimport numpy as np
import numpy as np
@@ -25,6 +21,7 @@ cdef Py_ssize_t compute_numel(size):
n = size
return n
+
def seed_by_array(object seed, Py_ssize_t n):
"""
Transforms a seed array into an initial state
@@ -150,4 +147,4 @@ def random_entropy(size=None, source='system'):
if n == 0:
return random
- return np.asarray(randoms).reshape(size)
\ No newline at end of file
+ return np.asarray(randoms).reshape(size)
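random_entropy remains the recommended way to obtain a seed for independent streams, as in the DSFMT docstring earlier in this diff. A minimal sketch, assuming the new module layout:

from numpy.random.randomgen.entropy import random_entropy
from numpy.random.randomgen import RandomGenerator, DSFMT

seed = random_entropy()
streams = [RandomGenerator(DSFMT(seed)) for _ in range(4)]
# Each stream would then be advanced by a distinct number of jumps so
# the generated sequences do not overlap.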
diff --git a/_randomgen/randomgen/examples/cython/extending.pyx b/numpy/random/randomgen/examples/cython/extending.pyx
index c387a13af..b472312b4 100644
--- a/_randomgen/randomgen/examples/cython/extending.pyx
+++ b/numpy/random/randomgen/examples/cython/extending.pyx
@@ -1,13 +1,17 @@
+#cython: language_level=3
+from libc.stdint cimport uint32_t
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+
import numpy as np
cimport numpy as np
cimport cython
-from libc.stdint cimport uint32_t
-from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from randomgen.common cimport brng_t
-from randomgen.xoroshiro128 import Xoroshiro128
+
+from numpy.random.randomgen.common cimport brng_t
+from numpy.random.randomgen import Xoroshiro128
np.import_array()
+
def uniform_mean(Py_ssize_t N):
cdef Py_ssize_t i
cdef brng_t *rng
@@ -26,6 +30,7 @@ def uniform_mean(Py_ssize_t N):
randoms = np.asarray(random_values)
return randoms.mean()
+
cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, brng_t *rng):
cdef uint32_t mask, delta, val
mask = delta = ub - lb
@@ -41,6 +46,7 @@ cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, brng_t *rng):
return lb + val
+
@cython.boundscheck(False)
@cython.wraparound(False)
def bounded_uints(uint32_t lb, uint32_t ub, Py_ssize_t n):
diff --git a/numpy/random/randomgen/examples/cython/extending_distributions.pyx b/numpy/random/randomgen/examples/cython/extending_distributions.pyx
new file mode 100644
index 000000000..26d749b10
--- /dev/null
+++ b/numpy/random/randomgen/examples/cython/extending_distributions.pyx
@@ -0,0 +1,51 @@
+#cython: language_level=3
+import numpy as np
+cimport numpy as np
+cimport cython
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from numpy.random.randomgen.common cimport *
+from numpy.random.randomgen.distributions cimport random_gauss_zig
+from numpy.random.randomgen import Xoroshiro128
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def normals_zig(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef brng_t *rng
+ cdef const char *capsule_name = "BasicRNG"
+ cdef double[::1] random_values
+
+ x = Xoroshiro128()
+ capsule = x.capsule
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ rng = <brng_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ for i in range(n):
+ random_values[i] = random_gauss_zig(rng)
+ randoms = np.asarray(random_values)
+ return randoms
+
+
+@cython.boundscheck(False)
+@cython.wraparound(False)
+def uniforms(Py_ssize_t n):
+ cdef Py_ssize_t i
+ cdef brng_t *rng
+ cdef const char *capsule_name = "BasicRNG"
+ cdef double[::1] random_values
+
+ x = Xoroshiro128()
+ capsule = x.capsule
+ # Optional check that the capsule is from a Basic RNG
+ if not PyCapsule_IsValid(capsule, capsule_name):
+ raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
+ rng = <brng_t *> PyCapsule_GetPointer(capsule, capsule_name)
+ random_values = np.empty(n)
+ for i in range(n):
+ # Call the function
+ random_values[i] = rng.next_double(rng.state)
+ randoms = np.asarray(random_values)
+ return randoms
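Once the examples are built in place (for instance with the setup.py renamed in the next hunk and python setup.py build_ext --inplace), the new module could be exercised as a quick sanity check; a sketch only, using the function names defined above:

import extending_distributions

print(extending_distributions.normals_zig(1000).mean())  # near 0.0
print(extending_distributions.uniforms(1000).max())      # strictly below 1.0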
diff --git a/_randomgen/randomgen/examples/cython/setup.py b/numpy/random/randomgen/examples/cython/setup.py
index d7a04f75a..d7a04f75a 100644
--- a/_randomgen/randomgen/examples/cython/setup.py
+++ b/numpy/random/randomgen/examples/cython/setup.py
diff --git a/_randomgen/randomgen/examples/numba/extending.py b/numpy/random/randomgen/examples/numba/extending.py
index 4dafeb5c4..72e903b1f 100644
--- a/_randomgen/randomgen/examples/numba/extending.py
+++ b/numpy/random/randomgen/examples/numba/extending.py
@@ -26,7 +26,7 @@ def bounded_uint(lb, ub, state):
return lb + val
-bounded_uint(323, 2394691, s.value)
+print(bounded_uint(323, 2394691, s.value))
@nb.jit(nopython=True)
diff --git a/_randomgen/randomgen/examples/numba/extending_distributions.py b/numpy/random/randomgen/examples/numba/extending_distributions.py
index 17ba2704c..17ba2704c 100644
--- a/_randomgen/randomgen/examples/numba/extending_distributions.py
+++ b/numpy/random/randomgen/examples/numba/extending_distributions.py
diff --git a/_randomgen/randomgen/generator.pyx b/numpy/random/randomgen/generator.pyx
index 6a68a1905..a244bca57 100644
--- a/_randomgen/randomgen/generator.pyx
+++ b/numpy/random/randomgen/generator.pyx
@@ -1,42 +1,24 @@
#!python
#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
-from __future__ import absolute_import
-
import operator
import warnings
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from cpython cimport (Py_INCREF, PyComplex_RealAsDouble,
- PyComplex_ImagAsDouble, PyComplex_FromDoubles, PyFloat_AsDouble)
+from cpython cimport (Py_INCREF, PyFloat_AsDouble)
from libc cimport string
from libc.stdlib cimport malloc, free
cimport numpy as np
import numpy as np
cimport cython
-try:
- from threading import Lock
-except ImportError:
- from dummy_threading import Lock
-
-from randomgen.bounded_integers cimport *
-from randomgen.common cimport *
-from randomgen.distributions cimport *
-from randomgen.xoroshiro128 import Xoroshiro128
-import randomgen.pickle
+from .bounded_integers cimport *
+from .bounded_integers import _randint_types
+from .common cimport *
+from .distributions cimport *
+from .xoroshiro128 import Xoroshiro128
np.import_array()
-_randint_types = {'bool': (0, 2),
- 'int8': (-2**7, 2**7),
- 'int16': (-2**15, 2**15),
- 'int32': (-2**31, 2**31),
- 'int64': (-2**63, 2**63),
- 'uint8': (0, 2**8),
- 'uint16': (0, 2**16),
- 'uint32': (0, 2**32),
- 'uint64': (0, 2**64)
- }
cdef class RandomGenerator:
"""
@@ -76,13 +58,13 @@ cdef class RandomGenerator:
Examples
--------
- >>> from randomgen import RandomGenerator
+ >>> from numpy.random.randomgen import RandomGenerator
>>> rg = RandomGenerator()
>>> rg.standard_normal()
Using a specific generator
- >>> from randomgen import MT19937
+ >>> from numpy.random.randomgen import MT19937
>>> rg = RandomGenerator(MT19937())
The generator is also directly available from basic RNGs
@@ -107,7 +89,7 @@ cdef class RandomGenerator:
raise ValueError("Invalid brng. The brng must be instantized.")
self._brng = <brng_t *> PyCapsule_GetPointer(capsule, name)
self._binomial = <binomial_t *>malloc(sizeof(binomial_t))
- self.lock = Lock()
+ self.lock = brng.lock
def __dealloc__(self):
free(self._binomial)
@@ -128,7 +110,8 @@ cdef class RandomGenerator:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__generator_ctor,
+ from ._pickle import __generator_ctor
+ return (__generator_ctor,
(self.state['brng'],),
self.state)
@@ -146,7 +129,7 @@ cdef class RandomGenerator:
The best method to access seed is to directly use a basic RNG instance.
This example demonstrates this best practice.
- >>> from randomgen import RandomGenerator, PCG64
+ >>> from numpy.random.randomgen import RandomGenerator, PCG64
>>> brng = PCG64(1234567891011)
>>> rg = brng.generator
>>> brng.seed(1110987654321)
@@ -161,6 +144,7 @@ cdef class RandomGenerator:
>>> rg = RandomGenerator(PCG64(1234567891011))
>>> rg.seed(1110987654321)
+
"""
# TODO: Should this remain
self._basicrng.seed(*args, **kwargs)
@@ -181,6 +165,7 @@ cdef class RandomGenerator:
-----
This is a trivial pass-through function. RandomGenerator does not
directly contain or manipulate the basic RNG's state.
+
"""
return self._basicrng.state
@@ -188,122 +173,6 @@ cdef class RandomGenerator:
def state(self, value):
self._basicrng.state = value
- def random_uintegers(self, size=None, int bits=64):
- """
- random_uintegers(size=None, bits=64)
-
- Return random unsigned integers
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- bits : int {32, 64}
- Size of the unsigned integer to return, either 32 bit or 64 bit.
-
- Returns
- -------
- out : uint or ndarray
- Drawn samples.
-
- Notes
- -----
- This method effectively exposes access to the raw underlying
- pseudo-random number generator since these all produce unsigned
- integers. In practice these are most useful for generating other
- random numbers.
-
- These should not be used to produce bounded random numbers by
- simple truncation.
- """
- cdef np.npy_intp i, n
- cdef np.ndarray array
- cdef uint32_t* data32
- cdef uint64_t* data64
- if bits == 64:
- if size is None:
- with self.lock:
- return self._brng.next_uint64(self._brng.state)
- array = <np.ndarray>np.empty(size, np.uint64)
- n = np.PyArray_SIZE(array)
- data64 = <uint64_t *>np.PyArray_DATA(array)
- with self.lock, nogil:
- for i in range(n):
- data64[i] = self._brng.next_uint64(self._brng.state)
- elif bits == 32:
- if size is None:
- with self.lock:
- return self._brng.next_uint32(self._brng.state)
- array = <np.ndarray>np.empty(size, np.uint32)
- n = np.PyArray_SIZE(array)
- data32 = <uint32_t *>np.PyArray_DATA(array)
- with self.lock, nogil:
- for i in range(n):
- data32[i] = self._brng.next_uint32(self._brng.state)
- else:
- raise ValueError('Unknown value of bits. Must be either 32 or 64.')
-
- return array
-
- def random_raw(self, size=None, output=True):
- """
- random_raw(self, size=None)
-
- Return randoms as generated by the underlying PRNG
-
- Parameters
- ----------
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- output : bool, optional
- Output values. Used for performance testing since the generated
- values are not returned.
-
- Returns
- -------
- out : uint or ndarray
- Drawn samples.
-
- Notes
- -----
- This method directly exposes the the raw underlying pseudo-random
- number generator. All values are returned as unsigned 64-bit
- values irrespective of the number of bits produced by the PRNG.
-
- See the class docstring for the number of bits returned.
- """
- cdef np.ndarray randoms
- cdef uint64_t *randoms_data
- cdef Py_ssize_t i, n
-
- if not output:
- if size is None:
- with self.lock:
- self._brng.next_raw(self._brng.state)
- return None
- n = np.asarray(size).sum()
- with self.lock, nogil:
- for i in range(n):
- self._brng.next_raw(self._brng.state)
- return None
-
- if size is None:
- with self.lock:
- return self._brng.next_raw(self._brng.state)
-
- randoms = <np.ndarray>np.empty(size, np.uint64)
- randoms_data = <uint64_t*>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- with self.lock, nogil:
- for i in range(n):
- randoms_data[i] = self._brng.next_raw(self._brng.state)
- return randoms
-
def random_sample(self, size=None, dtype=np.float64, out=None):
"""
random_sample(size=None, dtype='d', out=None)
@@ -339,19 +208,20 @@ cdef class RandomGenerator:
Examples
--------
- >>> randomgen.generator.random_sample()
+ >>> np.random.random_sample()
0.47108547995356098 # random
- >>> type(randomgen.generator.random_sample())
+ >>> type(np.random.random_sample())
<class 'float'>
- >>> randomgen.generator.random_sample((5,))
+ >>> np.random.random_sample((5,))
array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) # random
Three-by-two array of random numbers from [-5, 0):
- >>> 5 * randomgen.random_sample((3, 2)) - 5
+ >>> 5 * np.random.random_sample((3, 2)) - 5
array([[-3.99149989, -0.52338984], # random
[-2.99091858, -0.79479508],
[-1.23204345, -1.75224494]])
+
"""
cdef double temp
key = np.dtype(dtype).name
@@ -405,7 +275,6 @@ cdef class RandomGenerator:
b, 'b', CONS_POSITIVE,
0.0, '', CONS_NONE, None)
-
def exponential(self, scale=1.0, size=None):
"""
exponential(scale=1.0, size=None)
@@ -494,7 +363,8 @@ cdef class RandomGenerator:
--------
Output a 3x8000 array:
- >>> n = randomgen.generator.standard_exponential((3, 8000))
+ >>> n = np.random.standard_exponential((3, 8000))
+
"""
key = np.dtype(dtype).name
if key == 'float64':
@@ -515,10 +385,9 @@ cdef class RandomGenerator:
"""
tomaxint(size=None)
- Random integers between 0 and ``sys.maxint``, inclusive.
-
Return a sample of uniformly distributed random integers in the interval
- [0, ``sys.maxint``].
+ [0, ``np.iinfo(np.int).max``]. The np.int type translates to the C long
+ integer type and its precision is platform dependent.
Parameters
----------
@@ -540,16 +409,15 @@ cdef class RandomGenerator:
Examples
--------
- >>> rg = randomgen.RandomGenerator() # need a RandomGenerator object
+ >>> rg = np.random.randomgen.RandomGenerator() # need a RandomGenerator object
>>> rg.tomaxint((2,2,2))
array([[[1170048599, 1600360186], # random
[ 739731006, 1947757578]],
[[1871712945, 752307660],
[1601631370, 1479324245]]])
- >>> import sys
- >>> sys.maxint
+ >>> np.iinfo(np.int).max
2147483647
- >>> rg.tomaxint((2,2,2)) < sys.maxint
+ >>> rg.tomaxint((2,2,2)) < np.iinfo(np.int).max
array([[[ True, True],
[ True, True]],
[[ True, True],
@@ -627,68 +495,69 @@ cdef class RandomGenerator:
Examples
--------
- >>> randomgen.generator.randint(2, size=10)
- array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random
- >>> randomgen.generator.randint(1, size=10)
+ >>> np.random.randint(2, size=10)
+ array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random
+ >>> np.random.randint(1, size=10)
array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
Generate a 2 x 4 array of ints between 0 and 4, inclusive:
- >>> randomgen.generator.randint(5, size=(2, 4))
- array([[4, 0, 2, 1], # random
- [3, 2, 2, 0]])
+ >>> np.random.randint(5, size=(2, 4))
+ array([[4, 0, 2, 1],
+ [3, 2, 2, 0]]) # random
Generate a 1 x 3 array with 3 different upper bounds
- >>> randomgen.generator.randint(1, [3, 5, 10])
- array([2, 2, 9])
+ >>> np.random.randint(1, [3, 5, 10])
+ array([2, 2, 9]) # random
Generate a 1 by 3 array with 3 different lower bounds
- >>> randomgen.generator.randint([1, 5, 7], 10)
- array([9, 8, 7])
+ >>> np.random.randint([1, 5, 7], 10)
+ array([9, 8, 7]) # random
Generate a 2 by 4 array using broadcasting with dtype of uint8
- >>> randomgen.generator.randint([1, 3, 5, 7], [[10], [20]], dtype=np.uint8)
+ >>> np.random.randint([1, 3, 5, 7], [[10], [20]], dtype=np.uint8)
array([[ 8, 6, 9, 7],
- [ 1, 16, 9, 12]], dtype=uint8)
+ [ 1, 16, 9, 12]], dtype=uint8) # random
References
----------
.. [1] Daniel Lemire., "Fast Random Integer Generation in an Interval",
CoRR, Aug. 13, 2018, http://arxiv.org/abs/1805.10941.
+
"""
if high is None:
high = low
low = 0
key = np.dtype(dtype).name
- if not key in _randint_types:
+ if key not in _randint_types:
raise TypeError('Unsupported dtype "%s" for randint' % key)
if key == 'int32':
- ret = _rand_int32(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_int32(low, high, size, use_masked, self._brng, self.lock)
elif key == 'int64':
- ret = _rand_int64(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_int64(low, high, size, use_masked, self._brng, self.lock)
elif key == 'int16':
- ret = _rand_int16(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_int16(low, high, size, use_masked, self._brng, self.lock)
elif key == 'int8':
- ret = _rand_int8(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_int8(low, high, size, use_masked, self._brng, self.lock)
elif key == 'uint64':
- ret = _rand_uint64(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_uint64(low, high, size, use_masked, self._brng, self.lock)
elif key == 'uint32':
- ret = _rand_uint32(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_uint32(low, high, size, use_masked, self._brng, self.lock)
elif key == 'uint16':
- ret = _rand_uint16(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_uint16(low, high, size, use_masked, self._brng, self.lock)
elif key == 'uint8':
- ret = _rand_uint8(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_uint8(low, high, size, use_masked, self._brng, self.lock)
elif key == 'bool':
- ret = _rand_bool(low, high, size, use_masked, self._brng, self.lock)
+ ret = _rand_bool(low, high, size, use_masked, self._brng, self.lock)
if size is None and dtype in (np.bool, np.int, np.long):
- if np.array(ret).shape == ():
- return dtype(ret)
+ if np.array(ret).shape == ():
+ return dtype(ret)
return ret
def bytes(self, np.npy_intp length):
@@ -709,14 +578,13 @@ cdef class RandomGenerator:
Examples
--------
- >>> randomgen.generator.bytes(10)
+ >>> np.random.bytes(10)
' eh\\x85\\x022SZ\\xbf\\xa4' #random
"""
cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
return self.randint(0, 4294967296, size=n_uint32, dtype=np.uint32).tobytes()[:length]
-
@cython.wraparound(True)
def choice(self, a, size=None, replace=True, p=None):
"""
@@ -764,33 +632,33 @@ cdef class RandomGenerator:
--------
Generate a uniform random sample from np.arange(5) of size 3:
- >>> randomgen.generator.choice(5, 3)
+ >>> np.random.choice(5, 3)
array([0, 3, 4]) # random
- >>> #This is equivalent to randomgen.randint(0,5,3)
+ >>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
- >>> randomgen.generator.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
+ >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
array([3, 3, 0]) # random
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
- >>> randomgen.generator.choice(5, 3, replace=False)
+ >>> np.random.choice(5, 3, replace=False)
array([3,1,0]) # random
- >>> #This is equivalent to randomgen.permutation(np.arange(5))[:3]
+ >>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
- >>> randomgen.generator.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
+ >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
array([2, 3, 0]) # random
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
- >>> randomgen.generator.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
+ >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], # random
dtype='<U11')
@@ -849,7 +717,7 @@ cdef class RandomGenerator:
cdf /= cdf[-1]
uniform_samples = self.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
- idx = np.array(idx, copy=False) # searchsorted returns a scalar
+ idx = np.array(idx, copy=False) # searchsorted returns a scalar
else:
idx = self.randint(0, pop_size, size=shape)
else:
@@ -888,7 +756,7 @@ cdef class RandomGenerator:
# In most cases a scalar will have been made an array
idx = idx.item(0)
- #Use samples as indices for a if a is array-like
+ # Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
@@ -904,7 +772,6 @@ cdef class RandomGenerator:
return a[idx]
-
def uniform(self, low=0.0, high=1.0, size=None):
"""
uniform(low=0.0, high=1.0, size=None)
@@ -964,7 +831,7 @@ cdef class RandomGenerator:
--------
Draw samples from the distribution:
- >>> s = randomgen.generator.uniform(-1,0,1000)
+ >>> s = np.random.uniform(-1,0,1000)
All values are within the given interval:
@@ -980,6 +847,7 @@ cdef class RandomGenerator:
>>> count, bins, ignored = plt.hist(s, 15, density=True)
>>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
>>> plt.show()
+
"""
cdef bint is_scalar = True
cdef np.ndarray alow, ahigh, arange
@@ -1003,8 +871,10 @@ cdef class RandomGenerator:
None)
temp = np.subtract(ahigh, alow)
- Py_INCREF(temp) # needed to get around Pyrex's automatic reference-counting
- # rules because EnsureArray steals a reference
+ # needed to get around Pyrex's automatic reference-counting
+ # rules because EnsureArray steals a reference
+ Py_INCREF(temp)
+
arange = <np.ndarray>np.PyArray_EnsureArray(temp)
if not np.all(np.isfinite(arange)):
raise OverflowError('Range exceeds valid bounds')
@@ -1033,7 +903,7 @@ cdef class RandomGenerator:
Parameters
----------
d0, d1, ..., dn : int, optional
- The dimensions of the returned array, should all be positive.
+ The dimensions of the returned array, must be non-negative.
If no argument is given a single Python float is returned.
dtype : {str, dtype}, optional
Desired dtype of the result, either 'd' (or 'float64') or 'f'
@@ -1049,19 +919,13 @@ cdef class RandomGenerator:
--------
random
- Notes
- -----
- This is a convenience function. If you want an interface that takes
- a shape-tuple as the first argument, refer to randomgen.random_sample.
-
- ``dtype`` can only be changed using a keyword argument.
-
Examples
--------
- >>> randomgen.generator.rand(3,2)
+ >>> np.random.rand(3,2)
array([[ 0.14022471, 0.96360618], #random
[ 0.37601032, 0.25528411], #random
[ 0.49313049, 0.94909878]]) #random
+
"""
if len(args) == 0:
return self.random_sample(dtype=dtype)
@@ -1086,13 +950,10 @@ cdef class RandomGenerator:
distribution of mean 0 and variance 1. A single float randomly sampled
from the distribution is returned if no argument is provided.
- This is a convenience function. If you want an interface that takes a
- tuple as the first argument, use `numpy.random.standard_normal` instead.
-
Parameters
----------
d0, d1, ..., dn : int, optional
- The dimensions of the returned array, should be all positive.
+ The dimensions of the returned array, must be non-negative.
If no argument is given a single Python float is returned.
dtype : {str, dtype}, optional
Desired dtype of the result, either 'd' (or 'float64') or 'f'
@@ -1109,17 +970,17 @@ cdef class RandomGenerator:
See Also
--------
standard_normal : Similar, but takes a tuple as its argument.
- normal : Also accepts mu and sigma arguments
+ normal : Also accepts mu and sigma arguments.
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use:
- ``sigma * randomgen.randn(...) + mu``
+ ``sigma * np.random.randn(...) + mu``
Examples
--------
- >>> randomgen.generator.randn()
+ >>> np.random.randn()
2.1923875335537315 # random
Two-by-four array of samples from N(3, 6.25):
@@ -1138,11 +999,13 @@ cdef class RandomGenerator:
"""
random_integers(low, high=None, size=None)
- Random integers of type np.int64 between `low` and `high`, inclusive.
+ Random integers of type np.int between `low` and `high`, inclusive.
- Return random integers of type np.int64 from the "discrete uniform"
+ Return random integers of type np.int from the "discrete uniform"
distribution in the closed interval [`low`, `high`]. If `high` is
- None (the default), then results are from [1, `low`].
+ None (the default), then results are from [1, `low`]. The np.int
+ type translates to the C long integer type and its precision
+ is platform dependent.
This function has been deprecated. Use randint instead.
@@ -1179,15 +1042,15 @@ cdef class RandomGenerator:
To sample from N evenly spaced floating-point numbers between a and b,
use::
- a + (b - a) * (randomgen.random_integers(N) - 1) / (N - 1.)
+ a + (b - a) * (np.random.random_integers(N) - 1) / (N - 1.)
Examples
--------
- >>> randomgen.generator.random_integers(5)
+ >>> np.random.random_integers(5)
4 # random
- >>> type(randomgen.generator.random_integers(5))
+ >>> type(np.random.random_integers(5))
<class 'numpy.int64'>
- >>> randomgen.generator.random_integers(5, size=(3, 2))
+ >>> np.random.random_integers(5, size=(3,2))
array([[5, 4], # random
[3, 3],
[4, 5]])
@@ -1196,13 +1059,13 @@ cdef class RandomGenerator:
numbers between 0 and 2.5, inclusive (*i.e.*, from the set
:math:`{0, 5/8, 10/8, 15/8, 20/8}`):
- >>> 2.5 * (randomgen.generator.random_integers(5, size=(5,)) - 1) / 4.
+ >>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4.
array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ]) # random
Roll two six sided dice 1000 times and sum the results:
- >>> d1 = randomgen.generator.random_integers(1, 6, 1000)
- >>> d2 = randomgen.generator.random_integers(1, 6, 1000)
+ >>> d1 = np.random.random_integers(1, 6, 1000)
+ >>> d2 = np.random.random_integers(1, 6, 1000)
>>> dsums = d1 + d2
Display results as a histogram:
@@ -1221,8 +1084,9 @@ cdef class RandomGenerator:
else:
warnings.warn(("This function is deprecated. Please call "
- "randint({low}, {high} + 1) instead".format(
- low=low, high=high)), DeprecationWarning)
+ "randint({low}, {high} + 1)"
+ "instead".format(low=low, high=high)),
+ DeprecationWarning)
return self.randint(low, high + 1, size=size, dtype='l')
@@ -1298,7 +1162,6 @@ cdef class RandomGenerator:
else:
raise TypeError('Unsupported dtype "%s" for standard_normal' % key)
-
def normal(self, loc=0.0, scale=1.0, size=None):
"""
normal(loc=0.0, scale=1.0, size=None)
@@ -1368,7 +1231,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> mu, sigma = 0, 0.1 # mean and standard deviation
- >>> s = randomgen.generator.normal(mu, sigma, 1000)
+ >>> s = np.random.normal(mu, sigma, 1000)
Verify the mean and the variance:
@@ -1401,167 +1264,6 @@ cdef class RandomGenerator:
0.0, '', CONS_NONE,
None)
- def complex_normal(self, loc=0.0, gamma=1.0, relation=0.0, size=None):
- """
- complex_normal(loc=0.0, gamma=1.0, relation=0.0, size=None)
-
- Draw random samples from a complex normal (Gaussian) distribution.
-
- Parameters
- ----------
- loc : complex or array_like of complex
- Mean of the distribution.
- gamma : float, complex or array_like of float or complex
- Variance of the distribution
- relation : float, complex or array_like of float or complex
- Relation between the two component normals
- size : int or tuple of ints, optional
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``loc``, ``gamma`` and ``relation``
- are all scalars. Otherwise,
- ``np.broadcast(loc, gamma, relation).size`` samples are drawn.
-
- Returns
- -------
- out : ndarray or scalar
- Drawn samples from the parameterized complex normal distribution.
-
- See Also
- --------
- numpy.random.normal : random values from a real-valued normal
- distribution
-
- Notes
- -----
- **EXPERIMENTAL** Not part of official NumPy RandomState, may change until
- formal release on PyPi.
-
- Complex normals are generated from a bivariate normal where the
- variance of the real component is 0.5 Re(gamma + relation), the
- variance of the imaginary component is 0.5 Re(gamma - relation), and
- the covariance between the two is 0.5 Im(relation). The implied
- covariance matrix must be positive semi-definite and so both variances
- must be zero and the covariance must be weakly smaller than the
- product of the two standard deviations.
-
- References
- ----------
- .. [1] Wikipedia, "Complex normal distribution",
- https://en.wikipedia.org/wiki/Complex_normal_distribution
- .. [2] Leigh J. Halliwell, "Complex Random Variables" in "Casualty
- Actuarial Society E-Forum", Fall 2015.
-
- Examples
- --------
- Draw samples from the distribution:
-
- >>> s = randomgen.generator.complex_normal(size=1000)
- """
- cdef np.ndarray ogamma, orelation, oloc, randoms, v_real, v_imag, rho
- cdef double *randoms_data
- cdef double fgamma_r, fgamma_i, frelation_r, frelation_i, frho, fvar_r , fvar_i, \
- floc_r, floc_i, f_real, f_imag, i_r_scale, r_scale, i_scale, f_rho
- cdef np.npy_intp i, j, n, n2
- cdef np.broadcast it
-
- oloc = <np.ndarray>np.PyArray_FROM_OTF(loc, np.NPY_COMPLEX128, np.NPY_ALIGNED)
- ogamma = <np.ndarray>np.PyArray_FROM_OTF(gamma, np.NPY_COMPLEX128, np.NPY_ALIGNED)
- orelation = <np.ndarray>np.PyArray_FROM_OTF(relation, np.NPY_COMPLEX128, np.NPY_ALIGNED)
-
- if np.PyArray_NDIM(ogamma) == np.PyArray_NDIM(orelation) == np.PyArray_NDIM(oloc) == 0:
- floc_r = PyComplex_RealAsDouble(loc)
- floc_i = PyComplex_ImagAsDouble(loc)
- fgamma_r = PyComplex_RealAsDouble(gamma)
- fgamma_i = PyComplex_ImagAsDouble(gamma)
- frelation_r = PyComplex_RealAsDouble(relation)
- frelation_i = 0.5 * PyComplex_ImagAsDouble(relation)
-
- fvar_r = 0.5 * (fgamma_r + frelation_r)
- fvar_i = 0.5 * (fgamma_r - frelation_r)
- if fgamma_i != 0:
- raise ValueError('Im(gamma) != 0')
- if fvar_i < 0:
- raise ValueError('Re(gamma - relation) < 0')
- if fvar_r < 0:
- raise ValueError('Re(gamma + relation) < 0')
- f_rho = 0.0
- if fvar_i > 0 and fvar_r > 0:
- f_rho = frelation_i / sqrt(fvar_i * fvar_r)
- if f_rho > 1.0 or f_rho < -1.0:
- raise ValueError('Im(relation) ** 2 > Re(gamma ** 2 - relation** 2)')
-
- if size is None:
- f_real = random_gauss_zig(self._brng)
- f_imag = random_gauss_zig(self._brng)
-
- compute_complex(&f_real, &f_imag, floc_r, floc_i, fvar_r, fvar_i, f_rho)
- return PyComplex_FromDoubles(f_real, f_imag)
-
- randoms = <np.ndarray>np.empty(size, np.complex128)
- randoms_data = <double *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- i_r_scale = sqrt(1 - f_rho * f_rho)
- r_scale = sqrt(fvar_r)
- i_scale = sqrt(fvar_i)
- j = 0
- with self.lock, nogil:
- for i in range(n):
- f_real = random_gauss_zig(self._brng)
- f_imag = random_gauss_zig(self._brng)
- randoms_data[j+1] = floc_i + i_scale * (f_rho * f_real + i_r_scale * f_imag)
- randoms_data[j] = floc_r + r_scale * f_real
- j += 2
-
- return randoms
-
- gpc = ogamma + orelation
- gmc = ogamma - orelation
- v_real = <np.ndarray>(0.5 * np.real(gpc))
- if np.any(np.less(v_real, 0)):
- raise ValueError('Re(gamma + relation) < 0')
- v_imag = <np.ndarray>(0.5 * np.real(gmc))
- if np.any(np.less(v_imag, 0)):
- raise ValueError('Re(gamma - relation) < 0')
- if np.any(np.not_equal(np.imag(ogamma), 0)):
- raise ValueError('Im(gamma) != 0')
-
- cov = 0.5 * np.imag(orelation)
- rho = np.zeros_like(cov)
- idx = (v_real.flat > 0) & (v_imag.flat > 0)
- rho.flat[idx] = cov.flat[idx] / np.sqrt(v_real.flat[idx] * v_imag.flat[idx])
- if np.any(cov.flat[~idx] != 0) or np.any(np.abs(rho) > 1):
- raise ValueError('Im(relation) ** 2 > Re(gamma ** 2 - relation ** 2)')
-
- if size is not None:
- randoms = <np.ndarray>np.empty(size, np.complex128)
- else:
- it = np.PyArray_MultiIterNew4(oloc, v_real, v_imag, rho)
- randoms = <np.ndarray>np.empty(it.shape, np.complex128)
-
- randoms_data = <double *>np.PyArray_DATA(randoms)
- n = np.PyArray_SIZE(randoms)
-
- it = np.PyArray_MultiIterNew5(randoms, oloc, v_real, v_imag, rho)
- with self.lock, nogil:
- n2 = 2 * n # Avoid compiler noise for cast
- for i in range(n2):
- randoms_data[i] = random_gauss_zig(self._brng)
- with nogil:
- j = 0
- for i in range(n):
- floc_r= (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
- floc_i= (<double*>np.PyArray_MultiIter_DATA(it, 1))[1]
- fvar_r = (<double*>np.PyArray_MultiIter_DATA(it, 2))[0]
- fvar_i = (<double*>np.PyArray_MultiIter_DATA(it, 3))[0]
- f_rho = (<double*>np.PyArray_MultiIter_DATA(it, 4))[0]
- compute_complex(&randoms_data[j], &randoms_data[j+1], floc_r, floc_i, fvar_r, fvar_i, f_rho)
- j += 2
- np.PyArray_MultiIter_NEXT(it)
-
- return randoms
-
def standard_gamma(self, shape, size=None, dtype=np.float64, out=None):
"""
standard_gamma(shape, size=None, dtype='d', out=None)
@@ -1625,7 +1327,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> shape, scale = 2., 1. # mean and width
- >>> s = randomgen.generator.standard_gamma(shape, 1000000)
+ >>> s = np.random.standard_gamma(shape, 1000000)
Display the histogram of the samples, along with
the probability density function:
@@ -1637,6 +1339,7 @@ cdef class RandomGenerator:
... (sps.gamma(shape) * scale**shape))
>>> plt.plot(bins, y, linewidth=2, color='r')
>>> plt.show()
+
"""
cdef void *func
key = np.dtype(dtype).name
@@ -1711,8 +1414,8 @@ cdef class RandomGenerator:
--------
Draw samples from the distribution:
- >>> shape, scale = 2., 2. # mean and dispersion
- >>> s = randomgen.generator.gamma(shape, scale, 1000)
+ >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
+ >>> s = np.random.gamma(shape, scale, 1000)
Display the histogram of the samples, along with
the probability density function:
@@ -1739,7 +1442,7 @@ cdef class RandomGenerator:
Samples are drawn from an F distribution with specified parameters,
`dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
- freedom in denominator), where both parameters should be greater than
+ freedom in denominator), where both parameters must be greater than
zero.
The random variate of the F distribution (also known as the
@@ -1749,10 +1452,10 @@ cdef class RandomGenerator:
Parameters
----------
- dfnum : int or array_like of ints
- Degrees of freedom in numerator. Must be non-negative.
- dfden : int or array_like of ints
- Degrees of freedom in denominator. Must be non-negative.
+ dfnum : float or array_like of floats
+ Degrees of freedom in numerator, must be > 0.
+ dfden : float or array_like of float
+ Degrees of freedom in denominator, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1802,7 +1505,7 @@ cdef class RandomGenerator:
>>> dfnum = 1. # between group degrees of freedom
>>> dfden = 48. # within groups degrees of freedom
- >>> s = randomgen.generator.f(dfnum, dfden, 1000)
+ >>> s = np.random.f(dfnum, dfden, 1000)
The lower bound for the top 1% of the samples is :
@@ -1832,12 +1535,16 @@ cdef class RandomGenerator:
Parameters
----------
- dfnum : int or array_like of ints
- Parameter, should be > 1.
- dfden : int or array_like of ints
- Parameter, should be > 1.
+ dfnum : float or array_like of floats
+ Numerator degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.14.0
+ Earlier NumPy versions required dfnum > 1.
+ dfden : float or array_like of floats
+ Denominator degrees of freedom, must be > 0.
nonc : float or array_like of floats
- Parameter, should be >= 0.
+ Non-centrality parameter, the sum of the squares of the numerator
+ means, must be >= 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1877,9 +1584,9 @@ cdef class RandomGenerator:
>>> dfnum = 3 # between group deg of freedom
>>> dfden = 20 # within groups degrees of freedom
>>> nonc = 3.0
- >>> nc_vals = randomgen.generator.noncentral_f(dfnum, dfden, nonc, 1000000)
+ >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)
>>> NF = np.histogram(nc_vals, bins=50, density=True)
- >>> c_vals = randomgen.generator.f(dfnum, dfden, 1000000)
+ >>> c_vals = np.random.f(dfnum, dfden, 1000000)
>>> F = np.histogram(c_vals, bins=50, density=True)
>>> import matplotlib.pyplot as plt
>>> plt.plot(F[1][1:], F[0])
@@ -1905,8 +1612,8 @@ cdef class RandomGenerator:
Parameters
----------
- df : int or array_like of ints
- Number of degrees of freedom.
+ df : float or array_like of floats
+ Number of degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1951,7 +1658,7 @@ cdef class RandomGenerator:
Examples
--------
- >>> randomgen.generator.chisquare(2,4)
+ >>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
"""
@@ -1966,16 +1673,18 @@ cdef class RandomGenerator:
Draw samples from a noncentral chi-square distribution.
- The noncentral :math:`\\chi^2` distribution is a generalisation of
+ The noncentral :math:`\\chi^2` distribution is a generalization of
the :math:`\\chi^2` distribution.
Parameters
----------
- df : int or array_like of ints
- Degrees of freedom, should be > 0 as of NumPy 1.10.0,
- should be > 1 for earlier versions.
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.10.0
+ Earlier NumPy versions required df > 1.
nonc : float or array_like of floats
- Non-centrality, should be non-negative.
+ Non-centrality, must be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -1994,21 +1703,21 @@ cdef class RandomGenerator:
.. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
\\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
- \\P_{Y_{df+2i}}(x),
+ P_{Y_{df+2i}}(x),
where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
References
----------
- .. [1] Wikipedia, "Noncentral chi-square distribution"
- https://en.wikipedia.org/wiki/Noncentral_chi-square_distribution
+ .. [1] Wikipedia, "Noncentral chi-squared distribution"
+ https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution
Examples
--------
Draw values from the distribution and plot the histogram
>>> import matplotlib.pyplot as plt
- >>> values = plt.hist(randomgen.generator.noncentral_chisquare(3, 20, 100000),
+ >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
... bins=200, density=True)
>>> plt.show()
@@ -2016,9 +1725,9 @@ cdef class RandomGenerator:
and compare to a chisquare.
>>> plt.figure()
- >>> values = plt.hist(randomgen.generator.noncentral_chisquare(3, .0000001, 100000),
+ >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),
... bins=np.arange(0., 25, .1), density=True)
- >>> values2 = plt.hist(randomgen.generator.chisquare(3, 100000),
+ >>> values2 = plt.hist(np.random.chisquare(3, 100000),
... bins=np.arange(0., 25, .1), density=True)
>>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
>>> plt.show()
@@ -2027,7 +1736,7 @@ cdef class RandomGenerator:
distribution.
>>> plt.figure()
- >>> values = plt.hist(randomgen.generator.noncentral_chisquare(3, 20, 100000),
+ >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
... bins=200, density=True)
>>> plt.show()
@@ -2081,7 +1790,7 @@ cdef class RandomGenerator:
----------
.. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
Distribution",
- http://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
.. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
Wolfram Web Resource.
http://mathworld.wolfram.com/CauchyDistribution.html
@@ -2092,9 +1801,9 @@ cdef class RandomGenerator:
--------
Draw samples and plot the distribution:
- >>> s = randomgen.generator.standard_cauchy(1000000)
- >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
>>> import matplotlib.pyplot as plt
+ >>> s = np.random.standard_cauchy(1000000)
+ >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
>>> plt.hist(s, bins=100)
>>> plt.show()
@@ -2115,8 +1824,8 @@ cdef class RandomGenerator:
Parameters
----------
- df : int or array_like of ints
- Degrees of freedom, should be > 0.
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2166,7 +1875,7 @@ cdef class RandomGenerator:
We have 10 degrees of freedom, so is the sample mean within 95% of the
recommended value?
- >>> s = randomgen.generator.standard_t(10, size=100000)
+ >>> s = np.random.standard_t(10, size=100000)
>>> np.mean(intake)
6753.636363636364
>>> intake.std(ddof=1)
@@ -2260,7 +1969,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> mu, kappa = 0.0, 4.0 # mean and dispersion
- >>> s = randomgen.generator.vonmises(mu, kappa, 1000)
+ >>> s = np.random.vonmises(mu, kappa, 1000)
Display the histogram of the samples, along with
the probability density function:
@@ -2306,7 +2015,7 @@ cdef class RandomGenerator:
Parameters
----------
a : float or array_like of floats
- Shape of the distribution. All values must be positive.
+ Shape of the distribution. Must be positive.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2360,7 +2069,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> a, m = 3., 2. # shape and mode
- >>> s = (randomgen.generator.pareto(a, 1000) + 1) * m
+ >>> s = (np.random.pareto(a, 1000) + 1) * m
Display the histogram of the samples, along with the probability
density function:
@@ -2453,7 +2162,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> a = 5. # shape
- >>> s = randomgen.generator.weibull(a, 1000)
+ >>> s = np.random.weibull(a, 1000)
Display the histogram of the samples, along with
the probability density function:
@@ -2463,7 +2172,7 @@ cdef class RandomGenerator:
>>> def weib(x,n,a):
... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
- >>> count, bins, ignored = plt.hist(randomgen.generator.weibull(5.,1000))
+ >>> count, bins, ignored = plt.hist(np.random.weibull(5.,1000))
>>> x = np.arange(1,100.)/50.
>>> scale = count.max()/weib(x, 1., 5.).max()
>>> plt.plot(x, weib(x, 1., 5.)*scale)
@@ -2487,7 +2196,7 @@ cdef class RandomGenerator:
Parameters
----------
a : float or array_like of floats
- Parameter of the distribution. Must be positive.
+ Parameter of the distribution. Must be non-negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2525,7 +2234,7 @@ cdef class RandomGenerator:
Dataplot Reference Manual, Volume 2: Let Subcommands and Library
Functions", National Institute of Standards and Technology
Handbook Series, June 2003.
- http://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
+ https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
Examples
--------
@@ -2533,7 +2242,7 @@ cdef class RandomGenerator:
>>> a = 5. # shape
>>> samples = 1000
- >>> s = randomgen.generator.power(a, samples)
+ >>> s = np.random.power(a, samples)
Display the histogram of the samples, along with
the probability density function:
@@ -2549,20 +2258,20 @@ cdef class RandomGenerator:
Compare the power function distribution to the inverse of the Pareto.
>>> from scipy import stats
- >>> rvs = randomgen.generator.power(5, 1000000)
- >>> rvsp = randomgen.generator.pareto(5, 1000000)
+ >>> rvs = np.random.power(5, 1000000)
+ >>> rvsp = np.random.pareto(5, 1000000)
>>> xx = np.linspace(0,1,100)
>>> powpdf = stats.powerlaw.pdf(xx,5)
>>> plt.figure()
>>> plt.hist(rvs, bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
- >>> plt.title('randomgen.generator.power(5)')
+ >>> plt.title('np.random.power(5)')
>>> plt.figure()
>>> plt.hist(1./(1.+rvsp), bins=50, density=True)
>>> plt.plot(xx,powpdf,'r-')
- >>> plt.title('inverse of 1 + randomgen.generator.pareto(5)')
+ >>> plt.title('inverse of 1 + np.random.pareto(5)')
>>> plt.figure()
>>> plt.hist(1./(1.+rvsp), bins=50, density=True)
@@ -2592,8 +2301,8 @@ cdef class RandomGenerator:
loc : float or array_like of floats, optional
The position, :math:`\\mu`, of the distribution peak. Default is 0.
scale : float or array_like of floats, optional
- :math:`\\lambda`, the exponential decay. Default is 1. Must be
- non-negative.
+ :math:`\\lambda`, the exponential decay. Default is 1. Must be non-
+ negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2637,7 +2346,7 @@ cdef class RandomGenerator:
Draw samples from the distribution
>>> loc, scale = 0., 1.
- >>> s = randomgen.generator.laplace(loc, scale, 1000)
+ >>> s = np.random.laplace(loc, scale, 1000)
Display the histogram of the samples, along with
the probability density function:
@@ -2675,8 +2384,8 @@ cdef class RandomGenerator:
loc : float or array_like of floats, optional
The location of the mode of the distribution. Default is 0.
scale : float or array_like of floats, optional
- The scale parameter of the distribution. Default is 1. Must be
- non-negative.
+ The scale parameter of the distribution. Default is 1. Must be non-
+ negative.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -2740,7 +2449,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> mu, beta = 0, 0.1 # location and scale
- >>> s = randomgen.generator.gumbel(mu, beta, 1000)
+ >>> s = np.random.gumbel(mu, beta, 1000)
Display the histogram of the samples, along with
the probability density function:
@@ -2758,7 +2467,7 @@ cdef class RandomGenerator:
>>> means = []
>>> maxima = []
>>> for i in range(0,1000) :
- ... a = randomgen.generator.normal(mu, beta, 1000)
+ ... a = np.random.normal(mu, beta, 1000)
... means.append(a.mean())
... maxima.append(a.max())
>>> count, bins, ignored = plt.hist(maxima, 30, density=True)
@@ -2792,7 +2501,7 @@ cdef class RandomGenerator:
loc : float or array_like of floats, optional
Parameter of the distribution. Default is 0.
scale : float or array_like of floats, optional
- Parameter of the distribution. Must be >= 0.
+ Parameter of the distribution. Must be non-negative.
Default is 1.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
@@ -2840,7 +2549,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> loc, scale = 10, 1
- >>> s = randomgen.generator.logistic(loc, scale, 10000)
+ >>> s = np.random.logistic(loc, scale, 10000)
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s, bins=50)
@@ -2848,8 +2557,8 @@ cdef class RandomGenerator:
>>> def logist(x, loc, scale):
... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
- >>> scale = logist(bins, loc, scale).max()
- >>> plt.plot(bins, logist(bins, loc, scale)*count.max()/scale)
+ >>> lgst_val = logist(bins, loc, scale)
+ >>> plt.plot(bins, lgst_val * count.max() / lgst_val.max())
>>> plt.show()
"""
@@ -2923,7 +2632,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> mu, sigma = 3., 1. # mean and standard deviation
- >>> s = randomgen.generator.lognormal(mu, sigma, 1000)
+ >>> s = np.random.lognormal(mu, sigma, 1000)
Display the histogram of the samples, along with
the probability density function:
@@ -2947,7 +2656,7 @@ cdef class RandomGenerator:
>>> # values, drawn from a normal distribution.
>>> b = []
>>> for i in range(1000):
- ... a = 10. + randomgen.generator.randn(100)
+ ... a = 10. + np.random.standard_normal(100)
... b.append(np.product(a))
>>> b = np.array(b) / np.min(b) # scale values to be positive
@@ -3015,7 +2724,7 @@ cdef class RandomGenerator:
Draw values from the distribution and plot the histogram
>>> from matplotlib.pyplot import hist
- >>> values = hist(randomgen.generator.rayleigh(3, 100000), bins=200, density=True)
+ >>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True)
Wave heights tend to follow a Rayleigh distribution. If the mean wave
height is 1 meter, what fraction of waves are likely to be larger than 3
@@ -3023,7 +2732,7 @@ cdef class RandomGenerator:
>>> meanvalue = 1
>>> modevalue = np.sqrt(2 / np.pi) * meanvalue
- >>> s = randomgen.generator.rayleigh(modevalue, 1000000)
+ >>> s = np.random.rayleigh(modevalue, 1000000)
The percentage of waves larger than 3 meters is:
@@ -3087,15 +2796,15 @@ cdef class RandomGenerator:
.. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
Distribution: Theory : Methodology, and Applications", CRC Press,
1988.
- .. [3] Wikipedia, "Wald distribution"
- https://en.wikipedia.org/wiki/Wald_distribution
+ .. [3] Wikipedia, "Inverse Gaussian distribution"
+ https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
Examples
--------
Draw values from the distribution and plot the histogram:
>>> import matplotlib.pyplot as plt
- >>> h = plt.hist(randomgen.generator.wald(3, 2, 100000), bins=200, density=True)
+ >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, density=True)
>>> plt.show()
"""
@@ -3122,9 +2831,9 @@ cdef class RandomGenerator:
Lower limit.
mode : float or array_like of floats
The value where the peak of the distribution occurs.
- The value should fulfill the condition ``left <= mode <= right``.
+ The value must fulfill the condition ``left <= mode <= right``.
right : float or array_like of floats
- Upper limit, should be larger than `left`.
+ Upper limit, must be larger than `left`.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
@@ -3162,7 +2871,7 @@ cdef class RandomGenerator:
Draw values from the distribution and plot the histogram:
>>> import matplotlib.pyplot as plt
- >>> h = plt.hist(randomgen.generator.triangular(-3, 0, 8, 100000), bins=200,
+ >>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=200,
... density=True)
>>> plt.show()
@@ -3275,7 +2984,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> n, p = 10, .5 # number of trials, probability of each trial
- >>> s = randomgen.generator.binomial(n, p, 1000)
+ >>> s = np.random.binomial(n, p, 1000)
# result of flipping a coin 10 times, tested 1000 times.
A real world example. A company drills 9 wild-cat oil exploration
@@ -3285,8 +2994,9 @@ cdef class RandomGenerator:
Let's do 20,000 trials of the model, and count the number that
generate zero positive results.
- >>> sum(randomgen.generator.binomial(9, 0.1, 20000) == 0)/20000.
+ >>> sum(np.random.binomial(9, 0.1, 20000) == 0)/20000.
# answer = 0.38885, or 38%.
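# A quick analytic cross-check of the example above (a sketch using only the
# standard library): the exact probability that all nine wells come up dry is
# 0.9**9 ~ 0.387, consistent with the simulated value of roughly 0.389.
import math
exact = math.comb(9, 0) * 0.1 ** 0 * 0.9 ** 9
print(exact)  # ~0.387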
+
"""
# Uses a custom implementation since self._binomial is required
@@ -3304,7 +3014,7 @@ cdef class RandomGenerator:
is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0
if not is_scalar:
- check_array_constraint(p_arr, 'p', CONS_BOUNDED_0_1_NOTNAN)
+ check_array_constraint(p_arr, 'p', CONS_BOUNDED_0_1)
check_array_constraint(n_arr, 'n', CONS_NON_NEGATIVE)
if size is not None:
randoms = <np.ndarray>np.empty(size, np.int64)
@@ -3328,7 +3038,7 @@ cdef class RandomGenerator:
_dp = PyFloat_AsDouble(p)
_in = <int64_t>n
- check_constraint(_dp, 'p', CONS_BOUNDED_0_1_NOTNAN)
+ check_constraint(_dp, 'p', CONS_BOUNDED_0_1)
check_constraint(<double>_in, 'n', CONS_NON_NEGATIVE)
if size is None:
@@ -3337,7 +3047,7 @@ cdef class RandomGenerator:
randoms = <np.ndarray>np.empty(size, np.int64)
cnt = np.PyArray_SIZE(randoms)
- randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
with self.lock, nogil:
for i in range(cnt):
@@ -3346,7 +3056,6 @@ cdef class RandomGenerator:
return randoms
-
def negative_binomial(self, n, p, size=None):
"""
negative_binomial(n, p, size=None)
@@ -3354,14 +3063,13 @@ cdef class RandomGenerator:
Draw samples from a negative binomial distribution.
Samples are drawn from a negative binomial distribution with specified
- parameters, `n` successes and `p` probability of success where `n` is an
- integer > 0 and `p` is in the interval [0, 1].
+ parameters, `n` successes and `p` probability of success where `n`
+ is > 0 and `p` is in the interval [0, 1].
Parameters
----------
- n : int or array_like of ints
- Parameter of the distribution, > 0. Floats are also accepted,
- but they will be truncated to integers.
+ n : float or array_like of floats
+ Parameter of the distribution, > 0.
p : float or array_like of floats
Parameter of the distribution, >= 0 and <=1.
size : int or tuple of ints, optional
@@ -3379,14 +3087,17 @@ cdef class RandomGenerator:
Notes
-----
- The probability density for the negative binomial distribution is
+ The probability mass function of the negative binomial distribution is
- .. math:: P(N;n,p) = \\binom{N+n-1}{N}p^{n}(1-p)^{N},
+ .. math:: P(N;n,p) = \\frac{\\Gamma(N+n)}{N!\\Gamma(n)}p^{n}(1-p)^{N},
where :math:`n` is the number of successes, :math:`p` is the
- probability of success, and :math:`N+n` is the number of trials.
- The negative binomial distribution gives the probability of N
- failures given n successes, with a success on the last trial.
+ probability of success, :math:`N+n` is the number of trials, and
+ :math:`\\Gamma` is the gamma function. When :math:`n` is an integer,
+ :math:`\\frac{\\Gamma(N+n)}{N!\\Gamma(n)} = \\binom{N+n-1}{N}`, which is
+    the more common form of this term in the pmf. The negative
+ binomial distribution gives the probability of N failures given n
+ successes, with a success on the last trial.
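# A minimal numerical sketch (standard library only) of the identity above:
# for integer n, Gamma(N+n)/(N! * Gamma(n)) equals the binomial coefficient
# C(N+n-1, N).
import math
N, n = 7, 3
gamma_form = math.gamma(N + n) / (math.factorial(N) * math.gamma(n))
binom_form = math.comb(N + n - 1, N)
assert math.isclose(gamma_form, binom_form)  # both equal 36 here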
If one throws a die repeatedly until the third time a "1" appears,
then the probability distribution of the number of non-"1"s that
@@ -3410,16 +3121,16 @@ cdef class RandomGenerator:
for each successive well, that is what is the probability of a
single success after drilling 5 wells, after 6 wells, etc.?
- >>> s = randomgen.generator.negative_binomial(1, 0.9, 100000)
+ >>> s = np.random.negative_binomial(1, 0.1, 100000)
>>> for i in range(1, 11): # doctest: +SKIP
... probability = sum(s<i) / 100000.
... print(i, "wells drilled, probability of one success =", probability)
"""
return disc(&random_negative_binomial, self._brng, size, self.lock, 2, 0,
- n, 'n', CONS_POSITIVE,
- p, 'p', CONS_BOUNDED_0_1,
- 0.0, '', CONS_NONE)
+ n, 'n', CONS_POSITIVE_NOT_NAN,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE)
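# Sketch: the drilling probabilities in the example above have a closed form.
# With a single required success and p = 0.1, P(success within i wells) is
# 1 - 0.9**i, which the simulated sum(s < i)/100000. should approach.
for i in range(1, 11):
    print(i, "wells drilled, exact probability of one success =", 1 - 0.9 ** i)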
def poisson(self, lam=1.0, size=None):
"""
@@ -3433,7 +3144,7 @@ cdef class RandomGenerator:
Parameters
----------
lam : float or array_like of floats
- Expectation of interval, should be >= 0. A sequence of expectation
+ Expectation of interval, must be >= 0. A sequence of expectation
intervals must be broadcastable over the requested size.
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
@@ -3474,7 +3185,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> import numpy as np
- >>> s = randomgen.generator.poisson(5, 10000)
+ >>> s = np.random.poisson(5, 10000)
Display histogram of the sample:
@@ -3484,13 +3195,13 @@ cdef class RandomGenerator:
Draw each 100 values for lambda 100 and 500:
- >>> s = randomgen.generator.poisson(lam=(100., 500.), size=(100, 2))
+ >>> s = np.random.poisson(lam=(100., 500.), size=(100, 2))
"""
return disc(&random_poisson, self._brng, size, self.lock, 1, 0,
- lam, 'lam', CONS_POISSON,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE)
+ lam, 'lam', CONS_POISSON,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
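# Sketch (assumes ``import numpy as np``): for Poisson(lam) both the sample
# mean and the sample variance should be close to lam.
import numpy as np
s = np.random.poisson(5, 10000)
print(s.mean(), s.var())  # both roughly 5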
def zipf(self, a, size=None):
"""
@@ -3549,7 +3260,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> a = 2. # parameter
- >>> s = randomgen.generator.zipf(a, 1000)
+ >>> s = np.random.zipf(a, 1000)
Display the histogram of the samples, along with
the probability density function:
@@ -3567,9 +3278,9 @@ cdef class RandomGenerator:
"""
return disc(&random_zipf, self._brng, size, self.lock, 1, 0,
- a, 'a', CONS_GT_1,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE)
+ a, 'a', CONS_GT_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
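# Sketch (assumes ``import numpy as np``): with a = 2 the Zipf pmf at k = 1 is
# 1/zeta(2) = 6/pi**2, so roughly 61% of the draws should equal 1.
import numpy as np
s = np.random.zipf(2., 100000)
print((s == 1).mean())   # ~0.61
print(6 / np.pi ** 2)    # 0.6079...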
def geometric(self, p, size=None):
"""
@@ -3609,7 +3320,7 @@ cdef class RandomGenerator:
Draw ten thousand values from the geometric distribution,
with the probability of an individual success equal to 0.35:
- >>> z = randomgen.generator.geometric(p=0.35, size=10000)
+ >>> z = np.random.geometric(p=0.35, size=10000)
How many trials succeeded after a single run?
@@ -3618,9 +3329,9 @@ cdef class RandomGenerator:
"""
return disc(&random_geometric, self._brng, size, self.lock, 1, 0,
- p, 'p', CONS_BOUNDED_GT_0_1,
- 0.0, '', CONS_NONE,
- 0.0, '', CONS_NONE)
+ p, 'p', CONS_BOUNDED_GT_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
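# Sketch (assumes ``import numpy as np``): for geometric(p) the chance that the
# very first trial succeeds is p, and the mean number of trials is 1/p.
import numpy as np
z = np.random.geometric(p=0.35, size=10000)
print((z == 1).mean())  # ~0.35
print(z.mean())         # ~1/0.35 ~ 2.86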
def hypergeometric(self, ngood, nbad, nsample, size=None):
"""
@@ -3629,9 +3340,9 @@ cdef class RandomGenerator:
Draw samples from a Hypergeometric distribution.
Samples are drawn from a hypergeometric distribution with specified
- parameters, ngood (ways to make a good selection), nbad (ways to make
- a bad selection), and nsample = number of items sampled, which is less
- than or equal to the sum ngood + nbad.
+ parameters, `ngood` (ways to make a good selection), `nbad` (ways to make
+ a bad selection), and `nsample` (number of items sampled, which is less
+ than or equal to the sum ``ngood + nbad``).
Parameters
----------
@@ -3645,14 +3356,16 @@ cdef class RandomGenerator:
size : int or tuple of ints, optional
Output shape. If the given shape is, e.g., ``(m, n, k)``, then
``m * n * k`` samples are drawn. If size is ``None`` (default),
- a single value is returned if ``ngood``, ``nbad``, and ``nsample``
+ a single value is returned if `ngood`, `nbad`, and `nsample`
are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size``
samples are drawn.
Returns
-------
out : ndarray or scalar
- Drawn samples from the parameterized hypergeometric distribution.
+ Drawn samples from the parameterized hypergeometric distribution. Each
+ sample is the number of good items within a randomly selected subset of
+ size `nsample` taken from a set of `ngood` good items and `nbad` bad items.
See Also
--------
@@ -3667,11 +3380,11 @@ cdef class RandomGenerator:
where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g`
- for P(x) the probability of x successes, g = ngood, b = nbad, and
- n = number of samples.
+ for P(x) the probability of ``x`` good results in the drawn sample,
+ g = `ngood`, b = `nbad`, and n = `nsample`.
- Consider an urn with black and white marbles in it, ngood of them
- black and nbad are white. If you draw nsample balls without
+ Consider an urn with black and white marbles in it, `ngood` of them
+ are black and `nbad` are white. If you draw `nsample` balls without
replacement, then the hypergeometric distribution describes the
distribution of black balls in the drawn sample.
@@ -3697,7 +3410,7 @@ cdef class RandomGenerator:
>>> ngood, nbad, nsamp = 100, 2, 10
# number of good, number of bad, and number of samples
- >>> s = randomgen.generator.hypergeometric(ngood, nbad, nsamp, 1000)
+ >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)
>>> from matplotlib.pyplot import hist
>>> hist(s)
# note that it is very unlikely to grab both bad items
@@ -3706,7 +3419,7 @@ cdef class RandomGenerator:
If you pull 15 marbles at random, how likely is it that
12 or more of them are one color?
- >>> s = randomgen.generator.hypergeometric(15, 15, 15, 100000)
+ >>> s = np.random.hypergeometric(15, 15, 15, 100000)
>>> sum(s>=12)/100000. + sum(s<=3)/100000.
# answer = 0.003 ... pretty unlikely!
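# Sketch (standard library only): the marble example above has an exact answer.
# With 15 good, 15 bad and 15 drawn, P(X >= 12) + P(X <= 3) equals
# 2 * sum_{k=12..15} C(15, k) * C(15, 15-k) / C(30, 15).
import math
total = math.comb(30, 15)
tail = sum(math.comb(15, k) * math.comb(15, 15 - k) for k in range(12, 16))
print(2 * tail / total)  # ~0.0028, i.e. "pretty unlikely"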
@@ -3725,12 +3438,6 @@ cdef class RandomGenerator:
lnbad = <int64_t>nbad
lnsample = <int64_t>nsample
- if lngood < 0:
- raise ValueError("ngood < 0")
- if lnbad < 0:
- raise ValueError("nbad < 0")
- if lnsample < 1:
- raise ValueError("nsample < 1")
if lngood + lnbad < lnsample:
raise ValueError("ngood + nbad < nsample")
return disc(&random_hypergeometric, self._brng, size, self.lock, 0, 3,
@@ -3738,11 +3445,11 @@ cdef class RandomGenerator:
lnbad, 'nbad', CONS_NON_NEGATIVE,
lnsample, 'nsample', CONS_GTE_1)
- if np.any(np.less(np.add(ongood, onbad),onsample)):
+ if np.any(np.less(np.add(ongood, onbad), onsample)):
raise ValueError("ngood + nbad < nsample")
return discrete_broadcast_iii(&random_hypergeometric, self._brng, size, self.lock,
ongood, 'ngood', CONS_NON_NEGATIVE,
- onbad, nbad, CONS_NON_NEGATIVE,
+ onbad, 'nbad', CONS_NON_NEGATIVE,
onsample, 'nsample', CONS_GTE_1)
def logseries(self, p, size=None):
@@ -3776,7 +3483,7 @@ cdef class RandomGenerator:
Notes
-----
- The probability density for the Log Series distribution is
+ The probability mass function for the Log Series distribution is
.. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)},
@@ -3807,7 +3514,7 @@ cdef class RandomGenerator:
Draw samples from the distribution:
>>> a = .6
- >>> s = randomgen.generator.logseries(a, 10000)
+ >>> s = np.random.logseries(a, 10000)
>>> import matplotlib.pyplot as plt
>>> count, bins, ignored = plt.hist(s)
@@ -3815,8 +3522,8 @@ cdef class RandomGenerator:
>>> def logseries(k, p):
... return -p**k/(k*np.log(1-p))
- >>> plt.plot(bins, logseries(bins, a)*count.max()/
- logseries(bins, a).max(), 'r')
+ >>> plt.plot(bins, logseries(bins, a) * count.max()/
+ ... logseries(bins, a).max(), 'r')
>>> plt.show()
"""
@@ -3829,7 +3536,7 @@ cdef class RandomGenerator:
def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
tol=1e-8):
"""
- multivariate_normal(self, mean, cov, size=None, check_valid='warn', tol=1e-8)
+ multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
Draw random samples from a multivariate normal distribution.
@@ -3856,6 +3563,7 @@ cdef class RandomGenerator:
Behavior when the covariance matrix is not positive semidefinite.
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
+ cov is cast to double before the check.
Returns
-------
@@ -3896,7 +3604,7 @@ cdef class RandomGenerator:
Diagonal covariance means that points are oriented along x or y-axis:
>>> import matplotlib.pyplot as plt
- >>> x, y = randomgen.generator.multivariate_normal(mean, cov, 5000).T
+ >>> x, y = np.random.multivariate_normal(mean, cov, 5000).T
>>> plt.plot(x, y, 'x')
>>> plt.axis('equal')
>>> plt.show()
@@ -3916,7 +3624,7 @@ cdef class RandomGenerator:
--------
>>> mean = (1, 2)
>>> cov = [[1, 0], [0, 1]]
- >>> x = randomgen.generator.multivariate_normal(mean, cov, (3, 3))
+ >>> x = np.random.multivariate_normal(mean, cov, (3, 3))
>>> x.shape
(3, 3, 2)
@@ -3924,7 +3632,7 @@ cdef class RandomGenerator:
standard deviation:
>>> list((x[0,0,:] - mean) < 0.6)
- [True, True]
+ [True, True] # random
"""
from numpy.dual import svd
@@ -3940,11 +3648,11 @@ cdef class RandomGenerator:
shape = size
if len(mean.shape) != 1:
- raise ValueError("mean must be 1 dimensional")
+ raise ValueError("mean must be 1 dimensional")
if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
- raise ValueError("cov must be 2 dimensional and square")
+ raise ValueError("cov must be 2 dimensional and square")
if mean.shape[0] != cov.shape[0]:
- raise ValueError("mean and cov must have same length")
+ raise ValueError("mean and cov must have same length")
# Compute shape of output and create a matrix of independent
# standard normally distributed random numbers. The matrix has rows
@@ -3968,6 +3676,8 @@ cdef class RandomGenerator:
# order to preserve current outputs. Note that symmetry has not
# been checked.
+ # GH10839, ensure double to make tol meaningful
+ cov = cov.astype(np.double)
(u, s, v) = svd(cov)
if check_valid != 'ignore':
@@ -3993,7 +3703,7 @@ cdef class RandomGenerator:
Draw samples from a multinomial distribution.
- The multinomial distribution is a multivariate generalisation of the
+ The multinomial distribution is a multivariate generalization of the
binomial distribution. Take an experiment with one of ``p``
possible outcomes. An example of such an experiment is throwing a dice,
where the outcome can be 1 through 6. Each sample drawn from the
@@ -4007,7 +3717,7 @@ cdef class RandomGenerator:
Number of experiments.
pvals : sequence of floats, length p
Probabilities of each of the ``p`` different outcomes. These
- should sum to 1 (however, the last element is always assumed to
+ must sum to 1 (however, the last element is always assumed to
account for the remaining probability, as long as
``sum(pvals[:-1]) <= 1``).
size : int or tuple of ints, optional
@@ -4028,24 +3738,24 @@ cdef class RandomGenerator:
--------
Throw a dice 20 times:
- >>> randomgen.generator.multinomial(20, [1/6.]*6, size=1)
- array([[4, 1, 7, 5, 2, 1]])
+ >>> np.random.multinomial(20, [1/6.]*6, size=1)
+ array([[4, 1, 7, 5, 2, 1]]) # random
It landed 4 times on 1, once on 2, etc.
Now, throw the dice 20 times, and 20 times again:
- >>> randomgen.generator.multinomial(20, [1/6.]*6, size=2)
+ >>> np.random.multinomial(20, [1/6.]*6, size=2)
array([[3, 4, 3, 3, 4, 3],
- [2, 4, 3, 4, 0, 7]])
+ [2, 4, 3, 4, 0, 7]]) # random
For the first run, we threw 3 times 1, 4 times 2, etc. For the second,
we threw 2 times 1, 4 times 2, etc.
A loaded die is more likely to land on number 6:
- >>> randomgen.generator.multinomial(100, [1/7.]*5 + [2/7.])
- array([11, 16, 14, 17, 16, 26]) # random
+ >>> np.random.multinomial(100, [1/7.]*5 + [2/7.])
+ array([11, 16, 14, 17, 16, 26]) # random
The probability inputs should be normalized. As an implementation
detail, the value of the last entry is ignored and assumed to take
@@ -4053,12 +3763,12 @@ cdef class RandomGenerator:
A biased coin which has twice as much weight on one side as on the
other should be sampled like so:
- >>> randomgen.generator.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
- array([38, 62]) # random
+ >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
+ array([38, 62]) # random
not like:
- >>> randomgen.generator.multinomial(100, [1.0, 2.0]) # WRONG
+ >>> np.random.multinomial(100, [1.0, 2.0]) # WRONG
array([100, 0])
"""
@@ -4175,7 +3885,7 @@ cdef class RandomGenerator:
average length, but allowing some variation in the relative sizes of
the pieces.
- >>> s = randomgen.generator.dirichlet((10, 5, 3), 20).transpose()
+ >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()
>>> import matplotlib.pyplot as plt
>>> plt.barh(range(20), s[0])
@@ -4185,39 +3895,36 @@ cdef class RandomGenerator:
"""
- #=================
+ # =================
# Pure python algo
- #=================
- #alpha = N.atleast_1d(alpha)
- #k = alpha.size
-
- #if n == 1:
- # val = N.zeros(k)
- # for i in range(k):
- # val[i] = sgamma(alpha[i], n)
- # val /= N.sum(val)
- #else:
- # val = N.zeros((k, n))
- # for i in range(k):
- # val[i] = sgamma(alpha[i], n)
- # val /= N.sum(val, axis = 0)
- # val = val.T
-
- #return val
-
- cdef np.npy_intp k
- cdef np.npy_intp totsize
- cdef np.ndarray alpha_arr, val_arr
- cdef double *alpha_data
- cdef double *val_data
- cdef np.npy_intp i, j
- cdef double acc, invacc
-
- k = len(alpha)
- alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(alpha, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ # =================
+ # alpha = N.atleast_1d(alpha)
+ # k = alpha.size
+
+ # if n == 1:
+ # val = N.zeros(k)
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val)
+ # else:
+ # val = N.zeros((k, n))
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val, axis = 0)
+ # val = val.T
+ # return val
+
+ cdef np.npy_intp k, totsize, i, j
+ cdef np.ndarray alpha_arr, val_arr
+ cdef double *alpha_data
+ cdef double *val_data
+ cdef double acc, invacc
+
+ k = len(alpha)
+ alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(alpha, np.NPY_DOUBLE, np.NPY_ALIGNED)
if np.any(np.less_equal(alpha_arr, 0)):
raise ValueError('alpha <= 0')
- alpha_data = <double*>np.PyArray_DATA(alpha_arr)
+ alpha_data = <double*>np.PyArray_DATA(alpha_arr)
if size is None:
shape = (k,)
@@ -4227,7 +3934,7 @@ cdef class RandomGenerator:
except:
shape = tuple(size) + (k,)
- diric = np.zeros(shape, np.float64)
+ diric = np.zeros(shape, np.float64)
val_arr = <np.ndarray>diric
val_data= <double*>np.PyArray_DATA(val_arr)
@@ -4239,10 +3946,10 @@ cdef class RandomGenerator:
for j in range(k):
val_data[i+j] = random_standard_gamma_zig(self._brng,
alpha_data[j])
- acc = acc + val_data[i + j]
- invacc = 1/acc
+ acc = acc + val_data[i + j]
+ invacc = 1/acc
for j in range(k):
- val_data[i + j] = val_data[i + j] * invacc
+ val_data[i + j] = val_data[i + j] * invacc
i = i + k
return diric
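# Sketch (assumes ``import numpy as np``) of the algorithm implemented above:
# draw one gamma variate per concentration parameter and normalize each row by
# its sum.
import numpy as np
alpha = np.array([10., 5., 3.])
g = np.random.standard_gamma(alpha, size=(20, alpha.size))
diric = g / g.sum(axis=1, keepdims=True)
print(diric.sum(axis=1))  # every row sums to 1.0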
@@ -4270,14 +3977,14 @@ cdef class RandomGenerator:
Examples
--------
>>> arr = np.arange(10)
- >>> randomgen.shuffle(arr)
+ >>> np.random.shuffle(arr)
>>> arr
[1 7 5 2 9 4 3 6 0 8] # random
Multi-dimensional arrays are only shuffled along the first axis:
>>> arr = np.arange(9).reshape((3, 3))
- >>> randomgen.generator.shuffle(arr)
+ >>> np.random.shuffle(arr)
>>> arr
array([[3, 4, 5], # random
[6, 7, 8],
@@ -4300,7 +4007,7 @@ cdef class RandomGenerator:
# of bytes for the swaps to avoid leaving one of the objects
# within the buffer and erroneously decrementing it's refcount
# when the function exits.
- buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
+ buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
buf_ptr = <char*><size_t>buf.ctypes.data
with self.lock:
# We trick gcc into providing a specialized implementation for
@@ -4311,11 +4018,13 @@ cdef class RandomGenerator:
else:
self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr)
elif isinstance(x, np.ndarray) and x.ndim and x.size:
- buf = np.empty_like(x[0,...])
+ buf = np.empty_like(x[0, ...])
with self.lock:
for i in reversed(range(1, n)):
j = random_interval(self._brng, i)
- if i == j : continue # i == j is not needed and memcpy is undefined.
+ if i == j:
+ # i == j is not needed and memcpy is undefined.
+ continue
buf[...] = x[j]
x[j] = x[i]
x[i] = buf
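# Sketch (pure Python) of the Fisher-Yates swap performed above: walk the
# sequence backwards, swapping element i with a uniformly chosen j <= i.
import random
def fisher_yates(x):
    for i in reversed(range(1, len(x))):
        j = random.randint(0, i)  # inclusive upper bound, like random_interval
        x[i], x[j] = x[j], x[i]
    return x
print(fisher_yates(list(range(10))))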
@@ -4358,14 +4067,14 @@ cdef class RandomGenerator:
Examples
--------
- >>> randomgen.generator.permutation(10)
+ >>> np.random.permutation(10)
array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random
- >>> randomgen.generator.permutation([1, 4, 9, 12, 15])
+ >>> np.random.permutation([1, 4, 9, 12, 15])
array([15, 1, 9, 4, 12]) # random
>>> arr = np.arange(9).reshape((3, 3))
- >>> randomgen.generator.permutation(arr)
+ >>> np.random.permutation(arr)
array([[6, 7, 8], # random
[0, 1, 2],
[3, 4, 5]])
@@ -4398,7 +4107,6 @@ binomial = _random_generator.binomial
bytes = _random_generator.bytes
chisquare = _random_generator.chisquare
choice = _random_generator.choice
-complex_normal = _random_generator.complex_normal
dirichlet = _random_generator.dirichlet
exponential = _random_generator.exponential
f = _random_generator.f
@@ -4424,9 +4132,7 @@ rand = _random_generator.rand
randint = _random_generator.randint
randn = _random_generator.randn
random_integers = _random_generator.random_integers
-random_raw = _random_generator.random_raw
random_sample = _random_generator.random_sample
-random_uintegers = _random_generator.random_uintegers
rayleigh = _random_generator.rayleigh
shuffle = _random_generator.shuffle
standard_cauchy = _random_generator.standard_cauchy
@@ -4440,4 +4146,4 @@ uniform = _random_generator.uniform
vonmises = _random_generator.vonmises
wald = _random_generator.wald
weibull = _random_generator.weibull
-zipf = _random_generator.zipf \ No newline at end of file
+zipf = _random_generator.zipf
diff --git a/numpy/random/randomgen/legacy/__init__.py b/numpy/random/randomgen/legacy/__init__.py
new file mode 100644
index 000000000..9ce1f665d
--- /dev/null
+++ b/numpy/random/randomgen/legacy/__init__.py
@@ -0,0 +1,3 @@
+from ..mtrand import RandomState as LegacyGenerator
+
+__all__ = ['LegacyGenerator']
diff --git a/_randomgen/randomgen/legacy/legacy_distributions.pxd b/numpy/random/randomgen/legacy/legacy_distributions.pxd
index e2157f706..bc00994db 100644
--- a/_randomgen/randomgen/legacy/legacy_distributions.pxd
+++ b/numpy/random/randomgen/legacy/legacy_distributions.pxd
@@ -3,16 +3,16 @@
from libc.stdint cimport uint64_t
import numpy as np
-cimport numpy as np
+cimport numpy as np
-from randomgen.distributions cimport brng_t
+from ..distributions cimport brng_t
cdef extern from "../src/legacy/distributions-boxmuller.h":
struct aug_brng:
brng_t *basicrng
int has_gauss
- double gauss
+ double gauss
ctypedef aug_brng aug_brng_t
diff --git a/_randomgen/randomgen/mt19937.pyx b/numpy/random/randomgen/mt19937.pyx
index c88dd02b9..306ce23e2 100644
--- a/_randomgen/randomgen/mt19937.pyx
+++ b/numpy/random/randomgen/mt19937.pyx
@@ -1,18 +1,19 @@
-from __future__ import absolute_import
-
import operator
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy
np.import_array()
@@ -97,8 +98,8 @@ cdef class MT19937:
generators should be initialized with the same seed to ensure that the
segments come from the same sequence.
- >>> from randomgen.entropy import random_entropy
- >>> from randomgen import RandomGenerator, MT19937
+ >>> from numpy.random.randomgen.entropy import random_entropy
+ >>> from numpy.random.randomgen import RandomGenerator, MT19937
>>> seed = random_entropy()
>>> rs = [RandomGenerator(MT19937(seed)) for _ in range(10)]
# Advance rs[i] by i jumps
@@ -122,11 +123,13 @@ cdef class MT19937:
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None):
self.rng_state = <mt19937_state *>malloc(sizeof(mt19937_state))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &mt19937_uint64
@@ -153,45 +156,44 @@ cdef class MT19937:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
- def __random_integer(self, bits=64):
+ def random_raw(self, size=None, output=True):
"""
- 64-bit Random Integers from the PRNG
+        random_raw(size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
Parameters
----------
- bits : {32, 64}
- Number of random bits to return
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+        output : bool, optional
+            Output values. If False, the generated values are not returned;
+            this mode is useful for performance testing.
Returns
-------
- rv : int
- Next random value
+ out : uint or ndarray
+ Drawn samples.
Notes
-----
- Testing only
+        This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
"""
- if bits == 64:
- return self._brng.next_uint64(self._brng.state)
- elif bits == 32:
- return self._brng.next_uint32(self._brng.state)
- else:
- raise ValueError('bits must be 32 or 64')
+ return random_raw(self._brng, self.lock, size, output)
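# Usage sketch; the import path below follows this patch and may differ in a
# released build.
from numpy.random.randomgen import MT19937
brng = MT19937(12345)
print(brng.random_raw(3))               # three uint64 draws from the raw stream
brng.random_raw(10 ** 6, output=False)  # timing-only mode, values not returned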
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None):
"""
@@ -231,7 +233,12 @@ cdef class MT19937:
raise ValueError("Seed must be between 0 and 2**32 - 1")
mt19937_seed(self.rng_state, seed)
except TypeError:
- obj = np.asarray(seed).astype(np.int64, casting='safe')
+ obj = np.asarray(seed)
+ if obj.size == 0:
+ raise ValueError("Seed must be non-empty")
+ obj = obj.astype(np.int64, casting='safe')
+ if obj.ndim != 1:
+ raise ValueError("Seed array must be 1-d")
if ((obj > int(2**32 - 1)) | (obj < 0)).any():
raise ValueError("Seed must be between 0 and 2**32 - 1")
obj = obj.astype(np.uint32, casting='unsafe', order='C')
@@ -274,16 +281,15 @@ cdef class MT19937:
key[i] = self.rng_state.key[i]
return {'brng': self.__class__.__name__,
- 'state': {'key':key, 'pos': self.rng_state.pos}}
+ 'state': {'key': key, 'pos': self.rng_state.pos}}
@state.setter
def state(self, value):
if isinstance(value, tuple):
- if value[0] != 'MT19937' or len(value) not in (3,5):
- raise ValueError('state is not a legacy MT19937 state')
+ if value[0] != 'MT19937' or len(value) not in (3, 5):
+ raise ValueError('state is not a legacy MT19937 state')
value = {'brng': 'MT19937',
- 'state':{'key': value[1], 'pos': value[2]}}
-
+ 'state': {'key': value[1], 'pos': value[2]}}
if not isinstance(value, dict):
raise TypeError('state must be a dict')
@@ -299,12 +305,12 @@ cdef class MT19937:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -313,25 +319,10 @@ cdef class MT19937:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&mt19937_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&mt19937_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&mt19937_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -352,19 +343,8 @@ cdef class MT19937:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -373,10 +353,10 @@ cdef class MT19937:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator used this instance as the core PRNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator \ No newline at end of file
+ return self._generator
diff --git a/numpy/random/randomgen/mtrand.pyx b/numpy/random/randomgen/mtrand.pyx
new file mode 100644
index 000000000..b5d6ff9bc
--- /dev/null
+++ b/numpy/random/randomgen/mtrand.pyx
@@ -0,0 +1,4223 @@
+#!python
+#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True, language_level=3
+import operator
+import warnings
+
+from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
+from cpython cimport (Py_INCREF, PyFloat_AsDouble)
+from libc cimport string
+from libc.stdlib cimport malloc, free
+cimport numpy as np
+import numpy as np
+cimport cython
+
+from .bounded_integers cimport *
+from .bounded_integers import _randint_types
+from .common cimport *
+from .distributions cimport *
+from .legacy.legacy_distributions cimport *
+from .mt19937 import MT19937 as _MT19937
+
+np.import_array()
+
+cdef class RandomState:
+ """
+ RandomState(brng=None)
+
+ Container for the Mersenne Twister pseudo-random number generator.
+
+ `RandomState` exposes a number of methods for generating random numbers
+ drawn from a variety of probability distributions. In addition to the
+ distribution-specific arguments, each method takes a keyword argument
+ `size` that defaults to ``None``. If `size` is ``None``, then a single
+ value is generated and returned. If `size` is an integer, then a 1-D
+ array filled with generated values is returned. If `size` is a tuple,
+ then an array with that shape is filled and returned.
+
+ *Compatibility Guarantee*
+ A fixed seed and a fixed series of calls to 'RandomState' methods using
+ the same parameters will always produce the same results up to roundoff
+ error except when the values were incorrect. Incorrect values will be
+ fixed and the NumPy version in which the fix was made will be noted in
+ the relevant docstring. Extension of existing parameter ranges and the
+ addition of new parameters is allowed as long the previous behavior
+ remains unchanged.
+
+ Parameters
+ ----------
+ brng : {None, int, array_like, BasicRNG}, optional
+ Random seed used to initialize the pseudo-random number generator or
+        an instantiated BasicRNG. If an integer or array, used as a seed for
+ the MT19937 BasicRNG. Values can be any integer between 0 and
+ 2**32 - 1 inclusive, an array (or other sequence) of such integers,
+        or ``None`` (the default). If `brng` is ``None``, then the `MT19937`
+ BasicRNG is initialized by reading data from ``/dev/urandom``
+ (or the Windows analogue) if available or seed from the clock
+ otherwise.
+
+ Notes
+ -----
+ The Python stdlib module "random" also contains a Mersenne Twister
+ pseudo-random number generator with a number of methods that are similar
+ to the ones available in `RandomState`. `RandomState`, besides being
+ NumPy-aware, has the advantage that it provides a much larger number
+ of probability distributions to choose from.
+
+ """
+ cdef public object _basicrng
+ cdef brng_t *_brng
+ cdef aug_brng_t *_aug_state
+ cdef binomial_t *_binomial
+ cdef object lock
+ poisson_lam_max = POISSON_LAM_MAX
+
+ def __init__(self, brng=None):
+ if brng is None:
+ brng = _MT19937()
+ elif not hasattr(brng, 'capsule'):
+ brng = _MT19937(brng)
+
+ self._basicrng = brng
+ capsule = brng.capsule
+ cdef const char *name = "BasicRNG"
+ if not PyCapsule_IsValid(capsule, name):
+ raise ValueError("Invalid brng. The brng must be instantized.")
+ self._brng = <brng_t *> PyCapsule_GetPointer(capsule, name)
+ self._aug_state = <aug_brng_t *>malloc(sizeof(aug_brng_t))
+ self._aug_state.basicrng = self._brng
+ self._binomial = <binomial_t *>malloc(sizeof(binomial_t))
+ self._reset_gauss()
+ self.lock = brng.lock
+
+ def __dealloc__(self):
+ free(self._binomial)
+ free(self._aug_state)
+
+ def __repr__(self):
+ return self.__str__() + ' at 0x{:X}'.format(id(self))
+
+ def __str__(self):
+ _str = self.__class__.__name__
+ _str += '(' + self._basicrng.__class__.__name__ + ')'
+ return _str
+
+ # Pickling support:
+ def __getstate__(self):
+ return self.get_state(legacy=False)
+
+ def __setstate__(self, state):
+ self.set_state(state)
+
+ def __reduce__(self):
+ state = self.get_state(legacy=False)
+ from ._pickle import __randomstate_ctor
+ return (__randomstate_ctor,
+ (state['brng'],),
+ state)
+
+ cdef _reset_gauss(self):
+ self._aug_state.has_gauss = 0
+ self._aug_state.gauss = 0.0
+
+ def seed(self, *args, **kwargs):
+ """
+ seed(self, *args, **kwargs)
+
+ Reseed the basic RNG.
+
+ Parameters depend on the basic RNG used.
+
+ Notes
+ -----
+ Arguments are directly passed to the basic RNG. This is a convenience
+ function.
+
+        The preferred way to reseed is to call ``seed`` directly on a basic
+        RNG instance, as this example demonstrates.
+
+ >>> from numpy.random.randomgen import MT19937
+ >>> from numpy.random import RandomState
+ >>> brng = MT19937(123456789)
+ >>> rs = RandomState(brng)
+ >>> brng.seed(987654321)
+
+ These best practice examples are equivalent to
+
+ >>> rs = RandomState(MT19937())
+ >>> rs.seed(987654321)
+ """
+ self._basicrng.seed(*args, **kwargs)
+ self._reset_gauss()
+
+ def get_state(self, legacy=True):
+ """
+ get_state()
+
+ Return a tuple representing the internal state of the generator.
+
+ For more details, see `set_state`.
+
+ Returns
+ -------
+ out : {tuple(str, ndarray of 624 uints, int, int, float), dict}
+ The returned tuple has the following items:
+
+ 1. the string 'MT19937'.
+ 2. a 1-D array of 624 unsigned integer keys.
+ 3. an integer ``pos``.
+ 4. an integer ``has_gauss``.
+ 5. a float ``cached_gaussian``.
+
+            If `legacy` is False, or the basic RNG is not MT19937, then
+ state is returned as a dictionary.
+
+ legacy : bool
+ Flag indicating the return a legacy tuple state when the basic RNG
+ is MT19937.
+
+ See Also
+ --------
+ set_state
+
+ Notes
+ -----
+ `set_state` and `get_state` are not needed to work with any of the
+ random distributions in NumPy. If the internal state is manually altered,
+ the user should know exactly what he/she is doing.
+
+ """
+ st = self._basicrng.state
+ if st['brng'] != 'MT19937' and legacy:
+ warnings.warn('get_state and legacy can only be used with the '
+ 'MT19937 basic RNG. To silence this warning, '
+ 'set `legacy` to False.', RuntimeWarning)
+ legacy = False
+ st['has_gauss'] = self._aug_state.has_gauss
+ st['gauss'] = self._aug_state.gauss
+ if legacy:
+ return (st['brng'], st['state']['key'], st['state']['pos'],
+ st['has_gauss'], st['gauss'])
+ return st
+
+ def set_state(self, state):
+ """
+ set_state(state)
+
+ Set the internal state of the generator from a tuple.
+
+ For use if one has reason to manually (re-)set the internal state of the
+ Basic RNG used by the RandomState instance. By default, RandomState uses
+ the "Mersenne Twister"[1]_ pseudo-random number generating algorithm.
+
+ Parameters
+ ----------
+ state : {tuple(str, ndarray of 624 uints, int, int, float), dict}
+ The `state` tuple has the following items:
+
+ 1. the string 'MT19937', specifying the Mersenne Twister algorithm.
+ 2. a 1-D array of 624 unsigned integers ``keys``.
+ 3. an integer ``pos``.
+ 4. an integer ``has_gauss``.
+ 5. a float ``cached_gaussian``.
+
+            If state is a dictionary, it is directly set using the BasicRNG's
+ `state` property.
+
+ Returns
+ -------
+ out : None
+ Returns 'None' on success.
+
+ See Also
+ --------
+ get_state
+
+ Notes
+ -----
+ `set_state` and `get_state` are not needed to work with any of the
+ random distributions in NumPy. If the internal state is manually altered,
+ the user should know exactly what he/she is doing.
+
+ For backwards compatibility, the form (str, array of 624 uints, int) is
+ also accepted although it is missing some information about the cached
+ Gaussian value: ``state = ('MT19937', keys, pos)``.
+
+ References
+ ----------
+ .. [1] M. Matsumoto and T. Nishimura, "Mersenne Twister: A
+ 623-dimensionally equidistributed uniform pseudorandom number
+ generator," *ACM Trans. on Modeling and Computer Simulation*,
+ Vol. 8, No. 1, pp. 3-30, Jan. 1998.
+
+ """
+ if isinstance(state, dict):
+ if 'brng' not in state or 'state' not in state:
+ raise ValueError('state dictionary is not valid.')
+ st = state
+ else:
+ if not isinstance(state, (tuple, list)):
+ raise TypeError('state must be a dict or a tuple.')
+ if state[0] != 'MT19937':
+                raise ValueError('set_state can only be used with legacy MT19937 '
+ 'state instances.')
+ st = {'brng': state[0],
+ 'state': {'key': state[1], 'pos': state[2]}}
+ if len(state) > 3:
+ st['has_gauss'] = state[3]
+ st['gauss'] = state[4]
+ value = st
+
+ self._aug_state.gauss = st.get('gauss', 0.0)
+ self._aug_state.has_gauss = st.get('has_gauss', 0)
+ self._basicrng.state = st
+
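# Sketch (assumes ``import numpy as np``): saving and restoring the state
# reproduces the same stream of draws.
import numpy as np
rs = np.random.RandomState(12345)
saved = rs.get_state()
first = rs.standard_normal(3)
rs.set_state(saved)
again = rs.standard_normal(3)
assert np.allclose(first, again)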
+ def random_sample(self, size=None):
+ """
+ random_sample(size=None)
+
+ Return random floats in the half-open interval [0.0, 1.0).
+
+ Results are from the "continuous uniform" distribution over the
+ stated interval. To sample :math:`Unif[a, b), b > a` multiply
+ the output of `random_sample` by `(b-a)` and add `a`::
+
+ (b - a) * random_sample() + a
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : float or ndarray of floats
+ Array of random floats of shape `size` (unless ``size=None``, in which
+ case a single float is returned).
+
+ Examples
+ --------
+ >>> np.random.random_sample()
+ 0.47108547995356098 # random
+ >>> type(np.random.random_sample())
+ <class 'float'>
+ >>> np.random.random_sample((5,))
+ array([ 0.30220482, 0.86820401, 0.1654503 , 0.11659149, 0.54323428]) # random
+
+ Three-by-two array of random numbers from [-5, 0):
+
+ >>> 5 * np.random.random_sample((3, 2)) - 5
+ array([[-3.99149989, -0.52338984], # random
+ [-2.99091858, -0.79479508],
+ [-1.23204345, -1.75224494]])
+
+ """
+ cdef double temp
+ return double_fill(&random_double_fill, self._brng, size, self.lock, None)
+
+ def beta(self, a, b, size=None):
+ """
+ beta(a, b, size=None)
+
+ Draw samples from a Beta distribution.
+
+ The Beta distribution is a special case of the Dirichlet distribution,
+ and is related to the Gamma distribution. It has the probability
+ distribution function
+
+ .. math:: f(x; a,b) = \\frac{1}{B(\\alpha, \\beta)} x^{\\alpha - 1}
+ (1 - x)^{\\beta - 1},
+
+ where the normalization, B, is the beta function,
+
+ .. math:: B(\\alpha, \\beta) = \\int_0^1 t^{\\alpha - 1}
+ (1 - t)^{\\beta - 1} dt.
+
+ It is often seen in Bayesian inference and order statistics.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Alpha, positive (>0).
+ b : float or array_like of floats
+ Beta, positive (>0).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` and ``b`` are both scalars.
+ Otherwise, ``np.broadcast(a, b).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized beta distribution.
+
+ """
+ return cont(&legacy_beta, self._aug_state, size, self.lock, 2,
+ a, 'a', CONS_POSITIVE,
+ b, 'b', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def exponential(self, scale=1.0, size=None):
+ """
+ exponential(scale=1.0, size=None)
+
+ Draw samples from an exponential distribution.
+
+ Its probability density function is
+
+ .. math:: f(x; \\frac{1}{\\beta}) = \\frac{1}{\\beta} \\exp(-\\frac{x}{\\beta}),
+
+ for ``x > 0`` and 0 elsewhere. :math:`\\beta` is the scale parameter,
+ which is the inverse of the rate parameter :math:`\\lambda = 1/\\beta`.
+ The rate parameter is an alternative, widely used parameterization
+ of the exponential distribution [3]_.
+
+ The exponential distribution is a continuous analogue of the
+ geometric distribution. It describes many common situations, such as
+ the size of raindrops measured over many rainstorms [1]_, or the time
+ between page requests to Wikipedia [2]_.
+
+ Parameters
+ ----------
+ scale : float or array_like of floats
+ The scale parameter, :math:`\\beta = 1/\\lambda`. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``scale`` is a scalar. Otherwise,
+ ``np.array(scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized exponential distribution.
+
+ References
+ ----------
+ .. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
+ Random Signal Principles", 4th ed, 2001, p. 57.
+ .. [2] Wikipedia, "Poisson process",
+ https://en.wikipedia.org/wiki/Poisson_process
+ .. [3] Wikipedia, "Exponential distribution",
+ https://en.wikipedia.org/wiki/Exponential_distribution
+
+ """
+ return cont(&legacy_exponential, self._aug_state, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
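# Sketch (assumes ``import numpy as np``): with scale beta the sample mean is
# about beta, and the rate parameterization is simply lambda = 1/beta.
import numpy as np
beta = 2.0
s = np.random.exponential(scale=beta, size=100000)
print(s.mean())        # ~2.0
print(1.0 / s.mean())  # ~0.5, the rate lambda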
+ def standard_exponential(self, size=None):
+ """
+ standard_exponential(size=None)
+
+ Draw samples from the standard exponential distribution.
+
+ `standard_exponential` is identical to the exponential distribution
+ with a scale parameter of 1.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : float or ndarray
+ Drawn samples.
+
+ Examples
+ --------
+ Output a 3x8000 array:
+
+ >>> n = np.random.standard_exponential((3, 8000))
+
+ """
+ return cont(&legacy_standard_exponential, self._aug_state, size, self.lock, 0,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None)
+
+ def tomaxint(self, size=None):
+ """
+ tomaxint(size=None)
+
+ Return a sample of uniformly distributed random integers in the interval
+ [0, ``np.iinfo(np.int).max``]. The np.int type translates to the C long
+ integer type and its precision is platform dependent.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : ndarray
+ Drawn samples, with shape `size`.
+
+ See Also
+ --------
+ randint : Uniform sampling over a given half-open interval of integers.
+ random_integers : Uniform sampling over a given closed interval of
+ integers.
+
+ Examples
+ --------
+ >>> rs = np.random.RandomState() # need a RandomState object
+ >>> rs.tomaxint((2,2,2))
+ array([[[1170048599, 1600360186], # random
+ [ 739731006, 1947757578]],
+ [[1871712945, 752307660],
+ [1601631370, 1479324245]]])
+ >>> np.iinfo(np.int).max
+ 2147483647
+ >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int).max
+ array([[[ True, True],
+ [ True, True]],
+ [[ True, True],
+ [ True, True]]])
+
+ """
+ cdef np.npy_intp n
+ cdef np.ndarray randoms
+ cdef int64_t *randoms_data
+
+ if size is None:
+ with self.lock:
+ return random_positive_int(self._brng)
+
+ randoms = <np.ndarray>np.empty(size, dtype=np.int64)
+ randoms_data = <int64_t*>np.PyArray_DATA(randoms)
+ n = np.PyArray_SIZE(randoms)
+
+ for i in range(n):
+ with self.lock, nogil:
+ randoms_data[i] = random_positive_int(self._brng)
+ return randoms
+
+ def randint(self, low, high=None, size=None, dtype=int):
+ """
+        randint(low, high=None, size=None, dtype=int)
+
+ Return random integers from `low` (inclusive) to `high` (exclusive).
+
+ Return random integers from the "discrete uniform" distribution of
+ the specified dtype in the "half-open" interval [`low`, `high`). If
+ `high` is None (the default), then results are from [0, `low`).
+
+ Parameters
+ ----------
+ low : int or array-like of ints
+ Lowest (signed) integers to be drawn from the distribution (unless
+ ``high=None``, in which case this parameter is one above the
+ *highest* such integer).
+ high : int or array-like of ints, optional
+ If provided, one above the largest (signed) integer to be drawn
+ from the distribution (see above for behavior if ``high=None``).
+ If array-like, must contain integer values
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ dtype : dtype, optional
+ Desired dtype of the result. All dtypes are determined by their
+ name, i.e., 'int64', 'int', etc, so byteorder is not available
+ and a specific precision may have different C types depending
+ on the platform. The default value is 'np.int'.
+
+ .. versionadded:: 1.11.0
+
+ Returns
+ -------
+ out : int or ndarray of ints
+ `size`-shaped array of random integers from the appropriate
+ distribution, or a single such random int if `size` not provided.
+
+ See Also
+ --------
+ random.random_integers : similar to `randint`, only for the closed
+ interval [`low`, `high`], and 1 is the lowest value if `high` is
+ omitted. In particular, this other one is the one to use to generate
+ uniformly distributed discrete non-integers.
+
+ Examples
+ --------
+ >>> np.random.randint(2, size=10)
+ array([1, 0, 0, 0, 1, 1, 0, 0, 1, 0]) # random
+ >>> np.random.randint(1, size=10)
+ array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
+
+ Generate a 2 x 4 array of ints between 0 and 4, inclusive:
+
+ >>> np.random.randint(5, size=(2, 4))
+ array([[4, 0, 2, 1], # random
+ [3, 2, 2, 0]])
+
+ Generate a 1 x 3 array with 3 different upper bounds
+
+ >>> np.random.randint(1, [3, 5, 10])
+ array([2, 2, 9]) # random
+
+ Generate a 1 by 3 array with 3 different lower bounds
+
+ >>> np.random.randint([1, 5, 7], 10)
+ array([9, 8, 7]) # random
+
+ Generate a 2 by 4 array using broadcasting with dtype of uint8
+
+ >>> np.random.randint([1, 3, 5, 7], [[10], [20]], dtype=np.uint8)
+ array([[ 8, 6, 9, 7], # random
+ [ 1, 16, 9, 12]], dtype=uint8)
+ """
+ cdef bint use_masked=1
+
+ if high is None:
+ high = low
+ low = 0
+
+ key = np.dtype(dtype).name
+ if key not in _randint_types:
+ raise TypeError('Unsupported dtype "%s" for randint' % key)
+
+ if key == 'int32':
+ ret = _rand_int32(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'int64':
+ ret = _rand_int64(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'int16':
+ ret = _rand_int16(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'int8':
+ ret = _rand_int8(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'uint64':
+ ret = _rand_uint64(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'uint32':
+ ret = _rand_uint32(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'uint16':
+ ret = _rand_uint16(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'uint8':
+ ret = _rand_uint8(low, high, size, use_masked, self._brng, self.lock)
+ elif key == 'bool':
+ ret = _rand_bool(low, high, size, use_masked, self._brng, self.lock)
+
+ if size is None and dtype in (np.bool, np.int, np.long):
+ if np.array(ret).shape == ():
+ return dtype(ret)
+ return ret
+
+ def bytes(self, np.npy_intp length):
+ """
+ bytes(length)
+
+ Return random bytes.
+
+ Parameters
+ ----------
+ length : int
+ Number of random bytes.
+
+ Returns
+ -------
+ out : str
+ String of length `length`.
+
+ Examples
+ --------
+ >>> np.random.bytes(10)
+ ' eh\\x85\\x022SZ\\xbf\\xa4' #random
+
+ """
+ cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
+ return self.randint(0, 4294967296, size=n_uint32, dtype=np.uint32).tobytes()[:length]
+
+ @cython.wraparound(True)
+ def choice(self, a, size=None, replace=True, p=None):
+ """
+ choice(a, size=None, replace=True, p=None)
+
+ Generates a random sample from a given 1-D array
+
+ .. versionadded:: 1.7.0
+
+ Parameters
+ ----------
+ a : 1-D array-like or int
+ If an ndarray, a random sample is generated from its elements.
+ If an int, the random sample is generated as if a were np.arange(a)
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ replace : boolean, optional
+ Whether the sample is with or without replacement
+ p : 1-D array-like, optional
+ The probabilities associated with each entry in a.
+ If not given the sample assumes a uniform distribution over all
+ entries in a.
+
+ Returns
+ -------
+ samples : single item or ndarray
+ The generated random samples
+
+ Raises
+ ------
+ ValueError
+ If a is an int and less than zero, if a or p are not 1-dimensional,
+ if a is an array-like of size 0, if p is not a vector of
+ probabilities, if a and p have different lengths, or if
+ replace=False and the sample size is greater than the population
+ size
+
+ See Also
+ --------
+ randint, shuffle, permutation
+
+ Examples
+ --------
+ Generate a uniform random sample from np.arange(5) of size 3:
+
+ >>> np.random.choice(5, 3)
+ array([0, 3, 4]) # random
+ >>> #This is equivalent to np.random.randint(0,5,3)
+
+ Generate a non-uniform random sample from np.arange(5) of size 3:
+
+ >>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0])
+ array([3, 3, 0]) # random
+
+ Generate a uniform random sample from np.arange(5) of size 3 without
+ replacement:
+
+ >>> np.random.choice(5, 3, replace=False)
+ array([3,1,0]) # random
+ >>> #This is equivalent to np.random.permutation(np.arange(5))[:3]
+
+ Generate a non-uniform random sample from np.arange(5) of size
+ 3 without replacement:
+
+ >>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
+ array([2, 3, 0]) # random
+
+ Any of the above can be repeated with an arbitrary array-like
+ instead of just integers. For instance:
+
+ >>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
+ >>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
+ array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'], # random
+ dtype='<U11')
+
+ """
+
+ # Format and Verify input
+ a = np.array(a, copy=False)
+ if a.ndim == 0:
+ try:
+ # __index__ must return an integer by python rules.
+ pop_size = operator.index(a.item())
+ except TypeError:
+ raise ValueError("a must be 1-dimensional or an integer")
+ if pop_size <= 0 and np.prod(size) != 0:
+ raise ValueError("a must be greater than 0 unless no samples are taken")
+ elif a.ndim != 1:
+ raise ValueError("a must be 1-dimensional")
+ else:
+ pop_size = a.shape[0]
+            if pop_size == 0 and np.prod(size) != 0:
+ raise ValueError("'a' cannot be empty unless no samples are taken")
+
+ if p is not None:
+ d = len(p)
+
+ atol = np.sqrt(np.finfo(np.float64).eps)
+ if isinstance(p, np.ndarray):
+ if np.issubdtype(p.dtype, np.floating):
+ atol = max(atol, np.sqrt(np.finfo(p.dtype).eps))
+
+ p = <np.ndarray>np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ pix = <double*>np.PyArray_DATA(p)
+
+ if p.ndim != 1:
+ raise ValueError("'p' must be 1-dimensional")
+ if p.size != pop_size:
+ raise ValueError("'a' and 'p' must have same size")
+ p_sum = kahan_sum(pix, d)
+ if np.isnan(p_sum):
+ raise ValueError("probabilities contain NaN")
+ if np.logical_or.reduce(p < 0):
+ raise ValueError("probabilities are not non-negative")
+ if abs(p_sum - 1.) > atol:
+ raise ValueError("probabilities do not sum to 1")
+
+ shape = size
+ if shape is not None:
+ size = np.prod(shape, dtype=np.intp)
+ else:
+ size = 1
+
+ # Actual sampling
+ if replace:
+ if p is not None:
+ cdf = p.cumsum()
+ cdf /= cdf[-1]
+ uniform_samples = self.random_sample(shape)
+ idx = cdf.searchsorted(uniform_samples, side='right')
+ idx = np.array(idx, copy=False) # searchsorted returns a scalar
+ else:
+ idx = self.randint(0, pop_size, size=shape)
+ else:
+ if size > pop_size:
+ raise ValueError("Cannot take a larger sample than "
+ "population when 'replace=False'")
+ elif size < 0:
+ raise ValueError("negative dimensions are not allowed")
+
+ if p is not None:
+ if np.count_nonzero(p > 0) < size:
+ raise ValueError("Fewer non-zero entries in p than size")
+ n_uniq = 0
+ p = p.copy()
+ found = np.zeros(shape, dtype=np.int64)
+ flat_found = found.ravel()
+ while n_uniq < size:
+ x = self.rand(size - n_uniq)
+ if n_uniq > 0:
+ p[flat_found[0:n_uniq]] = 0
+ cdf = np.cumsum(p)
+ cdf /= cdf[-1]
+ new = cdf.searchsorted(x, side='right')
+ _, unique_indices = np.unique(new, return_index=True)
+ unique_indices.sort()
+ new = new.take(unique_indices)
+ flat_found[n_uniq:n_uniq + new.size] = new
+ n_uniq += new.size
+ idx = found
+ else:
+ idx = self.permutation(pop_size)[:size]
+ if shape is not None:
+ idx.shape = shape
+
+ if shape is None and isinstance(idx, np.ndarray):
+ # In most cases a scalar will have been made an array
+ idx = idx.item(0)
+
+ # Use samples as indices for a if a is array-like
+ if a.ndim == 0:
+ return idx
+
+ if shape is not None and idx.ndim == 0:
+ # If size == () then the user requested a 0-d array as opposed to
+ # a scalar object when size is None. However a[idx] is always a
+ # scalar and not an array. So this makes sure the result is an
+ # array, taking into account that np.array(item) may not work
+ # for object arrays.
+ res = np.empty((), dtype=a.dtype)
+ res[()] = a[idx]
+ return res
+
+ return a[idx]
+
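# Sketch (assumes ``import numpy as np``) of the weighted, with-replacement path
# above: build the cumulative distribution and invert it with searchsorted.
import numpy as np
p = np.array([0.1, 0.0, 0.3, 0.6, 0.0])
cdf = p.cumsum()
cdf /= cdf[-1]
u = np.random.random_sample(3)
idx = cdf.searchsorted(u, side='right')
print(idx)  # indices drawn with probabilities p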
+ def uniform(self, low=0.0, high=1.0, size=None):
+ """
+ uniform(low=0.0, high=1.0, size=None)
+
+ Draw samples from a uniform distribution.
+
+ Samples are uniformly distributed over the half-open interval
+ ``[low, high)`` (includes low, but excludes high). In other words,
+ any value within the given interval is equally likely to be drawn
+ by `uniform`.
+
+ Parameters
+ ----------
+ low : float or array_like of floats, optional
+ Lower boundary of the output interval. All values generated will be
+ greater than or equal to low. The default value is 0.
+ high : float or array_like of floats
+ Upper boundary of the output interval. All values generated will be
+ less than high. The default value is 1.0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``low`` and ``high`` are both scalars.
+ Otherwise, ``np.broadcast(low, high).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized uniform distribution.
+
+ See Also
+ --------
+ randint : Discrete uniform distribution, yielding integers.
+ random_integers : Discrete uniform distribution over the closed
+ interval ``[low, high]``.
+ random_sample : Floats uniformly distributed over ``[0, 1)``.
+ random : Alias for `random_sample`.
+ rand : Convenience function that accepts dimensions as input, e.g.,
+ ``rand(2,2)`` would generate a 2-by-2 array of floats,
+ uniformly distributed over ``[0, 1)``.
+
+ Notes
+ -----
+ The probability density function of the uniform distribution is
+
+ .. math:: p(x) = \\frac{1}{b - a}
+
+ anywhere within the interval ``[a, b)``, and zero elsewhere.
+
+ When ``high`` == ``low``, values of ``low`` will be returned.
+ If ``high`` < ``low``, the results are officially undefined
+ and may eventually raise an error, i.e. do not rely on this
+ function to behave when passed arguments satisfying that
+ inequality condition.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> s = np.random.uniform(-1,0,1000)
+
+ All values are within the given interval:
+
+ >>> np.all(s >= -1)
+ True
+ >>> np.all(s < 0)
+ True
+
+ Display the histogram of the samples, along with the
+ probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 15, density=True)
+ >>> plt.plot(bins, np.ones_like(bins), linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ cdef bint is_scalar = True
+ cdef np.ndarray alow, ahigh, arange
+ cdef double _low, _high, range
+ cdef object temp
+
+ alow = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ ahigh = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_DOUBLE, np.NPY_ALIGNED)
+
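+ # Fast path: when both bounds are scalars, pass plain doubles to the
+ # C generator; otherwise fall through and broadcast `low` against the
+ # per-element range `high - low`.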
+ if np.PyArray_NDIM(alow) == np.PyArray_NDIM(ahigh) == 0:
+ _low = PyFloat_AsDouble(low)
+ _high = PyFloat_AsDouble(high)
+ range = _high - _low
+ if not np.isfinite(range):
+ raise OverflowError('Range exceeds valid bounds')
+
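+ # cont() dispatches to the C generator: (function pointer, RNG state,
+ # size, lock, number of parameters), followed by three
+ # (value, name, constraint) triples and a final argument (None here)
+ # for an optional pre-allocated output.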
+ return cont(&random_uniform, self._brng, size, self.lock, 2,
+ _low, '', CONS_NONE,
+ range, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ temp = np.subtract(ahigh, alow)
+ Py_INCREF(temp)
+ # needed to get around Pyrex's automatic reference-counting
+ # rules because EnsureArray steals a reference
+ arange = <np.ndarray>np.PyArray_EnsureArray(temp)
+ if not np.all(np.isfinite(arange)):
+ raise OverflowError('Range exceeds valid bounds')
+ return cont(&random_uniform, self._brng, size, self.lock, 2,
+ alow, '', CONS_NONE,
+ arange, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def rand(self, *args):
+ """
+ rand(d0, d1, ..., dn)
+
+ Random values in a given shape.
+
+ .. note::
+ This is a convenience function for users porting code from Matlab,
+ and wraps `numpy.random.random_sample`. That function takes a
+ tuple to specify the size of the output, which is consistent with
+ other NumPy functions like `numpy.zeros` and `numpy.ones`.
+
+ Create an array of the given shape and populate it with
+ random samples from a uniform distribution
+ over ``[0, 1)``.
+
+ Parameters
+ ----------
+ d0, d1, ..., dn : int, optional
+ The dimensions of the returned array, must be non-negative.
+ If no argument is given a single Python float is returned.
+
+ Returns
+ -------
+ out : ndarray, shape ``(d0, d1, ..., dn)``
+ Random values.
+
+ See Also
+ --------
+ random
+
+ Examples
+ --------
+ >>> np.random.rand(3,2)
+ array([[ 0.14022471, 0.96360618], #random
+ [ 0.37601032, 0.25528411], #random
+ [ 0.49313049, 0.94909878]]) #random
+
+ """
+ if len(args) == 0:
+ return self.random_sample()
+ else:
+ return self.random_sample(size=args)
+
+ def randn(self, *args):
+ """
+ randn(d0, d1, ..., dn)
+
+ Return a sample (or samples) from the "standard normal" distribution.
+
+ .. note::
+ This is a convenience function for users porting code from Matlab,
+ and wraps `numpy.random.standard_normal`. That function takes a
+ tuple to specify the size of the output, which is consistent with
+ other NumPy functions like `numpy.zeros` and `numpy.ones`.
+
+ If positive int_like arguments are provided, `randn` generates an array
+ of shape ``(d0, d1, ..., dn)``, filled
+ with random floats sampled from a univariate "normal" (Gaussian)
+ distribution of mean 0 and variance 1. A single float randomly sampled
+ from the distribution is returned if no argument is provided.
+
+ Parameters
+ ----------
+ d0, d1, ..., dn : int, optional
+ The dimensions of the returned array, must be non-negative.
+ If no argument is given a single Python float is returned.
+
+ Returns
+ -------
+ Z : ndarray or float
+ A ``(d0, d1, ..., dn)``-shaped array of floating-point samples from
+ the standard normal distribution, or a single such float if
+ no parameters were supplied.
+
+ See Also
+ --------
+ standard_normal : Similar, but takes a tuple as its argument.
+ normal : Also accepts mu and sigma arguments.
+
+ Notes
+ -----
+ For random samples from :math:`N(\\mu, \\sigma^2)`, use:
+
+ ``sigma * np.random.randn(...) + mu``
+
+ Examples
+ --------
+ >>> np.random.randn()
+ 2.1923875335537315 # random
+
+ Two-by-four array of samples from N(3, 6.25):
+
+ >>> 3 + 2.5 * np.random.randn(2, 4)
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
+ """
+ if len(args) == 0:
+ return self.standard_normal()
+ else:
+ return self.standard_normal(size=args)
+
+ def random_integers(self, low, high=None, size=None):
+ """
+ random_integers(low, high=None, size=None)
+
+ Random integers of type np.int between `low` and `high`, inclusive.
+
+ Return random integers of type np.int from the "discrete uniform"
+ distribution in the closed interval [`low`, `high`]. If `high` is
+ None (the default), then results are from [1, `low`]. The np.int
+ type translates to the C long integer type and its precision
+ is platform dependent.
+
+ This function has been deprecated. Use randint instead.
+
+ .. deprecated:: 1.11.0
+
+ Parameters
+ ----------
+ low : int
+ Lowest (signed) integer to be drawn from the distribution (unless
+ ``high=None``, in which case this parameter is the *highest* such
+ integer).
+ high : int, optional
+ If provided, the largest (signed) integer to be drawn from the
+ distribution (see above for behavior if ``high=None``).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : int or ndarray of ints
+ `size`-shaped array of random integers from the appropriate
+ distribution, or a single such random int if `size` not provided.
+
+ See Also
+ --------
+ randint : Similar to `random_integers`, only for the half-open
+ interval [`low`, `high`), and 0 is the lowest value if `high` is
+ omitted.
+
+ Notes
+ -----
+ To sample from N evenly spaced floating-point numbers between a and b,
+ use::
+
+ a + (b - a) * (np.random.random_integers(N) - 1) / (N - 1.)
+
+ Examples
+ --------
+ >>> np.random.random_integers(5)
+ 4 # random
+ >>> type(np.random.random_integers(5))
+ <class 'numpy.int64'>
+ >>> np.random.random_integers(5, size=(3,2))
+ array([[5, 4], # random
+ [3, 3],
+ [4, 5]])
+
+ Choose five random numbers from the set of five evenly-spaced
+ numbers between 0 and 2.5, inclusive (*i.e.*, from the set
+ :math:`{0, 5/8, 10/8, 15/8, 20/8}`):
+
+ >>> 2.5 * (np.random.random_integers(5, size=(5,)) - 1) / 4.
+ array([ 0.625, 1.25 , 0.625, 0.625, 2.5 ]) # random
+
+ Roll two six sided dice 1000 times and sum the results:
+
+ >>> d1 = np.random.random_integers(1, 6, 1000)
+ >>> d2 = np.random.random_integers(1, 6, 1000)
+ >>> dsums = d1 + d2
+
+ Display results as a histogram:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(dsums, 11, density=True)
+ >>> plt.show()
+
+ """
+ if high is None:
+ warnings.warn(("This function is deprecated. Please call "
+ "randint(1, {low} + 1) instead".format(low=low)),
+ DeprecationWarning)
+ high = low
+ low = 1
+
+ else:
+ warnings.warn(("This function is deprecated. Please call "
+ "randint({low}, {high} + 1) "
+ "instead".format(low=low, high=high)),
+ DeprecationWarning)
+
+ return self.randint(low, high + 1, size=size, dtype='l')
+
+ # Complicated, continuous distributions:
+ def standard_normal(self, size=None):
+ """
+ standard_normal(size=None)
+
+ Draw samples from a standard Normal distribution (mean=0, stdev=1).
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : float or ndarray
+ A floating-point array of shape ``size`` of drawn samples, or a
+ single sample if ``size`` was not specified.
+
+ Notes
+ -----
+ For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
+
+ mu + sigma * np.random.standard_normal(size=...)
+ np.random.normal(mu, sigma, size=...)
+
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+
+ Examples
+ --------
+ >>> np.random.standard_normal()
+ 2.1923875335537315 #random
+
+ >>> s = np.random.standard_normal(8000)
+ >>> s
+ array([ 0.6888893 , 0.78096262, -0.89086505, ..., 0.49876311, # random
+ -0.38672696, -0.4685006 ]) # random
+ >>> s.shape
+ (8000,)
+ >>> s = np.random.standard_normal(size=(3, 4, 2))
+ >>> s.shape
+ (3, 4, 2)
+
+ Two-by-four array of samples from :math:`N(3, 6.25)`:
+
+ >>> 3 + 2.5 * np.random.standard_normal(size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
+ """
+ return cont(&legacy_gauss, self._aug_state, size, self.lock, 0,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None, None, CONS_NONE,
+ None)
+
+ def normal(self, loc=0.0, scale=1.0, size=None):
+ """
+ normal(loc=0.0, scale=1.0, size=None)
+
+ Draw random samples from a normal (Gaussian) distribution.
+
+ The probability density function of the normal distribution, first
+ derived by De Moivre and 200 years later by both Gauss and Laplace
+ independently [2]_, is often called the bell curve because of
+ its characteristic shape (see the example below).
+
+ The normal distribution occurs often in nature. For example, it
+ describes the commonly occurring distribution of samples influenced
+ by a large number of tiny, random disturbances, each with its own
+ unique distribution [2]_.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats
+ Mean ("centre") of the distribution.
+ scale : float or array_like of floats
+ Standard deviation (spread or "width") of the distribution. Must be
+ non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized normal distribution.
+
+ See Also
+ --------
+ scipy.stats.norm : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gaussian distribution is
+
+ .. math:: p(x) = \\frac{1}{\\sqrt{ 2 \\pi \\sigma^2 }}
+ e^{ - \\frac{ (x - \\mu)^2 } {2 \\sigma^2} },
+
+ where :math:`\\mu` is the mean and :math:`\\sigma` the standard
+ deviation. The square of the standard deviation, :math:`\\sigma^2`,
+ is called the variance.
+
+ The function has its peak at the mean, and its "spread" increases with
+ the standard deviation (the function reaches 0.607 times its maximum at
+ :math:`\\mu + \\sigma` and :math:`\\mu - \\sigma` [2]_). This implies that
+ `numpy.random.normal` is more likely to return samples lying close to
+ the mean, rather than those far away.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Normal distribution",
+ https://en.wikipedia.org/wiki/Normal_distribution
+ .. [2] P. R. Peebles Jr., "Central Limit Theorem" in "Probability,
+ Random Variables and Random Signal Principles", 4th ed., 2001,
+ pp. 51, 51, 125.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, sigma = 0, 0.1 # mean and standard deviation
+ >>> s = np.random.normal(mu, sigma, 1000)
+
+ Verify the mean and the variance:
+
+ >>> abs(mu - np.mean(s))
+ 0.0 # may vary
+
+ >>> abs(sigma - np.std(s, ddof=1))
+ 0.1 # may vary
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> plt.plot(bins, 1/(sigma * np.sqrt(2 * np.pi)) *
+ ... np.exp( - (bins - mu)**2 / (2 * sigma**2) ),
+ ... linewidth=2, color='r')
+ >>> plt.show()
+
+ Two-by-four array of samples from N(3, 6.25):
+
+ >>> np.random.normal(3, 2.5, size=(2, 4))
+ array([[-4.49401501, 4.00950034, -1.81814867, 7.29718677], # random
+ [ 0.39924804, 4.68456316, 4.99394529, 4.84057254]]) # random
+
+ """
+ return cont(&legacy_normal, self._aug_state, size, self.lock, 2,
+ loc, '', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def standard_gamma(self, shape, size=None):
+ """
+ standard_gamma(shape, size=None)
+
+ Draw samples from a standard Gamma distribution.
+
+ Samples are drawn from a Gamma distribution with specified parameters,
+ shape (sometimes designated "k") and scale=1.
+
+ Parameters
+ ----------
+ shape : float or array_like of floats
+ Parameter, must be non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``shape`` is a scalar. Otherwise,
+ ``np.array(shape).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized standard gamma distribution.
+
+ See Also
+ --------
+ scipy.stats.gamma : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gamma distribution is
+
+ .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
+
+ where :math:`k` is the shape and :math:`\\theta` the scale,
+ and :math:`\\Gamma` is the Gamma function.
+
+ The Gamma distribution is often used to model the times to failure of
+ electronic components, and arises naturally in processes for which the
+ waiting times between Poisson distributed events are relevant.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/GammaDistribution.html
+ .. [2] Wikipedia, "Gamma distribution",
+ https://en.wikipedia.org/wiki/Gamma_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> shape, scale = 2., 1. # mean and width
+ >>> s = np.random.standard_gamma(shape, 1000000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> import scipy.special as sps
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
+ >>> y = bins**(shape-1) * ((np.exp(-bins/scale))/ \\
+ ... (sps.gamma(shape) * scale**shape))
+ >>> plt.plot(bins, y, linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ return cont(&legacy_standard_gamma, self._aug_state, size, self.lock, 1,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE,
+ None)
+
+ def gamma(self, shape, scale=1.0, size=None):
+ """
+ gamma(shape, scale=1.0, size=None)
+
+ Draw samples from a Gamma distribution.
+
+ Samples are drawn from a Gamma distribution with specified parameters,
+ `shape` (sometimes designated "k") and `scale` (sometimes designated
+ "theta"), where both parameters are > 0.
+
+ Parameters
+ ----------
+ shape : float or array_like of floats
+ The shape of the gamma distribution. Must be non-negative.
+ scale : float or array_like of floats, optional
+ The scale of the gamma distribution. Must be non-negative.
+ Default is equal to 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``shape`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(shape, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized gamma distribution.
+
+ See Also
+ --------
+ scipy.stats.gamma : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Gamma distribution is
+
+ .. math:: p(x) = x^{k-1}\\frac{e^{-x/\\theta}}{\\theta^k\\Gamma(k)},
+
+ where :math:`k` is the shape and :math:`\\theta` the scale,
+ and :math:`\\Gamma` is the Gamma function.
+
+ The Gamma distribution is often used to model the times to failure of
+ electronic components, and arises naturally in processes for which the
+ waiting times between Poisson distributed events are relevant.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Gamma Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/GammaDistribution.html
+ .. [2] Wikipedia, "Gamma distribution",
+ https://en.wikipedia.org/wiki/Gamma_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> shape, scale = 2., 2. # mean=4, std=2*sqrt(2)
+ >>> s = np.random.gamma(shape, scale, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> import scipy.special as sps
+ >>> count, bins, ignored = plt.hist(s, 50, density=True)
+ >>> y = bins**(shape-1)*(np.exp(-bins/scale) /
+ ... (sps.gamma(shape)*scale**shape))
+ >>> plt.plot(bins, y, linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ return cont(&legacy_gamma, self._aug_state, size, self.lock, 2,
+ shape, 'shape', CONS_NON_NEGATIVE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def f(self, dfnum, dfden, size=None):
+ """
+ f(dfnum, dfden, size=None)
+
+ Draw samples from an F distribution.
+
+ Samples are drawn from an F distribution with specified parameters,
+ `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
+ freedom in denominator), where both parameters must be greater than
+ zero.
+
+ The random variate of the F distribution (also known as the
+ Fisher distribution) is a continuous probability distribution
+ that arises in ANOVA tests, and is the ratio of two chi-square
+ variates.
+
+ Parameters
+ ----------
+ dfnum : float or array_like of floats
+ Degrees of freedom in numerator, must be > 0.
+ dfden : float or array_like of float
+ Degrees of freedom in denominator, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``dfnum`` and ``dfden`` are both scalars.
+ Otherwise, ``np.broadcast(dfnum, dfden).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Fisher distribution.
+
+ See Also
+ --------
+ scipy.stats.f : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The F statistic is used to compare in-group variances to between-group
+ variances. Calculating the distribution depends on the sampling, and
+ so it is a function of the respective degrees of freedom in the
+ problem. The variable `dfnum` is the number of samples minus one, the
+ between-groups degrees of freedom, while `dfden` is the within-groups
+ degrees of freedom, the sum of the number of samples in each group
+ minus the number of groups.
+
+ References
+ ----------
+ .. [1] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
+ Fifth Edition, 2002.
+ .. [2] Wikipedia, "F-distribution",
+ https://en.wikipedia.org/wiki/F-distribution
+
+ Examples
+ --------
+ An example from Glantz [1]_, pp 47-40:
+
+ Two groups, children of diabetics (25 people) and children from people
+ without diabetes (25 controls). Fasting blood glucose was measured,
+ case group had a mean value of 86.1, controls had a mean value of
+ 82.2. Standard deviations were 2.09 and 2.49 respectively. Are these
+ data consistent with the null hypothesis that the parents' diabetic
+ status does not affect their children's blood glucose levels?
+ Calculating the F statistic from the data gives a value of 36.01.
+
+ Draw samples from the distribution:
+
+ >>> dfnum = 1. # between group degrees of freedom
+ >>> dfden = 48. # within groups degrees of freedom
+ >>> s = np.random.f(dfnum, dfden, 1000)
+
+ The lower bound for the top 1% of the samples is:
+
+ >>> np.sort(s)[-10]
+ 7.61988120985 # random
+
+ So there is about a 1% chance that the F statistic will exceed 7.62;
+ the measured value is 36, so the null hypothesis is rejected at the 1%
+ level.
+
+ """
+ return cont(&legacy_f, self._aug_state, size, self.lock, 2,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def noncentral_f(self, dfnum, dfden, nonc, size=None):
+ """
+ noncentral_f(dfnum, dfden, nonc, size=None)
+
+ Draw samples from the noncentral F distribution.
+
+ Samples are drawn from an F distribution with specified parameters,
+ `dfnum` (degrees of freedom in numerator) and `dfden` (degrees of
+ freedom in denominator), where both parameters > 0.
+ `nonc` is the non-centrality parameter.
+
+ Parameters
+ ----------
+ dfnum : float or array_like of floats
+ Numerator degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.14.0
+ Earlier NumPy versions required dfnum > 1.
+ dfden : float or array_like of floats
+ Denominator degrees of freedom, must be > 0.
+ nonc : float or array_like of floats
+ Non-centrality parameter, the sum of the squares of the numerator
+ means, must be >= 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``dfnum``, ``dfden``, and ``nonc``
+ are all scalars. Otherwise, ``np.broadcast(dfnum, dfden, nonc).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized noncentral Fisher distribution.
+
+ Notes
+ -----
+ When calculating the power of an experiment (power = probability of
+ rejecting the null hypothesis when a specific alternative is true) the
+ non-central F statistic becomes important. When the null hypothesis is
+ true, the F statistic follows a central F distribution. When the null
+ hypothesis is not true, then it follows a non-central F statistic.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Noncentral F-Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/NoncentralF-Distribution.html
+ .. [2] Wikipedia, "Noncentral F-distribution",
+ https://en.wikipedia.org/wiki/Noncentral_F-distribution
+
+ Examples
+ --------
+ In a study, testing for a specific alternative to the null hypothesis
+ requires use of the Noncentral F distribution. We need to calculate the
+ area in the tail of the distribution that exceeds the value of the F
+ distribution for the null hypothesis. We'll plot the two probability
+ distributions for comparison.
+
+ >>> dfnum = 3 # between group deg of freedom
+ >>> dfden = 20 # within groups degrees of freedom
+ >>> nonc = 3.0
+ >>> nc_vals = np.random.noncentral_f(dfnum, dfden, nonc, 1000000)
+ >>> NF = np.histogram(nc_vals, bins=50, density=True)
+ >>> c_vals = np.random.f(dfnum, dfden, 1000000)
+ >>> F = np.histogram(c_vals, bins=50, density=True)
+ >>> import matplotlib.pyplot as plt
+ >>> plt.plot(F[1][1:], F[0])
+ >>> plt.plot(NF[1][1:], NF[0])
+ >>> plt.show()
+
+ """
+ return cont(&legacy_noncentral_f, self._aug_state, size, self.lock, 3,
+ dfnum, 'dfnum', CONS_POSITIVE,
+ dfden, 'dfden', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE, None)
+
+ def chisquare(self, df, size=None):
+ """
+ chisquare(df, size=None)
+
+ Draw samples from a chi-square distribution.
+
+ When `df` independent random variables, each with standard normal
+ distributions (mean 0, variance 1), are squared and summed, the
+ resulting distribution is chi-square (see Notes). This distribution
+ is often used in hypothesis testing.
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Number of degrees of freedom, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` is a scalar. Otherwise,
+ ``np.array(df).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized chi-square distribution.
+
+ Raises
+ ------
+ ValueError
+ When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
+ is given.
+
+ Notes
+ -----
+ The variable obtained by summing the squares of `df` independent,
+ standard normally distributed random variables:
+
+ .. math:: Q = \\sum_{i=1}^{\\mathtt{df}} X^2_i
+
+ is chi-square distributed, denoted
+
+ .. math:: Q \\sim \\chi^2_k.
+
+ The probability density function of the chi-squared distribution is
+
+ .. math:: p(x) = \\frac{(1/2)^{k/2}}{\\Gamma(k/2)}
+ x^{k/2 - 1} e^{-x/2},
+
+ where :math:`\\Gamma` is the gamma function,
+
+ .. math:: \\Gamma(x) = \\int_0^{\\infty} t^{x - 1} e^{-t} dt.
+
+ References
+ ----------
+ .. [1] NIST "Engineering Statistics Handbook"
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3666.htm
+
+ Examples
+ --------
+ >>> np.random.chisquare(2,4)
+ array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
+
+ """
+ return cont(&legacy_chisquare, self._aug_state, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def noncentral_chisquare(self, df, nonc, size=None):
+ """
+ noncentral_chisquare(df, nonc, size=None)
+
+ Draw samples from a noncentral chi-square distribution.
+
+ The noncentral :math:`\\chi^2` distribution is a generalization of
+ the :math:`\\chi^2` distribution.
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
+
+ .. versionchanged:: 1.10.0
+ Earlier NumPy versions required df > 1.
+ nonc : float or array_like of floats
+ Non-centrality, must be non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` and ``nonc`` are both scalars.
+ Otherwise, ``np.broadcast(df, nonc).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized noncentral chi-square distribution.
+
+ Notes
+ -----
+ The probability density function for the noncentral Chi-square
+ distribution is
+
+ .. math:: P(x;df,nonc) = \\sum^{\\infty}_{i=0}
+ \\frac{e^{-nonc/2}(nonc/2)^{i}}{i!}
+ P_{Y_{df+2i}}(x),
+
+ where :math:`Y_{q}` is the Chi-square with q degrees of freedom.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Noncentral chi-squared distribution"
+ https://en.wikipedia.org/wiki/Noncentral_chi-squared_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram
+
+ >>> import matplotlib.pyplot as plt
+ >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
+ ... bins=200, density=True)
+ >>> plt.show()
+
+ Draw values from a noncentral chisquare with very small noncentrality,
+ and compare to a chisquare.
+
+ >>> plt.figure()
+ >>> values = plt.hist(np.random.noncentral_chisquare(3, .0000001, 100000),
+ ... bins=np.arange(0., 25, .1), density=True)
+ >>> values2 = plt.hist(np.random.chisquare(3, 100000),
+ ... bins=np.arange(0., 25, .1), density=True)
+ >>> plt.plot(values[1][0:-1], values[0]-values2[0], 'ob')
+ >>> plt.show()
+
+ Demonstrate how large values of non-centrality lead to a more symmetric
+ distribution.
+
+ >>> plt.figure()
+ >>> values = plt.hist(np.random.noncentral_chisquare(3, 20, 100000),
+ ... bins=200, density=True)
+ >>> plt.show()
+
+ """
+ return cont(&legacy_noncentral_chisquare, self._aug_state, size, self.lock, 2,
+ df, 'df', CONS_POSITIVE,
+ nonc, 'nonc', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def standard_cauchy(self, size=None):
+ """
+ standard_cauchy(size=None)
+
+ Draw samples from a standard Cauchy distribution with mode = 0.
+
+ Also known as the Lorentz distribution.
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ samples : ndarray or scalar
+ The drawn samples.
+
+ Notes
+ -----
+ The probability density function for the full Cauchy distribution is
+
+ .. math:: P(x; x_0, \\gamma) = \\frac{1}{\\pi \\gamma \\bigl[ 1+
+ (\\frac{x-x_0}{\\gamma})^2 \\bigr] }
+
+ and the Standard Cauchy distribution just sets :math:`x_0=0` and
+ :math:`\\gamma=1`.
+
+ The Cauchy distribution arises in the solution to the driven harmonic
+ oscillator problem, and also describes spectral line broadening. It
+ also describes the distribution of values at which a line tilted at
+ a random angle will cut the x axis.
+
+ When studying hypothesis tests that assume normality, seeing how the
+ tests perform on data from a Cauchy distribution is a good indicator of
+ their sensitivity to a heavy-tailed distribution, since the Cauchy looks
+ very much like a Gaussian distribution, but with heavier tails.
+
+ References
+ ----------
+ .. [1] NIST/SEMATECH e-Handbook of Statistical Methods, "Cauchy
+ Distribution",
+ https://www.itl.nist.gov/div898/handbook/eda/section3/eda3663.htm
+ .. [2] Weisstein, Eric W. "Cauchy Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/CauchyDistribution.html
+ .. [3] Wikipedia, "Cauchy distribution"
+ https://en.wikipedia.org/wiki/Cauchy_distribution
+
+ Examples
+ --------
+ Draw samples and plot the distribution:
+
+ >>> import matplotlib.pyplot as plt
+ >>> s = np.random.standard_cauchy(1000000)
+ >>> s = s[(s>-25) & (s<25)] # truncate distribution so it plots well
+ >>> plt.hist(s, bins=100)
+ >>> plt.show()
+
+ """
+ return cont(&legacy_standard_cauchy, self._aug_state, size, self.lock, 0,
+ 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, 0.0, '', CONS_NONE, None)
+
+ def standard_t(self, df, size=None):
+ """
+ standard_t(df, size=None)
+
+ Draw samples from a standard Student's t distribution with `df` degrees
+ of freedom.
+
+ A special case of the hyperbolic distribution. As `df` gets
+ large, the result resembles that of the standard normal
+ distribution (`standard_normal`).
+
+ Parameters
+ ----------
+ df : float or array_like of floats
+ Degrees of freedom, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``df`` is a scalar. Otherwise,
+ ``np.array(df).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized standard Student's t distribution.
+
+ Notes
+ -----
+ The probability density function for the t distribution is
+
+ .. math:: P(x, df) = \\frac{\\Gamma(\\frac{df+1}{2})}{\\sqrt{\\pi df}
+ \\Gamma(\\frac{df}{2})}\\Bigl( 1+\\frac{x^2}{df} \\Bigr)^{-(df+1)/2}
+
+ The t test is based on an assumption that the data come from a
+ Normal distribution. The t test provides a way to test whether
+ the sample mean (that is the mean calculated from the data) is
+ a good estimate of the true mean.
+
+ The derivation of the t-distribution was first published in
+ 1908 by William Gosset while working for the Guinness Brewery
+ in Dublin. Due to proprietary issues, he had to publish under
+ a pseudonym, and so he used the name Student.
+
+ References
+ ----------
+ .. [1] Dalgaard, Peter, "Introductory Statistics With R",
+ Springer, 2002.
+ .. [2] Wikipedia, "Student's t-distribution"
+ https://en.wikipedia.org/wiki/Student's_t-distribution
+
+ Examples
+ --------
+ From Dalgaard page 83 [1]_, suppose the daily energy intake for 11
+ women in kilojoules (kJ) is:
+
+ >>> intake = np.array([5260., 5470, 5640, 6180, 6390, 6515, 6805, 7515, \\
+ ... 7515, 8230, 8770])
+
+ Does their energy intake deviate systematically from the recommended
+ value of 7725 kJ?
+
+ We have 10 degrees of freedom, so is the sample mean within 95% of the
+ recommended value?
+
+ >>> s = np.random.standard_t(10, size=100000)
+ >>> np.mean(intake)
+ 6753.636363636364
+ >>> intake.std(ddof=1)
+ 1142.1232221373727
+
+ Calculate the t statistic, setting the ddof parameter to the unbiased
+ value so the divisor in the standard deviation will be degrees of
+ freedom, N-1.
+
+ >>> t = (np.mean(intake)-7725)/(intake.std(ddof=1)/np.sqrt(len(intake)))
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(s, bins=100, density=True)
+
+ For a one-sided t-test, how far out in the distribution does the t
+ statistic appear?
+
+ >>> np.sum(s<t) / float(len(s))
+ 0.0090699999999999999 #random
+
+ So the p-value is about 0.009, small enough to reject the null
+ hypothesis at the 1% significance level.
+
+ """
+ return cont(&legacy_standard_t, self._aug_state, size, self.lock, 1,
+ df, 'df', CONS_POSITIVE,
+ 0, '', CONS_NONE,
+ 0, '', CONS_NONE,
+ None)
+
+ def vonmises(self, mu, kappa, size=None):
+ """
+ vonmises(mu, kappa, size=None)
+
+ Draw samples from a von Mises distribution.
+
+ Samples are drawn from a von Mises distribution with specified mode
+ (mu) and dispersion (kappa), on the interval [-pi, pi].
+
+ The von Mises distribution (also known as the circular normal
+ distribution) is a continuous probability distribution on the unit
+ circle. It may be thought of as the circular analogue of the normal
+ distribution.
+
+ Parameters
+ ----------
+ mu : float or array_like of floats
+ Mode ("center") of the distribution.
+ kappa : float or array_like of floats
+ Dispersion of the distribution, has to be >=0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mu`` and ``kappa`` are both scalars.
+ Otherwise, ``np.broadcast(mu, kappa).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized von Mises distribution.
+
+ See Also
+ --------
+ scipy.stats.vonmises : probability density function, distribution, or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the von Mises distribution is
+
+ .. math:: p(x) = \\frac{e^{\\kappa\\cos(x-\\mu)}}{2\\pi I_0(\\kappa)},
+
+ where :math:`\\mu` is the mode and :math:`\\kappa` the dispersion,
+ and :math:`I_0(\\kappa)` is the modified Bessel function of order 0.
+
+ The von Mises is named for Richard Edler von Mises, who was born in
+ Austria-Hungary, in what is now the Ukraine. He fled to the United
+ States in 1939 and became a professor at Harvard. He worked in
+ probability theory, aerodynamics, fluid mechanics, and philosophy of
+ science.
+
+ References
+ ----------
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+ .. [2] von Mises, R., "Mathematical Theory of Probability
+ and Statistics", New York: Academic Press, 1964.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, kappa = 0.0, 4.0 # mean and dispersion
+ >>> s = np.random.vonmises(mu, kappa, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy.special import i0
+ >>> plt.hist(s, 50, density=True)
+ >>> x = np.linspace(-np.pi, np.pi, num=51)
+ >>> y = np.exp(kappa*np.cos(x-mu))/(2*np.pi*i0(kappa))
+ >>> plt.plot(x, y, linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ return cont(&random_vonmises, self._brng, size, self.lock, 2,
+ mu, 'mu', CONS_NONE,
+ kappa, 'kappa', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def pareto(self, a, size=None):
+ """
+ pareto(a, size=None)
+
+ Draw samples from a Pareto II or Lomax distribution with
+ specified shape.
+
+ The Lomax or Pareto II distribution is a shifted Pareto
+ distribution. The classical Pareto distribution can be
+ obtained from the Lomax distribution by adding 1 and
+ multiplying by the scale parameter ``m`` (see Notes). The
+ smallest value of the Lomax distribution is zero while for the
+ classical Pareto distribution it is ``mu``, where the standard
+ Pareto distribution has location ``mu = 1``. Lomax can also
+ be considered as a simplified version of the Generalized
+ Pareto distribution (available in SciPy), with the scale set
+ to one and the location set to zero.
+
+ The Pareto distribution must be greater than zero, and is
+ unbounded above. It is also known as the "80-20 rule". In
+ this distribution, 80 percent of the weights are in the lowest
+ 20 percent of the range, while the other 20 percent fill the
+ remaining 80 percent of the range.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Shape of the distribution. Must be positive.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Pareto distribution.
+
+ See Also
+ --------
+ scipy.stats.lomax : probability density function, distribution or
+ cumulative density function, etc.
+ scipy.stats.genpareto : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Pareto distribution is
+
+ .. math:: p(x) = \\frac{am^a}{x^{a+1}}
+
+ where :math:`a` is the shape and :math:`m` the scale.
+
+ The Pareto distribution, named after the Italian economist
+ Vilfredo Pareto, is a power law probability distribution
+ useful in many real world problems. Outside the field of
+ economics it is generally referred to as the Bradford
+ distribution. Pareto developed the distribution to describe
+ the distribution of wealth in an economy. It has also found
+ use in insurance, web page access statistics, oil field sizes,
+ and many other problems, including the download frequency for
+ projects in Sourceforge [1]_. It is one of the so-called
+ "fat-tailed" distributions.
+
+
+ References
+ ----------
+ .. [1] Francis Hunt and Paul Johnson, On the Pareto Distribution of
+ Sourceforge projects.
+ .. [2] Pareto, V. (1896). Course of Political Economy. Lausanne.
+ .. [3] Reiss, R.D., Thomas, M.(2001), Statistical Analysis of Extreme
+ Values, Birkhauser Verlag, Basel, pp 23-30.
+ .. [4] Wikipedia, "Pareto distribution",
+ https://en.wikipedia.org/wiki/Pareto_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a, m = 3., 2. # shape and mode
+ >>> s = (np.random.pareto(a, 1000) + 1) * m
+
+ Display the histogram of the samples, along with the probability
+ density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, _ = plt.hist(s, 100, density=True)
+ >>> fit = a*m**a / bins**(a+1)
+ >>> plt.plot(bins, max(count)*fit/max(fit), linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ return cont(&legacy_pareto, self._aug_state, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def weibull(self, a, size=None):
+ """
+ weibull(a, size=None)
+
+ Draw samples from a Weibull distribution.
+
+ Draw samples from a 1-parameter Weibull distribution with the given
+ shape parameter `a`.
+
+ .. math:: X = (-\\ln(U))^{1/a}
+
+ Here, U is drawn from the uniform distribution over (0,1].
+
+ The more common 2-parameter Weibull, including a scale parameter
+ :math:`\\lambda` is just :math:`X = \\lambda(-\\ln(U))^{1/a}`.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Shape parameter of the distribution. Must be nonnegative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Weibull distribution.
+
+ See Also
+ --------
+ scipy.stats.weibull_max
+ scipy.stats.weibull_min
+ scipy.stats.genextreme
+ gumbel
+
+ Notes
+ -----
+ The Weibull (or Type III asymptotic extreme value distribution
+ for smallest values, SEV Type III, or Rosin-Rammler
+ distribution) is one of a class of Generalized Extreme Value
+ (GEV) distributions used in modeling extreme value problems.
+ This class includes the Gumbel and Frechet distributions.
+
+ The probability density for the Weibull distribution is
+
+ .. math:: p(x) = \\frac{a}
+ {\\lambda}(\\frac{x}{\\lambda})^{a-1}e^{-(x/\\lambda)^a},
+
+ where :math:`a` is the shape and :math:`\\lambda` the scale.
+
+ The function has its peak (the mode) at
+ :math:`\\lambda(\\frac{a-1}{a})^{1/a}`.
+
+ When ``a = 1``, the Weibull distribution reduces to the exponential
+ distribution.
+
+ References
+ ----------
+ .. [1] Waloddi Weibull, Royal Technical University, Stockholm,
+ 1939 "A Statistical Theory Of The Strength Of Materials",
+ Ingeniorsvetenskapsakademiens Handlingar Nr 151, 1939,
+ Generalstabens Litografiska Anstalts Forlag, Stockholm.
+ .. [2] Waloddi Weibull, "A Statistical Distribution Function of
+ Wide Applicability", Journal Of Applied Mechanics ASME Paper
+ 1951.
+ .. [3] Wikipedia, "Weibull distribution",
+ https://en.wikipedia.org/wiki/Weibull_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = 5. # shape
+ >>> s = np.random.weibull(a, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> x = np.arange(1,100.)/50.
+ >>> def weib(x,n,a):
+ ... return (a / n) * (x / n)**(a - 1) * np.exp(-(x / n)**a)
+
+ >>> count, bins, ignored = plt.hist(np.random.weibull(5.,1000))
+ >>> x = np.arange(1,100.)/50.
+ >>> scale = count.max()/weib(x, 1., 5.).max()
+ >>> plt.plot(x, weib(x, 1., 5.)*scale)
+ >>> plt.show()
+
+ """
+ return cont(&legacy_weibull, self._aug_state, size, self.lock, 1,
+ a, 'a', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def power(self, a, size=None):
+ """
+ power(a, size=None)
+
+ Draws samples in [0, 1] from a power distribution with positive
+ exponent a - 1.
+
+ Also known as the power function distribution.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Parameter of the distribution. Must be non-negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized power distribution.
+
+ Raises
+ ------
+ ValueError
+ If a <= 0.
+
+ Notes
+ -----
+ The probability density function is
+
+ .. math:: P(x; a) = ax^{a-1}, 0 \\le x \\le 1, a>0.
+
+ The power function distribution is just the inverse of the Pareto
+ distribution. It may also be seen as a special case of the Beta
+ distribution.
+
+ It is used, for example, in modeling the over-reporting of insurance
+ claims.
+
+ References
+ ----------
+ .. [1] Christian Kleiber, Samuel Kotz, "Statistical size distributions
+ in economics and actuarial sciences", Wiley, 2003.
+ .. [2] Heckert, N. A. and Filliben, James J. "NIST Handbook 148:
+ Dataplot Reference Manual, Volume 2: Let Subcommands and Library
+ Functions", National Institute of Standards and Technology
+ Handbook Series, June 2003.
+ https://www.itl.nist.gov/div898/software/dataplot/refman2/auxillar/powpdf.pdf
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = 5. # shape
+ >>> samples = 1000
+ >>> s = np.random.power(a, samples)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, bins=30)
+ >>> x = np.linspace(0, 1, 100)
+ >>> y = a*x**(a-1.)
+ >>> normed_y = samples*np.diff(bins)[0]*y
+ >>> plt.plot(x, normed_y)
+ >>> plt.show()
+
+ Compare the power function distribution to the inverse of the Pareto.
+
+ >>> from scipy import stats
+ >>> rvs = np.random.power(5, 1000000)
+ >>> rvsp = np.random.pareto(5, 1000000)
+ >>> xx = np.linspace(0,1,100)
+ >>> powpdf = stats.powerlaw.pdf(xx,5)
+
+ >>> plt.figure()
+ >>> plt.hist(rvs, bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-')
+ >>> plt.title('np.random.power(5)')
+
+ >>> plt.figure()
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-')
+ >>> plt.title('inverse of 1 + np.random.pareto(5)')
+
+ >>> plt.figure()
+ >>> plt.hist(1./(1.+rvsp), bins=50, density=True)
+ >>> plt.plot(xx,powpdf,'r-')
+ >>> plt.title('inverse of stats.pareto(5)')
+
+ """
+ return cont(&legacy_power, self._aug_state, size, self.lock, 1,
+ a, 'a', CONS_POSITIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def laplace(self, loc=0.0, scale=1.0, size=None):
+ """
+ laplace(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from the Laplace or double exponential distribution with
+ specified location (or mean) and scale (decay).
+
+ The Laplace distribution is similar to the Gaussian/normal distribution,
+ but is sharper at the peak and has fatter tails. It represents the
+ difference between two independent, identically distributed exponential
+ random variables.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ The position, :math:`\\mu`, of the distribution peak. Default is 0.
+ scale : float or array_like of floats, optional
+ :math:`\\lambda`, the exponential decay. Default is 1. Must be non-
+ negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Laplace distribution.
+
+ Notes
+ -----
+ It has the probability density function
+
+ .. math:: f(x; \\mu, \\lambda) = \\frac{1}{2\\lambda}
+ \\exp\\left(-\\frac{|x - \\mu|}{\\lambda}\\right).
+
+ The first law of Laplace, from 1774, states that the frequency
+ of an error can be expressed as an exponential function of the
+ absolute magnitude of the error, which leads to the Laplace
+ distribution. For many problems in economics and health
+ sciences, this distribution seems to model the data better
+ than the standard Gaussian distribution.
+
+ References
+ ----------
+ .. [1] Abramowitz, M. and Stegun, I. A. (Eds.). "Handbook of
+ Mathematical Functions with Formulas, Graphs, and Mathematical
+ Tables, 9th printing," New York: Dover, 1972.
+ .. [2] Kotz, Samuel, et. al. "The Laplace Distribution and
+ Generalizations, " Birkhauser, 2001.
+ .. [3] Weisstein, Eric W. "Laplace Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/LaplaceDistribution.html
+ .. [4] Wikipedia, "Laplace distribution",
+ https://en.wikipedia.org/wiki/Laplace_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution
+
+ >>> loc, scale = 0., 1.
+ >>> s = np.random.laplace(loc, scale, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> x = np.arange(-8., 8., .01)
+ >>> pdf = np.exp(-abs(x-loc)/scale)/(2.*scale)
+ >>> plt.plot(x, pdf)
+
+ Plot Gaussian for comparison:
+
+ >>> g = (1/(scale * np.sqrt(2 * np.pi)) *
+ ... np.exp(-(x - loc)**2 / (2 * scale**2)))
+ >>> plt.plot(x,g)
+
+ """
+ return cont(&random_laplace, self._brng, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def gumbel(self, loc=0.0, scale=1.0, size=None):
+ """
+ gumbel(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from a Gumbel distribution.
+
+ Draw samples from a Gumbel distribution with specified location and
+ scale. For more information on the Gumbel distribution, see
+ Notes and References below.
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ The location of the mode of the distribution. Default is 0.
+ scale : float or array_like of floats, optional
+ The scale parameter of the distribution. Default is 1. Must be non-
+ negative.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Gumbel distribution.
+
+ See Also
+ --------
+ scipy.stats.gumbel_l
+ scipy.stats.gumbel_r
+ scipy.stats.genextreme
+ weibull
+
+ Notes
+ -----
+ The Gumbel (or Smallest Extreme Value (SEV) or the Smallest Extreme
+ Value Type I) distribution is one of a class of Generalized Extreme
+ Value (GEV) distributions used in modeling extreme value problems.
+ The Gumbel is a special case of the Extreme Value Type I distribution
+ for maximums from distributions with "exponential-like" tails.
+
+ The probability density for the Gumbel distribution is
+
+ .. math:: p(x) = \\frac{e^{-(x - \\mu)/ \\beta}}{\\beta} e^{ -e^{-(x - \\mu)/
+ \\beta}},
+
+ where :math:`\\mu` is the mode, a location parameter, and
+ :math:`\\beta` is the scale parameter.
+
+ The Gumbel (named for German mathematician Emil Julius Gumbel) was used
+ very early in the hydrology literature, for modeling the occurrence of
+ flood events. It is also used for modeling maximum wind speed and
+ rainfall rates. It is a "fat-tailed" distribution - the probability of
+ an event in the tail of the distribution is larger than if one used a
+ Gaussian, hence the surprisingly frequent occurrence of 100-year
+ floods. Floods were initially modeled as a Gaussian process, which
+ underestimated the frequency of extreme events.
+
+ It is one of a class of extreme value distributions, the Generalized
+ Extreme Value (GEV) distributions, which also includes the Weibull and
+ Frechet.
+
+ The function has a mean of :math:`\\mu + 0.57721\\beta` and a variance
+ of :math:`\\frac{\\pi^2}{6}\\beta^2`.
+
+ References
+ ----------
+ .. [1] Gumbel, E. J., "Statistics of Extremes,"
+ New York: Columbia University Press, 1958.
+ .. [2] Reiss, R.-D. and Thomas, M., "Statistical Analysis of Extreme
+ Values from Insurance, Finance, Hydrology and Other Fields,"
+ Basel: Birkhauser Verlag, 2001.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, beta = 0, 0.1 # location and scale
+ >>> s = np.random.gumbel(mu, beta, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 30, density=True)
+ >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
+ ... * np.exp( -np.exp( -(bins - mu) /beta) ),
+ ... linewidth=2, color='r')
+ >>> plt.show()
+
+ Show how an extreme value distribution can arise from a Gaussian process
+ and compare to a Gaussian:
+
+ >>> means = []
+ >>> maxima = []
+ >>> for i in range(0,1000) :
+ ... a = np.random.normal(mu, beta, 1000)
+ ... means.append(a.mean())
+ ... maxima.append(a.max())
+ >>> count, bins, ignored = plt.hist(maxima, 30, density=True)
+ >>> beta = np.std(maxima) * np.sqrt(6) / np.pi
+ >>> mu = np.mean(maxima) - 0.57721*beta
+ >>> plt.plot(bins, (1/beta)*np.exp(-(bins - mu)/beta)
+ ... * np.exp(-np.exp(-(bins - mu)/beta)),
+ ... linewidth=2, color='r')
+ >>> plt.plot(bins, 1/(beta * np.sqrt(2 * np.pi))
+ ... * np.exp(-(bins - mu)**2 / (2 * beta**2)),
+ ... linewidth=2, color='g')
+ >>> plt.show()
+
+ """
+ return cont(&random_gumbel, self._brng, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def logistic(self, loc=0.0, scale=1.0, size=None):
+ """
+ logistic(loc=0.0, scale=1.0, size=None)
+
+ Draw samples from a logistic distribution.
+
+ Samples are drawn from a logistic distribution with specified
+ parameters, loc (location or mean, also median), and scale (>0).
+
+ Parameters
+ ----------
+ loc : float or array_like of floats, optional
+ Parameter of the distribution. Default is 0.
+ scale : float or array_like of floats, optional
+ Parameter of the distribution. Must be non-negative.
+ Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``loc`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(loc, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized logistic distribution.
+
+ See Also
+ --------
+ scipy.stats.logistic : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Logistic distribution is
+
+ .. math:: P(x) = \\frac{e^{-(x-\\mu)/s}}{s(1+e^{-(x-\\mu)/s})^2},
+
+ where :math:`\\mu` = location and :math:`s` = scale.
+
+ The Logistic distribution is used in Extreme Value problems where it
+ can act as a mixture of Gumbel distributions, in Epidemiology, and by
+ the World Chess Federation (FIDE) where it is used in the Elo ranking
+ system, assuming the performance of each player is a logistically
+ distributed random variable.
+
+ References
+ ----------
+ .. [1] Reiss, R.-D. and Thomas M. (2001), "Statistical Analysis of
+ Extreme Values, from Insurance, Finance, Hydrology and Other
+ Fields," Birkhauser Verlag, Basel, pp 132-133.
+ .. [2] Weisstein, Eric W. "Logistic Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/LogisticDistribution.html
+ .. [3] Wikipedia, "Logistic-distribution",
+ https://en.wikipedia.org/wiki/Logistic_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> loc, scale = 10, 1
+ >>> s = np.random.logistic(loc, scale, 10000)
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, bins=50)
+
+ # plot against distribution
+
+ >>> def logist(x, loc, scale):
+ ... return np.exp((loc-x)/scale)/(scale*(1+np.exp((loc-x)/scale))**2)
+ >>> lgst_val = logist(bins, loc, scale)
+ >>> plt.plot(bins, lgst_val * count.max() / lgst_val.max())
+ >>> plt.show()
+
+ """
+ return cont(&random_logistic, self._brng, size, self.lock, 2,
+ loc, 'loc', CONS_NONE,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def lognormal(self, mean=0.0, sigma=1.0, size=None):
+ """
+ lognormal(mean=0.0, sigma=1.0, size=None)
+
+ Draw samples from a log-normal distribution.
+
+ Draw samples from a log-normal distribution with specified mean,
+ standard deviation, and array shape. Note that the mean and standard
+ deviation are not the values for the distribution itself, but of the
+ underlying normal distribution it is derived from.
+
+ Parameters
+ ----------
+ mean : float or array_like of floats, optional
+ Mean value of the underlying normal distribution. Default is 0.
+ sigma : float or array_like of floats, optional
+ Standard deviation of the underlying normal distribution. Must be
+ non-negative. Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mean`` and ``sigma`` are both scalars.
+ Otherwise, ``np.broadcast(mean, sigma).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized log-normal distribution.
+
+ See Also
+ --------
+ scipy.stats.lognorm : probability density function, distribution,
+ cumulative density function, etc.
+
+ Notes
+ -----
+ A variable `x` has a log-normal distribution if `log(x)` is normally
+ distributed. The probability density function for the log-normal
+ distribution is:
+
+ .. math:: p(x) = \\frac{1}{\\sigma x \\sqrt{2\\pi}}
+ e^{-\\frac{(\\ln(x)-\\mu)^2}{2\\sigma^2}}
+
+ where :math:`\\mu` is the mean and :math:`\\sigma` is the standard
+ deviation of the normally distributed logarithm of the variable.
+ A log-normal distribution results if a random variable is the *product*
+ of a large number of independent, identically-distributed variables in
+ the same way that a normal distribution results if the variable is the
+ *sum* of a large number of independent, identically-distributed
+ variables.
+
+ References
+ ----------
+ .. [1] Limpert, E., Stahel, W. A., and Abbt, M., "Log-normal
+ Distributions across the Sciences: Keys and Clues,"
+ BioScience, Vol. 51, No. 5, May, 2001.
+ https://stat.ethz.ch/~stahel/lognormal/bioscience.pdf
+ .. [2] Reiss, R.D. and Thomas, M., "Statistical Analysis of Extreme
+ Values," Basel: Birkhauser Verlag, 2001, pp. 31-32.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> mu, sigma = 3., 1. # mean and standard deviation
+ >>> s = np.random.lognormal(mu, sigma, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 100, density=True, align='mid')
+
+ >>> x = np.linspace(min(bins), max(bins), 10000)
+ >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
+ ... / (x * sigma * np.sqrt(2 * np.pi)))
+
+ >>> plt.plot(x, pdf, linewidth=2, color='r')
+ >>> plt.axis('tight')
+ >>> plt.show()
+
+ Demonstrate that taking the products of random samples from a normal
+ distribution can be fit well by a log-normal probability density
+ function.
+
+ >>> # Generate a thousand samples: each is the product of 100 random
+ >>> # values, drawn from a normal distribution.
+ >>> b = []
+ >>> for i in range(1000):
+ ... a = 10. + np.random.standard_normal(100)
+ ... b.append(np.product(a))
+
+ >>> b = np.array(b) / np.min(b) # scale values to be positive
+ >>> count, bins, ignored = plt.hist(b, 100, density=True, align='mid')
+ >>> sigma = np.std(np.log(b))
+ >>> mu = np.mean(np.log(b))
+
+ >>> x = np.linspace(min(bins), max(bins), 10000)
+ >>> pdf = (np.exp(-(np.log(x) - mu)**2 / (2 * sigma**2))
+ ... / (x * sigma * np.sqrt(2 * np.pi)))
+
+ >>> plt.plot(x, pdf, color='r', linewidth=2)
+ >>> plt.show()
+
+ """
+ return cont(&legacy_lognormal, self._aug_state, size, self.lock, 2,
+ mean, 'mean', CONS_NONE,
+ sigma, 'sigma', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def rayleigh(self, scale=1.0, size=None):
+ """
+ rayleigh(scale=1.0, size=None)
+
+ Draw samples from a Rayleigh distribution.
+
+ The :math:`\\chi` and Weibull distributions are generalizations of the
+ Rayleigh.
+
+ Parameters
+ ----------
+ scale : float or array_like of floats, optional
+ Scale, also equals the mode. Must be non-negative. Default is 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``scale`` is a scalar. Otherwise,
+ ``np.array(scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Rayleigh distribution.
+
+ Notes
+ -----
+ The probability density function for the Rayleigh distribution is
+
+ .. math:: P(x;scale) = \\frac{x}{scale^2}e^{\\frac{-x^2}{2 \\cdotp scale^2}}
+
+ The Rayleigh distribution would arise, for example, if the East
+ and North components of the wind velocity had identical zero-mean
+ Gaussian distributions. Then the wind speed would have a Rayleigh
+ distribution.
+
+ References
+ ----------
+ .. [1] Brighton Webs Ltd., "Rayleigh Distribution,"
+ https://web.archive.org/web/20090514091424/http://brighton-webs.co.uk:80/distributions/rayleigh.asp
+ .. [2] Wikipedia, "Rayleigh distribution"
+ https://en.wikipedia.org/wiki/Rayleigh_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram
+
+ >>> from matplotlib.pyplot import hist
+ >>> values = hist(np.random.rayleigh(3, 100000), bins=200, density=True)
+
+ Wave heights tend to follow a Rayleigh distribution. If the mean wave
+ height is 1 meter, what fraction of waves are likely to be larger than 3
+ meters?
+
+ >>> meanvalue = 1
+ >>> modevalue = np.sqrt(2 / np.pi) * meanvalue
+ >>> s = np.random.rayleigh(modevalue, 1000000)
+
+ The percentage of waves larger than 3 meters is:
+
+ >>> 100.*sum(s>3)/1000000.
+ 0.087300000000000003 # random
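+
+ As described in the Notes, the magnitude of two independent zero-mean
+ Gaussians is Rayleigh distributed; a rough illustrative check using the
+ same ``modevalue`` (values will vary):
+
+ >>> x = np.random.normal(scale=modevalue, size=1000000)
+ >>> y = np.random.normal(scale=modevalue, size=1000000)
+ >>> np.hypot(x, y).mean() # both means should be close to meanvalue
+ 1.0004 # random
+ >>> s.mean()
+ 0.9997 # random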
+
+ """
+ return cont(&random_rayleigh, self._brng, size, self.lock, 1,
+ scale, 'scale', CONS_NON_NEGATIVE,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE, None)
+
+ def wald(self, mean, scale, size=None):
+ """
+ wald(mean, scale, size=None)
+
+ Draw samples from a Wald, or inverse Gaussian, distribution.
+
+ As the scale approaches infinity, the distribution becomes more like a
+ Gaussian. Some references claim that the Wald is an inverse Gaussian
+ with mean equal to 1, but this is by no means universal.
+
+ The inverse Gaussian distribution was first studied in relationship to
+ Brownian motion. In 1956 M.C.K. Tweedie used the name inverse Gaussian
+ because there is an inverse relationship between the time to cover a
+ unit distance and distance covered in unit time.
+
+ Parameters
+ ----------
+ mean : float or array_like of floats
+ Distribution mean, must be > 0.
+ scale : float or array_like of floats
+ Scale parameter, must be > 0.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``mean`` and ``scale`` are both scalars.
+ Otherwise, ``np.broadcast(mean, scale).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Wald distribution.
+
+ Notes
+ -----
+ The probability density function for the Wald distribution is
+
+ .. math:: P(x;mean,scale) = \\sqrt{\\frac{scale}{2\\pi x^3}}e^
+ \\frac{-scale(x-mean)^2}{2\\cdotp mean^2x}
+
+ As noted above, the inverse Gaussian distribution first arose
+ from attempts to model Brownian motion. It is also a
+ competitor to the Weibull for use in reliability modeling and
+ modeling stock returns and interest rate processes.
+
+ References
+ ----------
+ .. [1] Brighton Webs Ltd., Wald Distribution,
+ https://web.archive.org/web/20090423014010/http://www.brighton-webs.co.uk:80/distributions/wald.asp
+ .. [2] Chhikara, Raj S., and Folks, J. Leroy, "The Inverse Gaussian
+ Distribution: Theory, Methodology, and Applications", CRC Press,
+ 1988.
+ .. [3] Wikipedia, "Inverse Gaussian distribution"
+ https://en.wikipedia.org/wiki/Inverse_Gaussian_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram:
+
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(np.random.wald(3, 2, 100000), bins=200, density=True)
+ >>> plt.show()
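+
+ The sample mean and variance should be close to the theoretical values
+ ``mean`` and ``mean**3 / scale`` (3 and 13.5 here); a quick illustrative
+ check, values will vary:
+
+ >>> s = np.random.wald(3, 2, 1000000)
+ >>> s.mean(), s.var()
+ (3.0011, 13.48) # random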
+
+ """
+ return cont(&legacy_wald, self._aug_state, size, self.lock, 2,
+ mean, 'mean', CONS_POSITIVE,
+ scale, 'scale', CONS_POSITIVE,
+ 0.0, '', CONS_NONE, None)
+
+ def triangular(self, left, mode, right, size=None):
+ """
+ triangular(left, mode, right, size=None)
+
+ Draw samples from the triangular distribution over the
+ interval ``[left, right]``.
+
+ The triangular distribution is a continuous probability
+ distribution with lower limit left, peak at mode, and upper
+ limit right. Unlike the other distributions, these parameters
+ directly define the shape of the pdf.
+
+ Parameters
+ ----------
+ left : float or array_like of floats
+ Lower limit.
+ mode : float or array_like of floats
+ The value where the peak of the distribution occurs.
+ The value must fulfill the condition ``left <= mode <= right``.
+ right : float or array_like of floats
+ Upper limit, must be larger than `left`.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``left``, ``mode``, and ``right``
+ are all scalars. Otherwise, ``np.broadcast(left, mode, right).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized triangular distribution.
+
+ Notes
+ -----
+ The probability density function for the triangular distribution is
+
+ .. math:: P(x;l, m, r) = \\begin{cases}
+ \\frac{2(x-l)}{(r-l)(m-l)}& \\text{for $l \\leq x \\leq m$},\\\\
+ \\frac{2(r-x)}{(r-l)(r-m)}& \\text{for $m \\leq x \\leq r$},\\\\
+ 0& \\text{otherwise}.
+ \\end{cases}
+
+ The triangular distribution is often used in ill-defined
+ problems where the underlying distribution is not known, but
+ some knowledge of the limits and mode exists. Often it is used
+ in simulations.
+
+ References
+ ----------
+ .. [1] Wikipedia, "Triangular distribution"
+ https://en.wikipedia.org/wiki/Triangular_distribution
+
+ Examples
+ --------
+ Draw values from the distribution and plot the histogram:
+
+ >>> import matplotlib.pyplot as plt
+ >>> h = plt.hist(np.random.triangular(-3, 0, 8, 100000), bins=200,
+ ... density=True)
+ >>> plt.show()
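+
+ The sample mean should land near ``(left + mode + right) / 3``, about
+ 1.667 for the parameters above (illustrative, values will vary):
+
+ >>> vals = np.random.triangular(-3, 0, 8, 1000000)
+ >>> vals.mean()
+ 1.6671 # random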
+
+ """
+ cdef bint is_scalar = True
+ cdef double fleft, fmode, fright
+ cdef np.ndarray oleft, omode, oright
+
+ oleft = <np.ndarray>np.PyArray_FROM_OTF(left, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ omode = <np.ndarray>np.PyArray_FROM_OTF(mode, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ oright = <np.ndarray>np.PyArray_FROM_OTF(right, np.NPY_DOUBLE, np.NPY_ALIGNED)
+
+ if np.PyArray_NDIM(oleft) == np.PyArray_NDIM(omode) == np.PyArray_NDIM(oright) == 0:
+ fleft = PyFloat_AsDouble(left)
+ fright = PyFloat_AsDouble(right)
+ fmode = PyFloat_AsDouble(mode)
+
+ if fleft > fmode:
+ raise ValueError("left > mode")
+ if fmode > fright:
+ raise ValueError("mode > right")
+ if fleft == fright:
+ raise ValueError("left == right")
+ return cont(&random_triangular, self._brng, size, self.lock, 3,
+ fleft, '', CONS_NONE,
+ fmode, '', CONS_NONE,
+ fright, '', CONS_NONE, None)
+
+ if np.any(np.greater(oleft, omode)):
+ raise ValueError("left > mode")
+ if np.any(np.greater(omode, oright)):
+ raise ValueError("mode > right")
+ if np.any(np.equal(oleft, oright)):
+ raise ValueError("left == right")
+
+ return cont_broadcast_3(&random_triangular, self._brng, size, self.lock,
+ oleft, '', CONS_NONE,
+ omode, '', CONS_NONE,
+ oright, '', CONS_NONE)
+
+ # Complicated, discrete distributions:
+ def binomial(self, n, p, size=None):
+ """
+ binomial(n, p, size=None)
+
+ Draw samples from a binomial distribution.
+
+ Samples are drawn from a binomial distribution with specified
+ parameters, n trials and p probability of success where
+ n is an integer >= 0 and p is in the interval [0, 1]. (n may be
+ input as a float, but it is truncated to an integer in use.)
+
+ Parameters
+ ----------
+ n : int or array_like of ints
+ Parameter of the distribution, >= 0. Floats are also accepted,
+ but they will be truncated to integers.
+ p : float or array_like of floats
+ Parameter of the distribution, >= 0 and <=1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``n`` and ``p`` are both scalars.
+ Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized binomial distribution, where
+ each sample is equal to the number of successes over the n trials.
+
+ See Also
+ --------
+ scipy.stats.binom : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the binomial distribution is
+
+ .. math:: P(N) = \\binom{n}{N}p^N(1-p)^{n-N},
+
+ where :math:`n` is the number of trials, :math:`p` is the probability
+ of success, and :math:`N` is the number of successes.
+
+ When estimating the standard error of a proportion in a population by
+ using a random sample, the normal distribution works well unless the
+ product p*n <=5, where p = population proportion estimate, and n =
+ number of samples, in which case the binomial distribution is used
+ instead. For example, a sample of 15 people shows 4 who are left
+ handed, and 11 who are right handed. Then p = 4/15 = 27%. 0.27*15 = 4,
+ so the binomial distribution should be used in this case.
+
+ References
+ ----------
+ .. [1] Dalgaard, Peter, "Introductory Statistics with R",
+ Springer-Verlag, 2002.
+ .. [2] Glantz, Stanton A. "Primer of Biostatistics.", McGraw-Hill,
+ Fifth Edition, 2002.
+ .. [3] Lentner, Marvin, "Elementary Applied Statistics", Bogden
+ and Quigley, 1972.
+ .. [4] Weisstein, Eric W. "Binomial Distribution." From MathWorld--A
+ Wolfram Web Resource.
+ http://mathworld.wolfram.com/BinomialDistribution.html
+ .. [5] Wikipedia, "Binomial distribution",
+ https://en.wikipedia.org/wiki/Binomial_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> n, p = 10, .5 # number of trials, probability of each trial
+ >>> s = np.random.binomial(n, p, 1000)
+ # result of flipping a coin 10 times, tested 1000 times.
+
+ A real world example. A company drills 9 wild-cat oil exploration
+ wells, each with an estimated probability of success of 0.1. All nine
+ wells fail. What is the probability of that happening?
+
+ Let's do 20,000 trials of the model, and count the number that
+ generate zero positive results.
+
+ >>> sum(np.random.binomial(9, 0.1, 20000) == 0)/20000.
+ # answer = 0.38885, or 38%.
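+
+ The exact probability that all nine wells fail is ``0.9**9``, which the
+ simulated estimate above approximates (an illustrative check):
+
+ >>> round(0.9 ** 9, 4)
+ 0.3874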
+
+ """
+
+ # Uses a custom implementation since self._binomial is required
+ cdef double _dp = 0
+ cdef int64_t _in = 0
+ cdef bint is_scalar = True
+ cdef np.npy_intp i, cnt
+ cdef np.ndarray randoms
+ cdef np.int64_t *randoms_data
+ cdef np.broadcast it
+
+ p_arr = <np.ndarray>np.PyArray_FROM_OTF(p, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(p_arr) == 0
+ n_arr = <np.ndarray>np.PyArray_FROM_OTF(n, np.NPY_INT64, np.NPY_ALIGNED)
+ is_scalar = is_scalar and np.PyArray_NDIM(n_arr) == 0
+
+ if not is_scalar:
+ check_array_constraint(p_arr, 'p', CONS_BOUNDED_0_1)
+ check_array_constraint(n_arr, 'n', CONS_NON_NEGATIVE)
+ if size is not None:
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ else:
+ it = np.PyArray_MultiIterNew2(p_arr, n_arr)
+ randoms = <np.ndarray>np.empty(it.shape, np.int64)
+
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+ cnt = np.PyArray_SIZE(randoms)
+
+ it = np.PyArray_MultiIterNew3(randoms, p_arr, n_arr)
+ with self.lock, nogil:
+ for i in range(cnt):
+ _dp = (<double*>np.PyArray_MultiIter_DATA(it, 1))[0]
+ _in = (<int64_t*>np.PyArray_MultiIter_DATA(it, 2))[0]
+ (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0] = random_binomial(self._brng, _dp, _in, self._binomial)
+
+ np.PyArray_MultiIter_NEXT(it)
+
+ return randoms
+
+ _dp = PyFloat_AsDouble(p)
+ _in = <int64_t>n
+ check_constraint(_dp, 'p', CONS_BOUNDED_0_1)
+ check_constraint(<double>_in, 'n', CONS_NON_NEGATIVE)
+
+ if size is None:
+ with self.lock:
+ return random_binomial(self._brng, _dp, _in, self._binomial)
+
+ randoms = <np.ndarray>np.empty(size, np.int64)
+ cnt = np.PyArray_SIZE(randoms)
+ randoms_data = <np.int64_t *>np.PyArray_DATA(randoms)
+
+ with self.lock, nogil:
+ for i in range(cnt):
+ randoms_data[i] = random_binomial(self._brng, _dp, _in,
+ self._binomial)
+
+ return randoms
+
+ def negative_binomial(self, n, p, size=None):
+ """
+ negative_binomial(n, p, size=None)
+
+ Draw samples from a negative binomial distribution.
+
+ Samples are drawn from a negative binomial distribution with specified
+ parameters, `n` successes and `p` probability of success where `n`
+ is > 0 and `p` is in the interval [0, 1].
+
+ Parameters
+ ----------
+ n : float or array_like of floats
+ Parameter of the distribution, > 0.
+ p : float or array_like of floats
+ Parameter of the distribution, >= 0 and <=1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``n`` and ``p`` are both scalars.
+ Otherwise, ``np.broadcast(n, p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized negative binomial distribution,
+ where each sample is equal to N, the number of failures that
+ occurred before a total of n successes was reached.
+
+ Notes
+ -----
+ The probability mass function of the negative binomial distribution is
+
+ .. math:: P(N;n,p) = \\frac{\\Gamma(N+n)}{N!\\Gamma(n)}p^{n}(1-p)^{N},
+
+ where :math:`n` is the number of successes, :math:`p` is the
+ probability of success, :math:`N+n` is the number of trials, and
+ :math:`\\Gamma` is the gamma function. When :math:`n` is an integer,
+ :math:`\\frac{\\Gamma(N+n)}{N!\\Gamma(n)} = \\binom{N+n-1}{N}`, which is
+ the more common form of this term in the pmf. The negative
+ binomial distribution gives the probability of N failures given n
+ successes, with a success on the last trial.
+
+ If one throws a die repeatedly until the third time a "1" appears,
+ then the probability distribution of the number of non-"1"s that
+ appear before the third "1" is a negative binomial distribution.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Negative Binomial Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/NegativeBinomialDistribution.html
+ .. [2] Wikipedia, "Negative binomial distribution",
+ https://en.wikipedia.org/wiki/Negative_binomial_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ A real world example. A company drills wild-cat oil
+ exploration wells, each with an estimated probability of
+ success of 0.1. What is the probability of having one success
+ for each successive well, that is what is the probability of a
+ single success after drilling 5 wells, after 6 wells, etc.?
+
+ >>> s = np.random.negative_binomial(1, 0.1, 100000)
+ >>> for i in range(1, 11): # doctest: +SKIP
+ ... probability = sum(s<i) / 100000.
+ ... print(i, "wells drilled, probability of one success =", probability)
+
+ """
+ return disc(&legacy_negative_binomial, self._aug_state, size, self.lock, 2, 0,
+ n, 'n', CONS_POSITIVE,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE)
+
+ def poisson(self, lam=1.0, size=None):
+ """
+ poisson(lam=1.0, size=None)
+
+ Draw samples from a Poisson distribution.
+
+ The Poisson distribution is the limit of the binomial distribution
+ for large N.
+
+ Parameters
+ ----------
+ lam : float or array_like of floats
+ Expectation of interval, must be >= 0. A sequence of expectation
+ intervals must be broadcastable over the requested size.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``lam`` is a scalar. Otherwise,
+ ``np.array(lam).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Poisson distribution.
+
+ Notes
+ -----
+ The Poisson distribution
+
+ .. math:: f(k; \\lambda)=\\frac{\\lambda^k e^{-\\lambda}}{k!}
+
+ For events with an expected separation :math:`\\lambda` the Poisson
+ distribution :math:`f(k; \\lambda)` describes the probability of
+ :math:`k` events occurring within the observed
+ interval :math:`\\lambda`.
+
+ Because the output is limited to the range of the C int64 type, a
+ ValueError is raised when `lam` is within 10 sigma of the maximum
+ representable value.
+
+ References
+ ----------
+ .. [1] Weisstein, Eric W. "Poisson Distribution."
+ From MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/PoissonDistribution.html
+ .. [2] Wikipedia, "Poisson distribution",
+ https://en.wikipedia.org/wiki/Poisson_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> import numpy as np
+ >>> s = np.random.poisson(5, 10000)
+
+ Display histogram of the sample:
+
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s, 14, density=True)
+ >>> plt.show()
+
+ Draw each 100 values for lambda 100 and 500:
+
+ >>> s = np.random.poisson(lam=(100., 500.), size=(100, 2))
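+
+ As noted above, the Poisson distribution is the limit of the binomial
+ for large N; a rough illustrative comparison (values will vary):
+
+ >>> b = np.random.binomial(10000, 5. / 10000., 100000)
+ >>> p = np.random.poisson(5., 100000)
+ >>> b.mean(), p.mean() # both close to 5
+ (5.0021, 4.9988) # random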
+
+ """
+ return disc(&random_poisson, self._brng, size, self.lock, 1, 0,
+ lam, 'lam', CONS_POISSON,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def zipf(self, a, size=None):
+ """
+ zipf(a, size=None)
+
+ Draw samples from a Zipf distribution.
+
+ Samples are drawn from a Zipf distribution with specified parameter
+ `a` > 1.
+
+ The Zipf distribution (also known as the zeta distribution) is a
+ discrete probability distribution that satisfies Zipf's law: the
+ frequency of an item is inversely proportional to its rank in a
+ frequency table.
+
+ Parameters
+ ----------
+ a : float or array_like of floats
+ Distribution parameter. Must be greater than 1.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``a`` is a scalar. Otherwise,
+ ``np.array(a).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized Zipf distribution.
+
+ See Also
+ --------
+ scipy.stats.zipf : probability density function, distribution, or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Zipf distribution is
+
+ .. math:: p(x) = \\frac{x^{-a}}{\\zeta(a)},
+
+ where :math:`\\zeta` is the Riemann Zeta function.
+
+ It is named for the American linguist George Kingsley Zipf, who noted
+ that the frequency of any word in a sample of a language is inversely
+ proportional to its rank in the frequency table.
+
+ References
+ ----------
+ .. [1] Zipf, G. K., "Selected Studies of the Principle of Relative
+ Frequency in Language," Cambridge, MA: Harvard Univ. Press,
+ 1932.
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = 2. # parameter
+ >>> s = np.random.zipf(a, 1000)
+
+ Display the histogram of the samples, along with
+ the probability density function:
+
+ >>> import matplotlib.pyplot as plt
+ >>> from scipy import special
+
+ Truncate s values at 50 so plot is interesting:
+
+ >>> count, bins, ignored = plt.hist(s[s<50], 50, density=True)
+ >>> x = np.arange(1., 50.)
+ >>> y = x**(-a) / special.zetac(a)
+ >>> plt.plot(x, y/max(y), linewidth=2, color='r')
+ >>> plt.show()
+
+ """
+ return disc(&random_zipf, self._brng, size, self.lock, 1, 0,
+ a, 'a', CONS_GT_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def geometric(self, p, size=None):
+ """
+ geometric(p, size=None)
+
+ Draw samples from the geometric distribution.
+
+ Bernoulli trials are experiments with one of two outcomes:
+ success or failure (an example of such an experiment is flipping
+ a coin). The geometric distribution models the number of trials
+ that must be run in order to achieve success. It is therefore
+ supported on the positive integers, ``k = 1, 2, ...``.
+
+ The probability mass function of the geometric distribution is
+
+ .. math:: f(k) = (1 - p)^{k - 1} p
+
+ where `p` is the probability of success of an individual trial.
+
+ Parameters
+ ----------
+ p : float or array_like of floats
+ The probability of success of an individual trial.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``p`` is a scalar. Otherwise,
+ ``np.array(p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized geometric distribution.
+
+ Examples
+ --------
+ Draw ten thousand values from the geometric distribution,
+ with the probability of an individual success equal to 0.35:
+
+ >>> z = np.random.geometric(p=0.35, size=10000)
+
+ What proportion of trials succeeded after a single run?
+
+ >>> (z == 1).sum() / 10000.
+ 0.34889999999999999 # random
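+
+ The sample mean should be close to ``1/p``, about 2.857 here
+ (illustrative, values will vary):
+
+ >>> z.mean()
+ 2.8577 # random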
+
+ """
+ return disc(&random_geometric, self._brng, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_GT_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ def hypergeometric(self, ngood, nbad, nsample, size=None):
+ """
+ hypergeometric(ngood, nbad, nsample, size=None)
+
+ Draw samples from a Hypergeometric distribution.
+
+ Samples are drawn from a hypergeometric distribution with specified
+ parameters, `ngood` (ways to make a good selection), `nbad` (ways to make
+ a bad selection), and `nsample` (number of items sampled, which is less
+ than or equal to the sum ``ngood + nbad``).
+
+ Parameters
+ ----------
+ ngood : int or array_like of ints
+ Number of ways to make a good selection. Must be nonnegative.
+ nbad : int or array_like of ints
+ Number of ways to make a bad selection. Must be nonnegative.
+ nsample : int or array_like of ints
+ Number of items sampled. Must be at least 1 and at most
+ ``ngood + nbad``.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if `ngood`, `nbad`, and `nsample`
+ are all scalars. Otherwise, ``np.broadcast(ngood, nbad, nsample).size``
+ samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized hypergeometric distribution. Each
+ sample is the number of good items within a randomly selected subset of
+ size `nsample` taken from a set of `ngood` good items and `nbad` bad items.
+
+ See Also
+ --------
+ scipy.stats.hypergeom : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Hypergeometric distribution is
+
+ .. math:: P(x) = \\frac{\\binom{g}{x}\\binom{b}{n-x}}{\\binom{g+b}{n}},
+
+ where :math:`0 \\le x \\le n` and :math:`n-b \\le x \\le g`
+
+ for P(x) the probability of ``x`` good results in the drawn sample,
+ g = `ngood`, b = `nbad`, and n = `nsample`.
+
+ Consider an urn with black and white marbles in it, `ngood` of them
+ are black and `nbad` are white. If you draw `nsample` balls without
+ replacement, then the hypergeometric distribution describes the
+ distribution of black balls in the drawn sample.
+
+ Note that this distribution is very similar to the binomial
+ distribution, except that in this case, samples are drawn without
+ replacement, whereas in the Binomial case samples are drawn with
+ replacement (or the sample space is infinite). As the sample space
+ becomes large, this distribution approaches the binomial.
+
+ References
+ ----------
+ .. [1] Lentner, Marvin, "Elementary Applied Statistics", Bogden
+ and Quigley, 1972.
+ .. [2] Weisstein, Eric W. "Hypergeometric Distribution." From
+ MathWorld--A Wolfram Web Resource.
+ http://mathworld.wolfram.com/HypergeometricDistribution.html
+ .. [3] Wikipedia, "Hypergeometric distribution",
+ https://en.wikipedia.org/wiki/Hypergeometric_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> ngood, nbad, nsamp = 100, 2, 10
+ # number of good, number of bad, and number of samples
+ >>> s = np.random.hypergeometric(ngood, nbad, nsamp, 1000)
+ >>> from matplotlib.pyplot import hist
+ >>> hist(s)
+ # note that it is very unlikely to grab both bad items
+
+ Suppose you have an urn with 15 white and 15 black marbles.
+ If you pull 15 marbles at random, how likely is it that
+ 12 or more of them are one color?
+
+ >>> s = np.random.hypergeometric(15, 15, 15, 100000)
+ >>> sum(s>=12)/100000. + sum(s<=3)/100000.
+ # answer = 0.003 ... pretty unlikely!
+
+ """
+ cdef bint is_scalar = True
+ cdef np.ndarray ongood, onbad, onsample
+ cdef int64_t lngood, lnbad, lnsample
+
+ ongood = <np.ndarray>np.PyArray_FROM_OTF(ngood, np.NPY_INT64, np.NPY_ALIGNED)
+ onbad = <np.ndarray>np.PyArray_FROM_OTF(nbad, np.NPY_INT64, np.NPY_ALIGNED)
+ onsample = <np.ndarray>np.PyArray_FROM_OTF(nsample, np.NPY_INT64, np.NPY_ALIGNED)
+
+ if np.PyArray_NDIM(ongood) == np.PyArray_NDIM(onbad) == np.PyArray_NDIM(onsample) == 0:
+
+ lngood = <int64_t>ngood
+ lnbad = <int64_t>nbad
+ lnsample = <int64_t>nsample
+
+ if lngood + lnbad < lnsample:
+ raise ValueError("ngood + nbad < nsample")
+ return disc(&random_hypergeometric, self._brng, size, self.lock, 0, 3,
+ lngood, 'ngood', CONS_NON_NEGATIVE,
+ lnbad, 'nbad', CONS_NON_NEGATIVE,
+ lnsample, 'nsample', CONS_GTE_1)
+
+ if np.any(np.less(np.add(ongood, onbad), onsample)):
+ raise ValueError("ngood + nbad < nsample")
+ return discrete_broadcast_iii(&random_hypergeometric, self._brng, size, self.lock,
+ ongood, 'ngood', CONS_NON_NEGATIVE,
+ onbad, 'nbad', CONS_NON_NEGATIVE,
+ onsample, 'nsample', CONS_GTE_1)
+
+ def logseries(self, p, size=None):
+ """
+ logseries(p, size=None)
+
+ Draw samples from a logarithmic series distribution.
+
+ Samples are drawn from a log series distribution with specified
+ shape parameter, 0 < ``p`` < 1.
+
+ Parameters
+ ----------
+ p : float or array_like of floats
+ Shape parameter for the distribution. Must be in the range (0, 1).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. If size is ``None`` (default),
+ a single value is returned if ``p`` is a scalar. Otherwise,
+ ``np.array(p).size`` samples are drawn.
+
+ Returns
+ -------
+ out : ndarray or scalar
+ Drawn samples from the parameterized logarithmic series distribution.
+
+ See Also
+ --------
+ scipy.stats.logser : probability density function, distribution or
+ cumulative density function, etc.
+
+ Notes
+ -----
+ The probability density for the Log Series distribution is
+
+ .. math:: P(k) = \\frac{-p^k}{k \\ln(1-p)},
+
+ where p = probability.
+
+ The log series distribution is frequently used to represent species
+ richness and occurrence, first proposed by Fisher, Corbet, and
+ Williams in 1943 [2]. It may also be used to model the numbers of
+ occupants seen in cars [3].
+
+ References
+ ----------
+ .. [1] Buzas, Martin A.; Culver, Stephen J., Understanding regional
+ species diversity through the log series distribution of
+ occurrences: BIODIVERSITY RESEARCH Diversity & Distributions,
+ Volume 5, Number 5, September 1999 , pp. 187-195(9).
+ .. [2] Fisher, R.A., A.S. Corbet, and C.B. Williams. 1943. The
+ relation between the number of species and the number of
+ individuals in a random sample of an animal population.
+ Journal of Animal Ecology, 12:42-58.
+ .. [3] D. J. Hand, F. Daly, D. Lunn, E. Ostrowski, A Handbook of Small
+ Data Sets, CRC Press, 1994.
+ .. [4] Wikipedia, "Logarithmic distribution",
+ https://en.wikipedia.org/wiki/Logarithmic_distribution
+
+ Examples
+ --------
+ Draw samples from the distribution:
+
+ >>> a = .6
+ >>> s = np.random.logseries(a, 10000)
+ >>> import matplotlib.pyplot as plt
+ >>> count, bins, ignored = plt.hist(s)
+
+ # plot against distribution
+
+ >>> def logseries(k, p):
+ ... return -p**k/(k*np.log(1-p))
+ >>> plt.plot(bins, logseries(bins, a)*count.max()/
+ ... logseries(bins, a).max(), 'r')
+ >>> plt.show()
+
+ """
+ return disc(&random_logseries, self._brng, size, self.lock, 1, 0,
+ p, 'p', CONS_BOUNDED_0_1,
+ 0.0, '', CONS_NONE,
+ 0.0, '', CONS_NONE)
+
+ # Multivariate distributions:
+ def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
+ tol=1e-8):
+ """
+ multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
+
+ Draw random samples from a multivariate normal distribution.
+
+ The multivariate normal, multinormal or Gaussian distribution is a
+ generalization of the one-dimensional normal distribution to higher
+ dimensions. Such a distribution is specified by its mean and
+ covariance matrix. These parameters are analogous to the mean
+ (average or "center") and variance (standard deviation, or "width,"
+ squared) of the one-dimensional normal distribution.
+
+ Parameters
+ ----------
+ mean : 1-D array_like, of length N
+ Mean of the N-dimensional distribution.
+ cov : 2-D array_like, of shape (N, N)
+ Covariance matrix of the distribution. It must be symmetric and
+ positive-semidefinite for proper sampling.
+ size : int or tuple of ints, optional
+ Given a shape of, for example, ``(m,n,k)``, ``m*n*k`` samples are
+ generated, and packed in an `m`-by-`n`-by-`k` arrangement. Because
+ each sample is `N`-dimensional, the output shape is ``(m,n,k,N)``.
+ If no shape is specified, a single (`N`-D) sample is returned.
+ check_valid : { 'warn', 'raise', 'ignore' }, optional
+ Behavior when the covariance matrix is not positive semidefinite.
+ tol : float, optional
+ Tolerance when checking the singular values in covariance matrix.
+ cov is cast to double before the check.
+
+ Returns
+ -------
+ out : ndarray
+ The drawn samples, of shape *size*, if that was provided. If not,
+ the shape is ``(N,)``.
+
+ In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+ value drawn from the distribution.
+
+ Notes
+ -----
+ The mean is a coordinate in N-dimensional space, which represents the
+ location where samples are most likely to be generated. This is
+ analogous to the peak of the bell curve for the one-dimensional or
+ univariate normal distribution.
+
+ Covariance indicates the level to which two variables vary together.
+ From the multivariate normal distribution, we draw N-dimensional
+ samples, :math:`X = [x_1, x_2, ... x_N]`. The covariance matrix
+ element :math:`C_{ij}` is the covariance of :math:`x_i` and :math:`x_j`.
+ The element :math:`C_{ii}` is the variance of :math:`x_i` (i.e. its
+ "spread").
+
+ Instead of specifying the full covariance matrix, popular
+ approximations include:
+
+ - Spherical covariance (`cov` is a multiple of the identity matrix)
+ - Diagonal covariance (`cov` has non-negative elements, and only on
+ the diagonal)
+
+ This geometrical property can be seen in two dimensions by plotting
+ generated data-points:
+
+ >>> mean = [0, 0]
+ >>> cov = [[1, 0], [0, 100]] # diagonal covariance
+
+ Diagonal covariance means that points are oriented along the x- or y-axis:
+
+ >>> import matplotlib.pyplot as plt
+ >>> x, y = np.random.multivariate_normal(mean, cov, 5000).T
+ >>> plt.plot(x, y, 'x')
+ >>> plt.axis('equal')
+ >>> plt.show()
+
+ Note that the covariance matrix must be positive semidefinite (a.k.a.
+ nonnegative-definite). Otherwise, the behavior of this method is
+ undefined and backwards compatibility is not guaranteed.
+
+ References
+ ----------
+ .. [1] Papoulis, A., "Probability, Random Variables, and Stochastic
+ Processes," 3rd ed., New York: McGraw-Hill, 1991.
+ .. [2] Duda, R. O., Hart, P. E., and Stork, D. G., "Pattern
+ Classification," 2nd ed., New York: Wiley, 2001.
+
+ Examples
+ --------
+ >>> mean = (1, 2)
+ >>> cov = [[1, 0], [0, 1]]
+ >>> x = np.random.multivariate_normal(mean, cov, (3, 3))
+ >>> x.shape
+ (3, 3, 2)
+
+ The following is probably true, given that 0.6 is roughly twice the
+ standard deviation:
+
+ >>> list((x[0,0,:] - mean) < 0.6)
+ [True, True] # random
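+
+ The empirical covariance of a large sample should approximate ``cov``
+ (illustrative, values will vary):
+
+ >>> samples = np.random.multivariate_normal(mean, cov, 100000)
+ >>> np.round(np.cov(samples.T), 2)
+ array([[1., 0.], # random
+ [0., 1.]])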
+
+ """
+ from numpy.dual import svd
+
+ # Check preconditions on arguments
+ mean = np.array(mean)
+ cov = np.array(cov)
+ if size is None:
+ shape = []
+ elif isinstance(size, (int, long, np.integer)):
+ shape = [size]
+ else:
+ shape = size
+
+ if len(mean.shape) != 1:
+ raise ValueError("mean must be 1 dimensional")
+ if (len(cov.shape) != 2) or (cov.shape[0] != cov.shape[1]):
+ raise ValueError("cov must be 2 dimensional and square")
+ if mean.shape[0] != cov.shape[0]:
+ raise ValueError("mean and cov must have same length")
+
+ # Compute shape of output and create a matrix of independent
+ # standard normally distributed random numbers. The matrix has rows
+ # with the same length as mean and as many rows are necessary to
+ # form a matrix of shape final_shape.
+ final_shape = list(shape[:])
+ final_shape.append(mean.shape[0])
+ x = self.standard_normal(final_shape).reshape(-1, mean.shape[0])
+
+ # Transform matrix of standard normals into matrix where each row
+ # contains multivariate normals with the desired covariance.
+ # Compute A such that dot(transpose(A),A) == cov.
+ # Then the matrix products of the rows of x and A has the desired
+ # covariance. Note that sqrt(s)*v where (u,s,v) is the singular value
+ # decomposition of cov is such an A.
+ #
+ # Also check that cov is positive-semidefinite. If so, the u.T and v
+ # matrices should be equal up to roundoff error if cov is
+ # symmetric and the singular value of the corresponding row is
+ # not zero. We continue to use the SVD rather than Cholesky in
+ # order to preserve current outputs. Note that symmetry has not
+ # been checked.
+
+ # GH10839, ensure double to make tol meaningful
+ cov = cov.astype(np.double)
+ (u, s, v) = svd(cov)
+
+ if check_valid != 'ignore':
+ if check_valid != 'warn' and check_valid != 'raise':
+ raise ValueError(
+ "check_valid must equal 'warn', 'raise', or 'ignore'")
+
+ psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
+ if not psd:
+ if check_valid == 'warn':
+ warnings.warn("covariance is not positive-semidefinite.",
+ RuntimeWarning)
+ else:
+ raise ValueError(
+ "covariance is not positive-semidefinite.")
+
+ x = np.dot(x, np.sqrt(s)[:, None] * v)
+ x += mean
+ x.shape = tuple(final_shape)
+ return x
+
+ def multinomial(self, np.npy_intp n, object pvals, size=None):
+ """
+ multinomial(n, pvals, size=None)
+
+ Draw samples from a multinomial distribution.
+
+ The multinomial distribution is a multivariate generalization of the
+ binomial distribution. Take an experiment with one of ``p``
+ possible outcomes. An example of such an experiment is throwing a die,
+ where the outcome can be 1 through 6. Each sample drawn from the
+ distribution represents `n` such experiments. Its values,
+ ``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the
+ outcome was ``i``.
+
+ Parameters
+ ----------
+ n : int
+ Number of experiments.
+ pvals : sequence of floats, length p
+ Probabilities of each of the ``p`` different outcomes. These
+ must sum to 1 (however, the last element is always assumed to
+ account for the remaining probability, as long as
+ ``sum(pvals[:-1]) <= 1``).
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ out : ndarray
+ The drawn samples, of shape *size*, if that was provided. If not,
+ the shape is ``(N,)``.
+
+ In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
+ value drawn from the distribution.
+
+ Examples
+ --------
+ Throw a die 20 times:
+
+ >>> np.random.multinomial(20, [1/6.]*6, size=1)
+ array([[4, 1, 7, 5, 2, 1]]) # random
+
+ It landed 4 times on 1, once on 2, etc.
+
+ Now, throw the die 20 times, and 20 times again:
+
+ >>> np.random.multinomial(20, [1/6.]*6, size=2)
+ array([[3, 4, 3, 3, 4, 3], # random
+ [2, 4, 3, 4, 0, 7]])
+
+ For the first run, we threw 3 times 1, 4 times 2, etc. For the second,
+ we threw 2 times 1, 4 times 2, etc.
+
+ A loaded die is more likely to land on number 6:
+
+ >>> np.random.multinomial(100, [1/7.]*5 + [2/7.])
+ array([11, 16, 14, 17, 16, 26]) # random
+
+ The probability inputs should be normalized. As an implementation
+ detail, the value of the last entry is ignored and assumed to take
+ up any leftover probability mass, but this should not be relied on.
+ A biased coin which has twice as much weight on one side as on the
+ other should be sampled like so:
+
+ >>> np.random.multinomial(100, [1.0 / 3, 2.0 / 3]) # RIGHT
+ array([38, 62]) # random
+
+ not like:
+
+ >>> np.random.multinomial(100, [1.0, 2.0]) # WRONG
+ array([100, 0])
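+
+ Whatever ``pvals`` is, every draw sums to ``n`` (a small illustrative
+ check):
+
+ >>> np.random.multinomial(20, [1/6.]*6, size=3).sum(axis=1)
+ array([20, 20, 20])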
+
+ """
+ cdef np.npy_intp d, i, j, dn, sz
+ cdef np.ndarray parr "arrayObject_parr", mnarr "arrayObject_mnarr"
+ cdef double *pix
+ cdef int64_t *mnix
+ cdef double Sum
+
+ d = len(pvals)
+ parr = <np.ndarray>np.PyArray_FROM_OTF(pvals, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ pix = <double*>np.PyArray_DATA(parr)
+
+ if kahan_sum(pix, d-1) > (1.0 + 1e-12):
+ raise ValueError("sum(pvals[:-1]) > 1.0")
+
+ if size is None:
+ shape = (d,)
+ else:
+ try:
+ shape = (operator.index(size), d)
+ except:
+ shape = tuple(size) + (d,)
+
+ multin = np.zeros(shape, dtype=np.int64)
+ mnarr = <np.ndarray>multin
+ mnix = <int64_t*>np.PyArray_DATA(mnarr)
+ sz = np.PyArray_SIZE(mnarr)
+
+ with self.lock, nogil:
+ i = 0
+ while i < sz:
+ Sum = 1.0
+ dn = n
+ for j in range(d-1):
+ mnix[i+j] = random_binomial(self._brng, pix[j]/Sum, dn,
+ self._binomial)
+ dn = dn - mnix[i+j]
+ if dn <= 0:
+ break
+ Sum = Sum - pix[j]
+ if dn > 0:
+ mnix[i+d-1] = dn
+
+ i = i + d
+
+ return multin
+
+ def dirichlet(self, object alpha, size=None):
+ """
+ dirichlet(alpha, size=None)
+
+ Draw samples from the Dirichlet distribution.
+
+ Draw `size` samples of dimension k from a Dirichlet distribution. A
+ Dirichlet-distributed random variable can be seen as a multivariate
+ generalization of a Beta distribution. The Dirichlet distribution
+ is a conjugate prior of a multinomial distribution in Bayesian
+ inference.
+
+ Parameters
+ ----------
+ alpha : array
+ Parameter of the distribution (length k for a sample of
+ dimension k). All entries must be positive.
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+
+ Returns
+ -------
+ samples : ndarray
+ The drawn samples, of shape ``(size, k)``.
+
+ Raises
+ ------
+ ValueError
+ If any value in ``alpha`` is less than or equal to zero.
+
+ Notes
+ -----
+ The Dirichlet distribution is a distribution over vectors
+ :math:`x` that fulfil the conditions :math:`x_i>0` and
+ :math:`\\sum_{i=1}^k x_i = 1`.
+
+ The probability density function :math:`p` of a
+ Dirichlet-distributed random vector :math:`X` is
+ proportional to
+
+ .. math:: p(x) \\propto \\prod_{i=1}^{k}{x^{\\alpha_i-1}_i},
+
+ where :math:`\\alpha` is a vector containing the positive
+ concentration parameters.
+
+ The method uses the following property for computation: let :math:`Y`
+ be a random vector whose components follow standard gamma
+ distributions with shape parameters :math:`\\alpha_i`; then
+ :math:`X = \\frac{1}{\\sum_{i=1}^k{Y_i}} Y` is Dirichlet-distributed.
+
+ References
+ ----------
+ .. [1] David MacKay, "Information Theory, Inference and Learning
+ Algorithms," chapter 23,
+ http://www.inference.org.uk/mackay/itila/
+ .. [2] Wikipedia, "Dirichlet distribution",
+ https://en.wikipedia.org/wiki/Dirichlet_distribution
+
+ Examples
+ --------
+ Taking an example cited in Wikipedia, this distribution can be used if
+ one wanted to cut strings (each of initial length 1.0) into K pieces
+ with different lengths, where each piece had, on average, a designated
+ average length, but allowing some variation in the relative sizes of
+ the pieces.
+
+ >>> s = np.random.dirichlet((10, 5, 3), 20).transpose()
+
+ >>> import matplotlib.pyplot as plt
+ >>> plt.barh(range(20), s[0])
+ >>> plt.barh(range(20), s[1], left=s[0], color='g')
+ >>> plt.barh(range(20), s[2], left=s[0]+s[1], color='r')
+ >>> plt.title("Lengths of Strings")
+
+ """
+
+ # =================
+ # Pure python algo
+ # =================
+ # alpha = N.atleast_1d(alpha)
+ # k = alpha.size
+
+ # if n == 1:
+ # val = N.zeros(k)
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val)
+ # else:
+ # val = N.zeros((k, n))
+ # for i in range(k):
+ # val[i] = sgamma(alpha[i], n)
+ # val /= N.sum(val, axis = 0)
+ # val = val.T
+ # return val
+
+ cdef np.npy_intp k, totsize, i, j
+ cdef np.ndarray alpha_arr, val_arr
+ cdef double *alpha_data
+ cdef double *val_data
+ cdef double acc, invacc
+
+ k = len(alpha)
+ alpha_arr = <np.ndarray>np.PyArray_FROM_OTF(alpha, np.NPY_DOUBLE, np.NPY_ALIGNED)
+ if np.any(np.less_equal(alpha_arr, 0)):
+ raise ValueError('alpha <= 0')
+ alpha_data = <double*>np.PyArray_DATA(alpha_arr)
+
+ if size is None:
+ shape = (k,)
+ else:
+ try:
+ shape = (operator.index(size), k)
+ except:
+ shape = tuple(size) + (k,)
+
+ diric = np.zeros(shape, np.float64)
+ val_arr = <np.ndarray>diric
+ val_data = <double*>np.PyArray_DATA(val_arr)
+
+ i = 0
+ totsize = np.PyArray_SIZE(val_arr)
+ with self.lock, nogil:
+ while i < totsize:
+ acc = 0.0
+ for j in range(k):
+ val_data[i+j] = legacy_standard_gamma(self._aug_state,
+ alpha_data[j])
+ acc = acc + val_data[i + j]
+ invacc = 1/acc
+ for j in range(k):
+ val_data[i + j] = val_data[i + j] * invacc
+ i = i + k
+
+ return diric
+
+ # Shuffling and permutations:
+ def shuffle(self, object x):
+ """
+ shuffle(x)
+
+ Modify a sequence in-place by shuffling its contents.
+
+ This function only shuffles the array along the first axis of a
+ multi-dimensional array. The order of sub-arrays is changed but
+ their contents remain the same.
+
+ Parameters
+ ----------
+ x : array_like
+ The array or list to be shuffled.
+
+ Returns
+ -------
+ None
+
+ Examples
+ --------
+ >>> arr = np.arange(10)
+ >>> np.random.shuffle(arr)
+ >>> arr
+ [1 7 5 2 9 4 3 6 0 8] # random
+
+ Multi-dimensional arrays are only shuffled along the first axis:
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> np.random.shuffle(arr)
+ >>> arr
+ array([[3, 4, 5], # random
+ [6, 7, 8],
+ [0, 1, 2]])
+
+ """
+ cdef:
+ np.npy_intp i, j, n = len(x), stride, itemsize
+ char* x_ptr
+ char* buf_ptr
+
+ if type(x) is np.ndarray and x.ndim == 1 and x.size:
+ # Fast, statically typed path: shuffle the underlying buffer.
+ # Only for non-empty, 1d objects of class ndarray (subclasses such
+ # as MaskedArrays may not support this approach).
+ x_ptr = <char*><size_t>x.ctypes.data
+ stride = x.strides[0]
+ itemsize = x.dtype.itemsize
+ # As the array x could contain python objects we use a buffer
+ # of bytes for the swaps to avoid leaving one of the objects
+ # within the buffer and erroneously decrementing its refcount
+ # when the function exits.
+ buf = np.empty(itemsize, dtype=np.int8) # GC'd at function exit
+ buf_ptr = <char*><size_t>buf.ctypes.data
+ with self.lock:
+ # We trick gcc into providing a specialized implementation for
+ # the most common case, yielding a ~33% performance improvement.
+ # Note that apparently, only one branch can ever be specialized.
+ if itemsize == sizeof(np.npy_intp):
+ self._shuffle_raw(n, sizeof(np.npy_intp), stride, x_ptr, buf_ptr)
+ else:
+ self._shuffle_raw(n, itemsize, stride, x_ptr, buf_ptr)
+ elif isinstance(x, np.ndarray) and x.ndim and x.size:
+ buf = np.empty_like(x[0, ...])
+ with self.lock:
+ for i in reversed(range(1, n)):
+ j = random_interval(self._brng, i)
+ if i == j:
+ continue # i == j is not needed and memcpy is undefined.
+ buf[...] = x[j]
+ x[j] = x[i]
+ x[i] = buf
+ else:
+ # Untyped path.
+ with self.lock:
+ for i in reversed(range(1, n)):
+ j = random_interval(self._brng, i)
+ x[i], x[j] = x[j], x[i]
+
+ cdef inline _shuffle_raw(self, np.npy_intp n, np.npy_intp itemsize,
+ np.npy_intp stride, char* data, char* buf):
+ cdef np.npy_intp i, j
+ for i in reversed(range(1, n)):
+ j = random_interval(self._brng, i)
+ string.memcpy(buf, data + j * stride, itemsize)
+ string.memcpy(data + j * stride, data + i * stride, itemsize)
+ string.memcpy(data + i * stride, buf, itemsize)
+
+ def permutation(self, object x):
+ """
+ permutation(x)
+
+ Randomly permute a sequence, or return a permuted range.
+
+ If `x` is a multi-dimensional array, it is only shuffled along its
+ first index.
+
+ Parameters
+ ----------
+ x : int or array_like
+ If `x` is an integer, randomly permute ``np.arange(x)``.
+ If `x` is an array, make a copy and shuffle the elements
+ randomly.
+
+ Returns
+ -------
+ out : ndarray
+ Permuted sequence or array range.
+
+ Examples
+ --------
+ >>> np.random.permutation(10)
+ array([1, 7, 4, 3, 0, 9, 2, 5, 8, 6]) # random
+
+ >>> np.random.permutation([1, 4, 9, 12, 15])
+ array([15, 1, 9, 4, 12]) # random
+
+ >>> arr = np.arange(9).reshape((3, 3))
+ >>> np.random.permutation(arr)
+ array([[6, 7, 8], # random
+ [0, 1, 2],
+ [3, 4, 5]])
+
+ """
+ if isinstance(x, (int, long, np.integer)):
+ arr = np.arange(x)
+ self.shuffle(arr)
+ return arr
+
+ arr = np.asarray(x)
+
+ # shuffle has fast-path for 1-d
+ if arr.ndim == 1:
+ # Return a copy if same memory
+ if np.may_share_memory(arr, x):
+ arr = np.array(arr)
+ self.shuffle(arr)
+ return arr
+
+ # Shuffle index array, dtype to ensure fast path
+ idx = np.arange(arr.shape[0], dtype=np.intp)
+ self.shuffle(idx)
+ return arr[idx]
+
+_rand = RandomState()
+
+beta = _rand.beta
+binomial = _rand.binomial
+bytes = _rand.bytes
+chisquare = _rand.chisquare
+choice = _rand.choice
+dirichlet = _rand.dirichlet
+exponential = _rand.exponential
+f = _rand.f
+gamma = _rand.gamma
+get_state = _rand.get_state
+geometric = _rand.geometric
+gumbel = _rand.gumbel
+hypergeometric = _rand.hypergeometric
+laplace = _rand.laplace
+logistic = _rand.logistic
+lognormal = _rand.lognormal
+logseries = _rand.logseries
+multinomial = _rand.multinomial
+multivariate_normal = _rand.multivariate_normal
+negative_binomial = _rand.negative_binomial
+noncentral_chisquare = _rand.noncentral_chisquare
+noncentral_f = _rand.noncentral_f
+normal = _rand.normal
+pareto = _rand.pareto
+permutation = _rand.permutation
+poisson = _rand.poisson
+power = _rand.power
+rand = _rand.rand
+randint = _rand.randint
+randn = _rand.randn
+random = _rand.random_sample
+random_integers = _rand.random_integers
+random_sample = _rand.random_sample
+ranf = _rand.random_sample
+rayleigh = _rand.rayleigh
+sample = _rand.random_sample
+seed = _rand.seed
+set_state = _rand.set_state
+shuffle = _rand.shuffle
+standard_cauchy = _rand.standard_cauchy
+standard_exponential = _rand.standard_exponential
+standard_gamma = _rand.standard_gamma
+standard_normal = _rand.standard_normal
+standard_t = _rand.standard_t
+triangular = _rand.triangular
+uniform = _rand.uniform
+vonmises = _rand.vonmises
+wald = _rand.wald
+weibull = _rand.weibull
+zipf = _rand.zipf
+
+__all__ = [
+ 'beta',
+ 'binomial',
+ 'bytes',
+ 'chisquare',
+ 'choice',
+ 'dirichlet',
+ 'exponential',
+ 'f',
+ 'gamma',
+ 'geometric',
+ 'get_state',
+ 'gumbel',
+ 'hypergeometric',
+ 'laplace',
+ 'logistic',
+ 'lognormal',
+ 'logseries',
+ 'multinomial',
+ 'multivariate_normal',
+ 'negative_binomial',
+ 'noncentral_chisquare',
+ 'noncentral_f',
+ 'normal',
+ 'pareto',
+ 'permutation',
+ 'poisson',
+ 'power',
+ 'rand',
+ 'randint',
+ 'randn',
+ 'random_integers',
+ 'random_sample',
+ 'rayleigh',
+ 'seed',
+ 'set_state',
+ 'shuffle',
+ 'standard_cauchy',
+ 'standard_exponential',
+ 'standard_gamma',
+ 'standard_normal',
+ 'standard_t',
+ 'triangular',
+ 'uniform',
+ 'vonmises',
+ 'wald',
+ 'weibull',
+ 'zipf',
+ 'RandomState',
+]
+
diff --git a/_randomgen/randomgen/pcg32.pyx b/numpy/random/randomgen/pcg32.pyx
index 5c83b1040..5f2b34807 100644
--- a/_randomgen/randomgen/pcg32.pyx
+++ b/numpy/random/randomgen/pcg32.pyx
@@ -1,16 +1,17 @@
-from __future__ import absolute_import
-
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy
np.import_array()
@@ -24,14 +25,14 @@ cdef extern from "src/pcg32/pcg32.h":
ctypedef pcg_state_setseq_64 pcg32_random_t
struct s_pcg32_state:
- pcg32_random_t *pcg_state
+ pcg32_random_t *pcg_state
ctypedef s_pcg32_state pcg32_state
uint64_t pcg32_next64(pcg32_state *state) nogil
uint32_t pcg32_next32(pcg32_state *state) nogil
double pcg32_next_double(pcg32_state *state) nogil
- void pcg32_jump(pcg32_state *state)
+ void pcg32_jump(pcg32_state *state)
void pcg32_advance_state(pcg32_state *state, uint64_t step)
void pcg32_set_seed(pcg32_state *state, uint64_t seed, uint64_t inc)
@@ -123,12 +124,14 @@ cdef class PCG32:
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None, inc=0):
self.rng_state = <pcg32_state *>malloc(sizeof(pcg32_state))
self.rng_state.pcg_state = <pcg32_random_t *>malloc(sizeof(pcg32_random_t))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed, inc)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &pcg32_uint64
@@ -151,7 +154,8 @@ cdef class PCG32:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -159,42 +163,39 @@ cdef class PCG32:
free(self.rng_state)
free(self._brng)
- def __random_integer(self, bits=64):
+ def random_raw(self, size=None, output=True):
"""
- 64-bit Random Integers from the PRNG
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
Parameters
----------
- bits : {32, 64}
- Number of random bits to return
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Setting this to False is
+ useful for performance testing, since the values are discarded.
Returns
-------
- rv : int
- Next random value
+ out : uint or ndarray
+ Drawn samples.
Notes
-----
- Testing only
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
"""
- if bits == 64:
- return self._brng.next_uint64(self._brng.state)
- elif bits == 32:
- return self._brng.next_uint32(self._brng.state)
- else:
- raise ValueError('bits must be 32 or 64')
+ return random_raw(self._brng, self.lock, size, output)
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
-
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None, inc=0):
"""
@@ -218,7 +219,7 @@ cdef class PCG32:
ValueError
If seed values are out of range for the PRNG.
"""
- ub = 2 ** 64
+ ub = 2 ** 64
if seed is None:
try:
seed = <np.ndarray>random_entropy(2)
@@ -257,7 +258,7 @@ cdef class PCG32:
"""
return {'brng': self.__class__.__name__,
'state': {'state': self.rng_state.pcg_state.state,
- 'inc':self.rng_state.pcg_state.inc}}
+ 'inc': self.rng_state.pcg_state.inc}}
@state.setter
def state(self, value):
@@ -267,7 +268,7 @@ cdef class PCG32:
if brng != self.__class__.__name__:
raise ValueError('state must be for a {0} '
'PRNG'.format(self.__class__.__name__))
- self.rng_state.pcg_state.state = value['state']['state']
+ self.rng_state.pcg_state.state = value['state']['state']
self.rng_state.pcg_state.inc = value['state']['inc']
def advance(self, delta):
@@ -327,12 +328,12 @@ cdef class PCG32:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -341,25 +342,10 @@ cdef class PCG32:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&pcg32_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&pcg32_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&pcg32_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
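The ctypes property now builds its interface once through the shared prepare_ctypes helper and caches it. A sketch of calling the wrapped function pointers directly from Python, with field names taken from the docstring above and the import path assumed from this patch:

from numpy.random.randomgen import PCG32   # import path assumed from this patch

brng = PCG32(seed=0)
iface = brng.ctypes
# Both calls advance the same underlying stream as the Python-level methods.
raw = iface.next_uint64(iface.state)       # next raw unsigned 64-bit value
dbl = iface.next_double(iface.state)       # next double in [0, 1)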
@property
def cffi(self):
@@ -380,19 +366,8 @@ cdef class PCG32:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
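The cffi property likewise defers to prepare_cffi and caches the result. A parallel sketch, assuming the cffi package is installed and that the returned namedtuple exposes the same field names as the ctypes interface:

from numpy.random.randomgen import PCG32   # import path assumed from this patch

brng = PCG32(seed=0)
iface = brng.cffi                          # raises ImportError when cffi is missing
raw = iface.next_uint64(iface.state)
dbl = iface.next_double(iface.state)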
@property
def generator(self):
@@ -401,10 +376,10 @@ cdef class PCG32:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the core PRNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator \ No newline at end of file
+ return self._generator
diff --git a/_randomgen/randomgen/pcg64.pyx b/numpy/random/randomgen/pcg64.pyx
index 44e123d4c..f67d9623c 100644
--- a/_randomgen/randomgen/pcg64.pyx
+++ b/numpy/random/randomgen/pcg64.pyx
@@ -1,31 +1,32 @@
-from __future__ import absolute_import
-
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy
np.import_array()
-IF PCG_EMULATED_MATH==1:
- cdef extern from "src/pcg64/pcg64.h":
-
- ctypedef struct pcg128_t:
- uint64_t high
- uint64_t low
-ELSE:
- cdef extern from "inttypes.h":
- ctypedef unsigned long long __uint128_t
+# IF PCG_EMULATED_MATH==1:
+cdef extern from "src/pcg64/pcg64.h":
- cdef extern from "src/pcg64/pcg64.h":
- ctypedef __uint128_t pcg128_t
+ ctypedef struct pcg128_t:
+ uint64_t high
+ uint64_t low
+# ELSE:
+# cdef extern from "inttypes.h":
+# ctypedef unsigned long long __uint128_t
+#
+# cdef extern from "src/pcg64/pcg64.h":
+# ctypedef __uint128_t pcg128_t
cdef extern from "src/pcg64/pcg64.h":
@@ -36,15 +37,15 @@ cdef extern from "src/pcg64/pcg64.h":
ctypedef pcg_state_setseq_128 pcg64_random_t
struct s_pcg64_state:
- pcg64_random_t *pcg_state
- int has_uint32
- uint32_t uinteger
+ pcg64_random_t *pcg_state
+ int has_uint32
+ uint32_t uinteger
ctypedef s_pcg64_state pcg64_state
uint64_t pcg64_next64(pcg64_state *state) nogil
uint32_t pcg64_next32(pcg64_state *state) nogil
- void pcg64_jump(pcg64_state *state)
+ void pcg64_jump(pcg64_state *state)
void pcg64_advance(pcg64_state *state, uint64_t *step)
void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc)
@@ -134,12 +135,14 @@ cdef class PCG64:
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None, inc=0):
self.rng_state = <pcg64_state *>malloc(sizeof(pcg64_state))
self.rng_state.pcg_state = <pcg64_random_t *>malloc(sizeof(pcg64_random_t))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed, inc)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &pcg64_uint64
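Each basic RNG now carries a public lock that is handed to the shared random_raw and benchmark helpers. A sketch of the intended pattern when a single PCG64 instance is shared across threads (names assumed from this patch):

import threading
from numpy.random.randomgen import PCG64   # import path assumed from this patch

brng = PCG64(seed=2018)

def draw(n):
    # random_raw receives brng.lock, so concurrent callers sharing the
    # same basic RNG serialise their access to the C state.
    brng.random_raw(size=n)

threads = [threading.Thread(target=draw, args=(100000,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()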
@@ -162,7 +165,8 @@ cdef class PCG64:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -174,42 +178,39 @@ cdef class PCG64:
self.rng_state.has_uint32 = 0
self.rng_state.uinteger = 0
- def __random_integer(self, bits=64):
+ def random_raw(self, size=None, output=True):
"""
- 64-bit Random Integers from the RNG
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
Parameters
----------
- bits : {32, 64}
- Number of random bits to return
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to output the generated values. Setting to False is used for
+ performance testing, since the generated values are not returned.
Returns
-------
- rv : int
- Next random value
+ out : uint or ndarray
+ Drawn samples.
Notes
-----
- Testing only
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
"""
- if bits == 64:
- return self._brng.next_uint64(self._brng.state)
- elif bits == 32:
- return self._brng.next_uint32(self._brng.state)
- else:
- raise ValueError('bits must be 32 or 64')
+ return random_raw(self._brng, self.lock, size, output)
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
-
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None, inc=0):
"""
@@ -235,7 +236,7 @@ cdef class PCG64:
"""
cdef np.ndarray _seed, _inc
- ub = 2 ** 128
+ ub = 2 ** 128
if seed is None:
try:
_seed = <np.ndarray>random_entropy(4)
@@ -277,17 +278,18 @@ cdef class PCG64:
Dictionary containing the information required to describe the
state of the RNG
"""
- IF PCG_EMULATED_MATH==1:
- state = 2 **64 * self.rng_state.pcg_state.state.high
- state += self.rng_state.pcg_state.state.low
- inc = 2 **64 * self.rng_state.pcg_state.inc.high
- inc += self.rng_state.pcg_state.inc.low
- ELSE:
- state = self.rng_state.pcg_state.state
- inc = self.rng_state.pcg_state.inc
+ # IF PCG_EMULATED_MATH==1:
+ # TODO: push this into an #ifdef in the C code
+ state = 2 ** 64 * self.rng_state.pcg_state.state.high
+ state += self.rng_state.pcg_state.state.low
+ inc = 2 ** 64 * self.rng_state.pcg_state.inc.high
+ inc += self.rng_state.pcg_state.inc.low
+ # ELSE:
+ # state = self.rng_state.pcg_state.state
+ # inc = self.rng_state.pcg_state.inc
return {'brng': self.__class__.__name__,
- 'state': {'state': state, 'inc':inc},
+ 'state': {'state': state, 'inc': inc},
'has_uint32': self.rng_state.has_uint32,
'uinteger': self.rng_state.uinteger}
@@ -299,14 +301,14 @@ cdef class PCG64:
if brng != self.__class__.__name__:
raise ValueError('state must be for a {0} '
'RNG'.format(self.__class__.__name__))
- IF PCG_EMULATED_MATH==1:
- self.rng_state.pcg_state.state.high = value['state']['state'] // 2 ** 64
- self.rng_state.pcg_state.state.low = value['state']['state'] % 2 ** 64
- self.rng_state.pcg_state.inc.high = value['state']['inc'] // 2 ** 64
- self.rng_state.pcg_state.inc.low = value['state']['inc'] % 2 ** 64
- ELSE:
- self.rng_state.pcg_state.state = value['state']['state']
- self.rng_state.pcg_state.inc = value['state']['inc']
+ # IF PCG_EMULATED_MATH==1:
+ self.rng_state.pcg_state.state.high = value['state']['state'] // 2 ** 64
+ self.rng_state.pcg_state.state.low = value['state']['state'] % 2 ** 64
+ self.rng_state.pcg_state.inc.high = value['state']['inc'] // 2 ** 64
+ self.rng_state.pcg_state.inc.low = value['state']['inc'] % 2 ** 64
+ # ELSE:
+ # self.rng_state.pcg_state.state = value['state']['state']
+ # self.rng_state.pcg_state.inc = value['state']['inc']
self.rng_state.has_uint32 = value['has_uint32']
self.rng_state.uinteger = value['uinteger']
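With the emulated 128-bit path forced for now, the state getter and setter above convert between a single Python integer and the (high, low) pair of 64-bit words. The round trip is only the arithmetic below:

# Round trip used by the PCG64 state property under emulated 128-bit math.
def combine(high, low):
    return 2 ** 64 * high + low

def split(value):
    return divmod(value, 2 ** 64)          # (value // 2**64, value % 2**64)

state = (123 << 64) | 456                  # arbitrary 128-bit example value
high, low = split(state)
assert combine(high, low) == state
assert split(combine(high, low)) == (high, low)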
@@ -380,12 +382,12 @@ cdef class PCG64:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -394,25 +396,10 @@ cdef class PCG64:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&pcg64_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&pcg64_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&pcg64_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -433,19 +420,8 @@ cdef class PCG64:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -454,10 +430,10 @@ cdef class PCG64:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the core RNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator \ No newline at end of file
+ return self._generator
diff --git a/_randomgen/randomgen/philox.pyx b/numpy/random/randomgen/philox.pyx
index 26ece5e14..70afd55ab 100644
--- a/_randomgen/randomgen/philox.pyx
+++ b/numpy/random/randomgen/philox.pyx
@@ -1,15 +1,17 @@
-from __future__ import absolute_import
-
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
import numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy, seed_by_array
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy, seed_by_array
+
np.import_array()
@@ -25,14 +27,14 @@ cdef extern from 'src/philox/philox.h':
ctypedef s_r123array4x64 r123array4x64
ctypedef s_r123array2x64 r123array2x64
- ctypedef r123array4x64 philox4x64_ctr_t;
- ctypedef r123array2x64 philox4x64_key_t;
+ ctypedef r123array4x64 philox4x64_ctr_t
+ ctypedef r123array2x64 philox4x64_key_t
struct s_philox_state:
- philox4x64_ctr_t *ctr;
- philox4x64_key_t *key;
- int buffer_pos;
- uint64_t buffer[PHILOX_BUFFER_SIZE];
+ philox4x64_ctr_t *ctr
+ philox4x64_key_t *key
+ int buffer_pos
+ uint64_t buffer[PHILOX_BUFFER_SIZE]
int has_uint32
uint32_t uinteger
@@ -158,12 +160,13 @@ cdef class Philox:
the International Conference for High Performance Computing,
Networking, Storage and Analysis (SC11), New York, NY: ACM, 2011.
"""
- cdef philox_state *rng_state
+ cdef philox_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None, counter=None, key=None):
self.rng_state = <philox_state *> malloc(sizeof(philox_state))
@@ -173,6 +176,7 @@ cdef class Philox:
sizeof(philox4x64_key_t))
self._brng = <brng_t *> malloc(sizeof(brng_t))
self.seed(seed, counter, key)
+ self.lock = Lock()
self._brng.state = <void *> self.rng_state
self._brng.next_uint64 = &philox_uint64
@@ -195,7 +199,8 @@ cdef class Philox:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -212,16 +217,39 @@ cdef class Philox:
for i in range(PHILOX_BUFFER_SIZE):
self.rng_state.buffer[i] = 0
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to output the generated values. Setting to False is used for
+ performance testing, since the generated values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(self._brng, self.lock, size, output)
+
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None, counter=None, key=None):
"""
@@ -256,7 +284,7 @@ cdef class Philox:
"""
if seed is not None and key is not None:
raise ValueError('seed and key cannot be both used')
- ub = 2 ** 64
+ ub = 2 ** 64
if key is None:
if seed is None:
try:
@@ -392,14 +420,15 @@ cdef class Philox:
self._reset_state_variables()
return self
+ @property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -408,25 +437,10 @@ cdef class Philox:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&philox_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&philox_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&philox_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -447,19 +461,8 @@ cdef class Philox:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -468,10 +471,10 @@ cdef class Philox:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the core PRNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator \ No newline at end of file
+ return self._generator
diff --git a/numpy/random/randomgen/setup.py b/numpy/random/randomgen/setup.py
new file mode 100644
index 000000000..5b7be4559
--- /dev/null
+++ b/numpy/random/randomgen/setup.py
@@ -0,0 +1,191 @@
+from os.path import join
+import sys
+import os
+import platform
+import struct
+from distutils.dep_util import newer
+from distutils.msvccompiler import get_build_version as get_msvc_build_version
+
+def needs_mingw_ftime_workaround():
+ # We need the mingw workaround for _ftime if the msvc runtime version is
+ # 7.1 or above and we build with mingw ...
+ # ... but we can't easily detect compiler version outside distutils command
+ # context, so we will need to detect in randomkit whether we build with gcc
+ msver = get_msvc_build_version()
+ if msver and msver >= 8:
+ return True
+
+ return False
+
+
+def configuration(parent_package='',top_path=None):
+ from numpy.distutils.misc_util import Configuration, get_mathlibs
+ config = Configuration('randomgen', parent_package, top_path)
+
+ def generate_libraries(ext, build_dir):
+ config_cmd = config.get_config_cmd()
+ libs = get_mathlibs()
+ if sys.platform == 'win32':
+ libs.append('Advapi32')
+ ext.libraries.extend(libs)
+ return None
+
+ # enable unix large file support on 32 bit systems
+ # (64 bit off_t, lseek -> lseek64 etc.)
+ if sys.platform[:3] == "aix":
+ defs = [('_LARGE_FILES', None)]
+ else:
+ defs = [('_FILE_OFFSET_BITS', '64'),
+ ('_LARGEFILE_SOURCE', '1'),
+ ('_LARGEFILE64_SOURCE', '1')]
+ if needs_mingw_ftime_workaround():
+ defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
+
+ libs = []
+ defs.append(('NPY_NO_DEPRECATED_API', 0))
+ config.add_data_dir('tests')
+
+ ##############################
+ # randomgen
+ ##############################
+
+ # Make a guess as to whether SSE2 is present for now, TODO: Improve
+ USE_SSE2 = False
+ for k in platform.uname():
+ for val in ('x86', 'i686', 'i386', 'amd64'):
+ USE_SSE2 = USE_SSE2 or val in k.lower()
+ print('Building with SSE?: {0}'.format(USE_SSE2))
+ if '--no-sse2' in sys.argv:
+ USE_SSE2 = False
+ sys.argv.remove('--no-sse2')
+
+ DEBUG = False
+ PCG_EMULATED_MATH = False
+ EXTRA_LINK_ARGS = []
+ EXTRA_LIBRARIES = ['m'] if os.name != 'nt' else []
+ EXTRA_COMPILE_ARGS = [] if os.name == 'nt' else [
+ '-std=c99', '-U__GNUC_GNU_INLINE__']
+ if os.name == 'nt':
+ EXTRA_LINK_ARGS = ['/LTCG', '/OPT:REF', 'Advapi32.lib', 'Kernel32.lib']
+ if DEBUG:
+ EXTRA_LINK_ARGS += ['-debug']
+ EXTRA_COMPILE_ARGS += ["-Zi", "/Od"]
+ if sys.version_info < (3, 0):
+ EXTRA_INCLUDE_DIRS += [join(MOD_DIR, 'src', 'common')]
+
+ PCG64_DEFS = []
+ # TODO: remove the unconditional forced emulation, move code from pcg64.pyx
+ # to an #ifdef
+ if 1 or sys.maxsize < 2 ** 32 or os.name == 'nt':
+ # Force emulated mode here
+ PCG_EMULATED_MATH = True
+ PCG64_DEFS += [('PCG_FORCE_EMULATED_128BIT_MATH', '1')]
+
+ if struct.calcsize('P') < 8:
+ PCG_EMULATED_MATH = True
+ defs.append(('PCG_EMULATED_MATH', int(PCG_EMULATED_MATH)))
+
+ DSFMT_DEFS = [('DSFMT_MEXP', '19937')]
+ if USE_SSE2:
+ if os.name == 'nt':
+ EXTRA_COMPILE_ARGS += ['/wd4146', '/GL']
+ if struct.calcsize('P') < 8:
+ EXTRA_COMPILE_ARGS += ['/arch:SSE2']
+ else:
+ EXTRA_COMPILE_ARGS += ['-msse2']
+ DSFMT_DEFS += [('HAVE_SSE2', '1')]
+
+ config.add_extension('entropy',
+ sources=['entropy.c', 'src/entropy/entropy.c'],
+ include_dirs=[join('randomgen', 'src', 'entropy')],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=[join('src', 'splitmix64', 'splitmix.h'),
+ join('src', 'entropy', 'entropy.h'),
+ 'entropy.pyx',
+ ],
+ define_macros=defs,
+ )
+ config.add_extension('dsfmt',
+ sources=['dsfmt.c', 'src/dsfmt/dSFMT.c',
+ 'src/dsfmt/dSFMT-jump.c',
+ 'src/aligned_malloc/aligned_malloc.c'],
+ include_dirs=[join('src', 'dsfmt')],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=[join('src', 'dsfmt', 'dsfmt.h'),
+ 'dsfmt.pyx',
+ ],
+ define_macros=defs + DSFMT_DEFS,
+ )
+ for gen in ['mt19937']:
+ # gen.pyx, src/gen/gen.c, src/gen/gen-jump.c
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen), 'src/{0}/{0}.c'.format(gen),
+ 'src/{0}/{0}-jump.c'.format(gen)],
+ include_dirs=[join('src', gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['%s.pyx' % gen],
+ define_macros=defs,
+ )
+ for gen in ['philox', 'threefry', 'threefry32',
+ 'xoroshiro128', 'xorshift1024', 'xoshiro256starstar',
+ 'xoshiro512starstar',
+ 'pcg64', 'pcg32',
+ ]:
+ # gen.pyx, src/gen/gen.c
+ if gen == 'pcg64':
+ _defs = defs + PCG64_DEFS
+ else:
+ _defs = defs
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen), 'src/{0}/{0}.c'.format(gen)],
+ include_dirs=[join('src', gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['%s.pyx' % gen],
+ define_macros=_defs,
+ )
+ for gen in ['common']:
+ # gen.pyx
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen)],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['%s.pyx' % gen],
+ define_macros=defs,
+ )
+ for gen in ['generator', 'bounded_integers']:
+ # gen.pyx, src/distributions/distributions.c
+ config.add_extension(gen,
+ sources=['{0}.c'.format(gen),
+ join('src', 'distributions',
+ 'distributions.c')],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['%s.pyx' % gen],
+ define_macros=defs,
+ )
+ config.add_extension('mtrand',
+ sources=['mtrand.c',
+ 'src/legacy/distributions-boxmuller.c',
+ 'src/distributions/distributions.c' ],
+ include_dirs=['.', 'legacy'],
+ libraries=EXTRA_LIBRARIES,
+ extra_compile_args=EXTRA_COMPILE_ARGS,
+ extra_link_args=EXTRA_LINK_ARGS,
+ depends=['mtrand.pyx'],
+ define_macros=defs + DSFMT_DEFS,
+ )
+ config.add_subpackage('legacy')
+ return config
+if __name__ == '__main__':
+ from numpy.distutils.core import setup
+ setup(configuration=configuration)
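The SSE2 decision in configuration() is a coarse guess over the platform.uname() fields, with a custom --no-sse2 flag that is stripped from sys.argv before numpy.distutils parses the command line. The heuristic in isolation, restated for illustration:

import platform
import sys

# Same guess as configuration(): look for an x86-family token in any
# uname field, then honour an explicit opt-out flag.
USE_SSE2 = any(val in part.lower()
               for part in platform.uname()
               for val in ('x86', 'i686', 'i386', 'amd64'))
if '--no-sse2' in sys.argv:
    USE_SSE2 = False
    sys.argv.remove('--no-sse2')
print('Building with SSE?: {0}'.format(USE_SSE2))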
diff --git a/_randomgen/randomgen/src/aligned_malloc/aligned_malloc.c b/numpy/random/randomgen/src/aligned_malloc/aligned_malloc.c
index 6e8192cfb..6e8192cfb 100644
--- a/_randomgen/randomgen/src/aligned_malloc/aligned_malloc.c
+++ b/numpy/random/randomgen/src/aligned_malloc/aligned_malloc.c
diff --git a/_randomgen/randomgen/src/aligned_malloc/aligned_malloc.h b/numpy/random/randomgen/src/aligned_malloc/aligned_malloc.h
index ea24f6d23..ea24f6d23 100644
--- a/_randomgen/randomgen/src/aligned_malloc/aligned_malloc.h
+++ b/numpy/random/randomgen/src/aligned_malloc/aligned_malloc.h
diff --git a/_randomgen/randomgen/src/common/LICENSE.md b/numpy/random/randomgen/src/common/LICENSE.md
index 71bf8cf46..71bf8cf46 100644
--- a/_randomgen/randomgen/src/common/LICENSE.md
+++ b/numpy/random/randomgen/src/common/LICENSE.md
diff --git a/_randomgen/randomgen/src/common/inttypes.h b/numpy/random/randomgen/src/common/inttypes.h
index 8f8b61108..8f8b61108 100644
--- a/_randomgen/randomgen/src/common/inttypes.h
+++ b/numpy/random/randomgen/src/common/inttypes.h
diff --git a/_randomgen/randomgen/src/common/stdint.h b/numpy/random/randomgen/src/common/stdint.h
index 710de1570..710de1570 100644
--- a/_randomgen/randomgen/src/common/stdint.h
+++ b/numpy/random/randomgen/src/common/stdint.h
diff --git a/_randomgen/randomgen/src/legacy/LICENSE.md b/numpy/random/randomgen/src/distributions/LICENSE.md
index 88b1791b2..31576ba4b 100644
--- a/_randomgen/randomgen/src/legacy/LICENSE.md
+++ b/numpy/random/randomgen/src/distributions/LICENSE.md
@@ -1,3 +1,5 @@
+## NumPy
+
Copyright (c) 2005-2017, NumPy Developers.
All rights reserved.
@@ -28,3 +30,32 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+## Julia
+
+The ziggurat methods were derived from Julia.
+
+Copyright (c) 2009-2019: Jeff Bezanson, Stefan Karpinski, Viral B. Shah,
+and other contributors:
+
+https://github.com/JuliaLang/julia/contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/_randomgen/randomgen/src/distributions/binomial.h b/numpy/random/randomgen/src/distributions/binomial.h
index e69de29bb..e69de29bb 100644
--- a/_randomgen/randomgen/src/distributions/binomial.h
+++ b/numpy/random/randomgen/src/distributions/binomial.h
diff --git a/_randomgen/randomgen/src/distributions/distributions.c b/numpy/random/randomgen/src/distributions/distributions.c
index a9d8a308d..83806de38 100644
--- a/_randomgen/randomgen/src/distributions/distributions.c
+++ b/numpy/random/randomgen/src/distributions/distributions.c
@@ -41,7 +41,7 @@ void random_double_fill(brng_t *brng_state, npy_intp cnt, double *out) {
out[i] = next_double(brng_state);
}
}
-/*
+#if 0
double random_gauss(brng_t *brng_state) {
if (brng_state->has_gauss) {
const double temp = brng_state->gauss;
@@ -57,9 +57,9 @@ double random_gauss(brng_t *brng_state) {
r2 = x1 * x1 + x2 * x2;
} while (r2 >= 1.0 || r2 == 0.0);
- /* Polar method, a more efficient version of the Box-Muller approach.
+ /* Polar method, a more efficient version of the Box-Muller approach. */
f = sqrt(-2.0 * log(r2) / r2);
- /* Keep for next call
+ /* Keep for next call */
brng_state->gauss = f * x1;
brng_state->has_gauss = true;
return f * x2;
@@ -81,15 +81,15 @@ float random_gauss_f(brng_t *brng_state) {
r2 = x1 * x1 + x2 * x2;
} while (r2 >= 1.0 || r2 == 0.0);
- /* Polar method, a more efficient version of the Box-Muller approach.
+ /* Polar method, a more efficient version of the Box-Muller approach. */
f = sqrtf(-2.0f * logf(r2) / r2);
- /* Keep for next call
+ /* Keep for next call */
brng_state->gauss_f = f * x1;
brng_state->has_gauss_f = true;
return f * x2;
}
}
-*/
+#endif
static NPY_INLINE double standard_exponential_zig(brng_t *brng_state);
@@ -879,6 +879,9 @@ int64_t random_binomial(brng_t *brng_state, double p, int64_t n,
binomial_t *binomial) {
double q;
+ if ((n == 0LL) || (p == 0.0f))
+ return 0;
+
if (p <= 0.5) {
if (p * n <= 30.0) {
return random_binomial_inversion(brng_state, n, p, binomial);
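The new guard returns immediately for the degenerate cases n == 0 or p == 0, where every binomial draw is exactly zero, instead of falling through to the samplers. At the Python level the expected behaviour is simply (import path and RandomGenerator re-export assumed from this patch):

from numpy.random.randomgen import PCG64, RandomGenerator  # names assumed from this patch

gen = RandomGenerator(PCG64(seed=0))
assert (gen.binomial(0, 0.5, size=100) == 0).all()    # n == 0
assert (gen.binomial(10, 0.0, size=100) == 0).all()   # p == 0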
@@ -896,6 +899,9 @@ int64_t random_binomial(brng_t *brng_state, double p, int64_t n,
}
double random_noncentral_chisquare(brng_t *brng_state, double df, double nonc) {
+ if (npy_isnan(nonc)){
+ return NPY_NAN;
+ }
if (nonc == 0) {
return random_chisquare(brng_state, df);
}
@@ -936,7 +942,9 @@ double random_vonmises(brng_t *brng_state, double mu, double kappa) {
double U, V, W, Y, Z;
double result, mod;
int neg;
-
+ if (npy_isnan(kappa)){
+ return NPY_NAN;
+ }
if (kappa < 1e-8) {
return M_PI * (2 * next_double(brng_state) - 1);
} else {
diff --git a/_randomgen/randomgen/src/distributions/distributions.h b/numpy/random/randomgen/src/distributions/distributions.h
index 5cf9c72b2..7ca31a16c 100644
--- a/_randomgen/randomgen/src/distributions/distributions.h
+++ b/numpy/random/randomgen/src/distributions/distributions.h
@@ -20,7 +20,7 @@ typedef int bool;
#include "Python.h"
#include "numpy/npy_common.h"
-#include <math.h>
+#include "numpy/npy_math.h"
#ifdef _WIN32
#if _MSC_VER == 1500
diff --git a/_randomgen/randomgen/src/distributions/ziggurat.h b/numpy/random/randomgen/src/distributions/ziggurat.h
index 7808c0e68..7808c0e68 100644
--- a/_randomgen/randomgen/src/distributions/ziggurat.h
+++ b/numpy/random/randomgen/src/distributions/ziggurat.h
diff --git a/_randomgen/randomgen/src/distributions/ziggurat_constants.h b/numpy/random/randomgen/src/distributions/ziggurat_constants.h
index 17eccec0f..17eccec0f 100644
--- a/_randomgen/randomgen/src/distributions/ziggurat_constants.h
+++ b/numpy/random/randomgen/src/distributions/ziggurat_constants.h
diff --git a/_randomgen/randomgen/src/dsfmt/128-bit-jump.poly.txt b/numpy/random/randomgen/src/dsfmt/128-bit-jump.poly.txt
index fea1318fb..fea1318fb 100644
--- a/_randomgen/randomgen/src/dsfmt/128-bit-jump.poly.txt
+++ b/numpy/random/randomgen/src/dsfmt/128-bit-jump.poly.txt
diff --git a/_randomgen/randomgen/src/dsfmt/96-bit-jump.poly.txt b/numpy/random/randomgen/src/dsfmt/96-bit-jump.poly.txt
index 15c68d155..15c68d155 100644
--- a/_randomgen/randomgen/src/dsfmt/96-bit-jump.poly.txt
+++ b/numpy/random/randomgen/src/dsfmt/96-bit-jump.poly.txt
diff --git a/_randomgen/randomgen/src/dsfmt/LICENSE.md b/numpy/random/randomgen/src/dsfmt/LICENSE.md
index d59568f6b..d59568f6b 100644
--- a/_randomgen/randomgen/src/dsfmt/LICENSE.md
+++ b/numpy/random/randomgen/src/dsfmt/LICENSE.md
diff --git a/_randomgen/randomgen/src/dsfmt/calc-jump.cpp b/numpy/random/randomgen/src/dsfmt/calc-jump.cpp
index 495b2797c..495b2797c 100644
--- a/_randomgen/randomgen/src/dsfmt/calc-jump.cpp
+++ b/numpy/random/randomgen/src/dsfmt/calc-jump.cpp
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-benchmark.c b/numpy/random/randomgen/src/dsfmt/dSFMT-benchmark.c
index af29d0e1f..af29d0e1f 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-benchmark.c
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-benchmark.c
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-calc-jump.hpp b/numpy/random/randomgen/src/dsfmt/dSFMT-calc-jump.hpp
index b960826be..b960826be 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-calc-jump.hpp
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-calc-jump.hpp
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-common.h b/numpy/random/randomgen/src/dsfmt/dSFMT-common.h
index 30c26c08b..30c26c08b 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-common.h
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-common.h
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-jump.c b/numpy/random/randomgen/src/dsfmt/dSFMT-jump.c
index 1832bb885..1832bb885 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-jump.c
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-jump.c
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-jump.h b/numpy/random/randomgen/src/dsfmt/dSFMT-jump.h
index 689f9499a..689f9499a 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-jump.h
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-jump.h
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-params.h b/numpy/random/randomgen/src/dsfmt/dSFMT-params.h
index aa0247800..aa0247800 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-params.h
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-params.h
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-params19937.h b/numpy/random/randomgen/src/dsfmt/dSFMT-params19937.h
index a600b0dbc..a600b0dbc 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-params19937.h
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-params19937.h
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-poly.h b/numpy/random/randomgen/src/dsfmt/dSFMT-poly.h
index f8e15c3eb..f8e15c3eb 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-poly.h
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-poly.h
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT-test-gen.c b/numpy/random/randomgen/src/dsfmt/dSFMT-test-gen.c
index 697a3010a..697a3010a 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT-test-gen.c
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT-test-gen.c
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT.c b/numpy/random/randomgen/src/dsfmt/dSFMT.c
index 0f122c26c..0f122c26c 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT.c
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT.c
diff --git a/_randomgen/randomgen/src/dsfmt/dSFMT.h b/numpy/random/randomgen/src/dsfmt/dSFMT.h
index 224d0108f..224d0108f 100644
--- a/_randomgen/randomgen/src/dsfmt/dSFMT.h
+++ b/numpy/random/randomgen/src/dsfmt/dSFMT.h
diff --git a/_randomgen/randomgen/src/entropy/LICENSE.md b/numpy/random/randomgen/src/entropy/LICENSE.md
index b7276aad7..b7276aad7 100644
--- a/_randomgen/randomgen/src/entropy/LICENSE.md
+++ b/numpy/random/randomgen/src/entropy/LICENSE.md
diff --git a/_randomgen/randomgen/src/entropy/entropy.c b/numpy/random/randomgen/src/entropy/entropy.c
index ead4bef83..ead4bef83 100644
--- a/_randomgen/randomgen/src/entropy/entropy.c
+++ b/numpy/random/randomgen/src/entropy/entropy.c
diff --git a/_randomgen/randomgen/src/entropy/entropy.h b/numpy/random/randomgen/src/entropy/entropy.h
index 785603dd3..785603dd3 100644
--- a/_randomgen/randomgen/src/entropy/entropy.h
+++ b/numpy/random/randomgen/src/entropy/entropy.h
diff --git a/_randomgen/randomgen/src/distributions/LICENSE.md b/numpy/random/randomgen/src/legacy/LICENSE.md
index 88b1791b2..88b1791b2 100644
--- a/_randomgen/randomgen/src/distributions/LICENSE.md
+++ b/numpy/random/randomgen/src/legacy/LICENSE.md
diff --git a/_randomgen/randomgen/src/legacy/distributions-boxmuller.c b/numpy/random/randomgen/src/legacy/distributions-boxmuller.c
index 768de066c..5d3ba27f8 100644
--- a/_randomgen/randomgen/src/legacy/distributions-boxmuller.c
+++ b/numpy/random/randomgen/src/legacy/distributions-boxmuller.c
@@ -103,6 +103,7 @@ double legacy_chisquare(aug_brng_t *aug_state, double df) {
double legacy_noncentral_chisquare(aug_brng_t *aug_state, double df,
double nonc) {
+ double out;
if (nonc == 0) {
return legacy_chisquare(aug_state, df);
}
@@ -112,7 +113,13 @@ double legacy_noncentral_chisquare(aug_brng_t *aug_state, double df,
return Chi2 + n * n;
} else {
const long i = random_poisson(aug_state->basicrng, nonc / 2.0);
- return legacy_chisquare(aug_state, df + 2 * i);
+ out = legacy_chisquare(aug_state, df + 2 * i);
+ /* Insert nan guard here to avoid changing the stream */
+ if (npy_isnan(nonc)){
+ return NPY_NAN;
+ } else {
+ return out;
+ }
}
}
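A small Python model of the ordering used above, restricted to the branch that contains the guard: the chi-square draw is taken unconditionally, so the amount of randomness consumed matches the legacy stream, and NaN is only substituted afterwards. The chisquare and poisson callables below are illustrative stand-ins, not functions from the patch:

import math

def noncentral_chisquare_else_branch(chisquare, poisson, df, nonc):
    i = poisson(nonc / 2.0)
    out = chisquare(df + 2 * i)            # always drawn, stream preserved
    return float('nan') if math.isnan(nonc) else out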
diff --git a/_randomgen/randomgen/src/legacy/distributions-boxmuller.h b/numpy/random/randomgen/src/legacy/distributions-boxmuller.h
index 445686e6c..445686e6c 100644
--- a/_randomgen/randomgen/src/legacy/distributions-boxmuller.h
+++ b/numpy/random/randomgen/src/legacy/distributions-boxmuller.h
diff --git a/_randomgen/randomgen/src/mt19937/LICENSE.md b/numpy/random/randomgen/src/mt19937/LICENSE.md
index f65c3d46e..f65c3d46e 100644
--- a/_randomgen/randomgen/src/mt19937/LICENSE.md
+++ b/numpy/random/randomgen/src/mt19937/LICENSE.md
diff --git a/_randomgen/randomgen/src/mt19937/mt19937-benchmark.c b/numpy/random/randomgen/src/mt19937/mt19937-benchmark.c
index 039f8030a..039f8030a 100644
--- a/_randomgen/randomgen/src/mt19937/mt19937-benchmark.c
+++ b/numpy/random/randomgen/src/mt19937/mt19937-benchmark.c
diff --git a/_randomgen/randomgen/src/mt19937/mt19937-jump.c b/numpy/random/randomgen/src/mt19937/mt19937-jump.c
index 46b28cf96..46b28cf96 100644
--- a/_randomgen/randomgen/src/mt19937/mt19937-jump.c
+++ b/numpy/random/randomgen/src/mt19937/mt19937-jump.c
diff --git a/_randomgen/randomgen/src/mt19937/mt19937-jump.h b/numpy/random/randomgen/src/mt19937/mt19937-jump.h
index 394c150a0..394c150a0 100644
--- a/_randomgen/randomgen/src/mt19937/mt19937-jump.h
+++ b/numpy/random/randomgen/src/mt19937/mt19937-jump.h
diff --git a/_randomgen/randomgen/src/mt19937/mt19937-poly.h b/numpy/random/randomgen/src/mt19937/mt19937-poly.h
index b03747881..b03747881 100644
--- a/_randomgen/randomgen/src/mt19937/mt19937-poly.h
+++ b/numpy/random/randomgen/src/mt19937/mt19937-poly.h
diff --git a/_randomgen/randomgen/src/mt19937/mt19937-test-data-gen.c b/numpy/random/randomgen/src/mt19937/mt19937-test-data-gen.c
index 4f4ec1d64..4f4ec1d64 100644
--- a/_randomgen/randomgen/src/mt19937/mt19937-test-data-gen.c
+++ b/numpy/random/randomgen/src/mt19937/mt19937-test-data-gen.c
diff --git a/_randomgen/randomgen/src/mt19937/mt19937.c b/numpy/random/randomgen/src/mt19937/mt19937.c
index e5ca9e0cf..e5ca9e0cf 100644
--- a/_randomgen/randomgen/src/mt19937/mt19937.c
+++ b/numpy/random/randomgen/src/mt19937/mt19937.c
diff --git a/_randomgen/randomgen/src/mt19937/mt19937.h b/numpy/random/randomgen/src/mt19937/mt19937.h
index 8105329ec..8105329ec 100644
--- a/_randomgen/randomgen/src/mt19937/mt19937.h
+++ b/numpy/random/randomgen/src/mt19937/mt19937.h
diff --git a/_randomgen/randomgen/src/mt19937/randomkit.c b/numpy/random/randomgen/src/mt19937/randomkit.c
index f8ed4b49e..f8ed4b49e 100644
--- a/_randomgen/randomgen/src/mt19937/randomkit.c
+++ b/numpy/random/randomgen/src/mt19937/randomkit.c
diff --git a/_randomgen/randomgen/src/mt19937/randomkit.h b/numpy/random/randomgen/src/mt19937/randomkit.h
index abb082cb2..abb082cb2 100644
--- a/_randomgen/randomgen/src/mt19937/randomkit.h
+++ b/numpy/random/randomgen/src/mt19937/randomkit.h
diff --git a/_randomgen/randomgen/src/pcg32/LICENSE.md b/numpy/random/randomgen/src/pcg32/LICENSE.md
index 3db2ac2e8..3db2ac2e8 100644
--- a/_randomgen/randomgen/src/pcg32/LICENSE.md
+++ b/numpy/random/randomgen/src/pcg32/LICENSE.md
diff --git a/_randomgen/randomgen/src/pcg32/pcg-advance-64.c b/numpy/random/randomgen/src/pcg32/pcg-advance-64.c
index 8210e7565..8210e7565 100644
--- a/_randomgen/randomgen/src/pcg32/pcg-advance-64.c
+++ b/numpy/random/randomgen/src/pcg32/pcg-advance-64.c
diff --git a/_randomgen/randomgen/src/pcg32/pcg32-test-data-gen.c b/numpy/random/randomgen/src/pcg32/pcg32-test-data-gen.c
index cccaf84b9..cccaf84b9 100644
--- a/_randomgen/randomgen/src/pcg32/pcg32-test-data-gen.c
+++ b/numpy/random/randomgen/src/pcg32/pcg32-test-data-gen.c
diff --git a/_randomgen/randomgen/src/pcg32/pcg32.c b/numpy/random/randomgen/src/pcg32/pcg32.c
index 5fbf6759f..5fbf6759f 100644
--- a/_randomgen/randomgen/src/pcg32/pcg32.c
+++ b/numpy/random/randomgen/src/pcg32/pcg32.c
diff --git a/_randomgen/randomgen/src/pcg32/pcg32.h b/numpy/random/randomgen/src/pcg32/pcg32.h
index 15410bd82..557113d8f 100644
--- a/_randomgen/randomgen/src/pcg32/pcg32.h
+++ b/numpy/random/randomgen/src/pcg32/pcg32.h
@@ -1,3 +1,5 @@
+#ifndef _RANDOMDGEN__PCG32_H_
+#define _RANDOMDGEN__PCG32_H_
#ifdef _WIN32
#ifndef _INTTYPES
@@ -83,3 +85,5 @@ static inline double pcg32_next_double(pcg32_state *state) {
void pcg32_advance_state(pcg32_state *state, uint64_t step);
void pcg32_set_seed(pcg32_state *state, uint64_t seed, uint64_t inc);
+
+#endif
diff --git a/_randomgen/randomgen/src/pcg32/pcg_variants.h b/numpy/random/randomgen/src/pcg32/pcg_variants.h
index 32daac1ce..32daac1ce 100644
--- a/_randomgen/randomgen/src/pcg32/pcg_variants.h
+++ b/numpy/random/randomgen/src/pcg32/pcg_variants.h
diff --git a/_randomgen/randomgen/src/pcg64/LICENSE.md b/numpy/random/randomgen/src/pcg64/LICENSE.md
index dd6a17ee8..dd6a17ee8 100644
--- a/_randomgen/randomgen/src/pcg64/LICENSE.md
+++ b/numpy/random/randomgen/src/pcg64/LICENSE.md
diff --git a/_randomgen/randomgen/src/pcg64/pcg64-benchmark.c b/numpy/random/randomgen/src/pcg64/pcg64-benchmark.c
index 76f3ec78c..76f3ec78c 100644
--- a/_randomgen/randomgen/src/pcg64/pcg64-benchmark.c
+++ b/numpy/random/randomgen/src/pcg64/pcg64-benchmark.c
diff --git a/_randomgen/randomgen/src/pcg64/pcg64-test-data-gen.c b/numpy/random/randomgen/src/pcg64/pcg64-test-data-gen.c
index 0c2b079a3..0c2b079a3 100644
--- a/_randomgen/randomgen/src/pcg64/pcg64-test-data-gen.c
+++ b/numpy/random/randomgen/src/pcg64/pcg64-test-data-gen.c
diff --git a/_randomgen/randomgen/src/pcg64/pcg64.c b/numpy/random/randomgen/src/pcg64/pcg64.c
index c7c1eb045..c7c1eb045 100644
--- a/_randomgen/randomgen/src/pcg64/pcg64.c
+++ b/numpy/random/randomgen/src/pcg64/pcg64.c
diff --git a/_randomgen/randomgen/src/pcg64/pcg64.h b/numpy/random/randomgen/src/pcg64/pcg64.h
index 854930176..156c73a36 100644
--- a/_randomgen/randomgen/src/pcg64/pcg64.h
+++ b/numpy/random/randomgen/src/pcg64/pcg64.h
@@ -212,8 +212,6 @@ typedef pcg_state_setseq_128 pcg64_random_t;
}
#endif
-#endif /* PCG64_H_INCLUDED */
-
typedef struct s_pcg64_state {
pcg64_random_t *pcg_state;
int has_uint32;
@@ -239,3 +237,5 @@ static inline uint32_t pcg64_next32(pcg64_state *state) {
void pcg64_advance(pcg64_state *state, uint64_t *step);
void pcg64_set_seed(pcg64_state *state, uint64_t *seed, uint64_t *inc);
+
+#endif /* PCG64_H_INCLUDED */
diff --git a/_randomgen/randomgen/src/pcg64/pcg64.orig.c b/numpy/random/randomgen/src/pcg64/pcg64.orig.c
index 07e97e4b6..07e97e4b6 100644
--- a/_randomgen/randomgen/src/pcg64/pcg64.orig.c
+++ b/numpy/random/randomgen/src/pcg64/pcg64.orig.c
diff --git a/_randomgen/randomgen/src/pcg64/pcg64.orig.h b/numpy/random/randomgen/src/pcg64/pcg64.orig.h
index 74be91f31..74be91f31 100644
--- a/_randomgen/randomgen/src/pcg64/pcg64.orig.h
+++ b/numpy/random/randomgen/src/pcg64/pcg64.orig.h
diff --git a/_randomgen/randomgen/src/philox/LICENSE.md b/numpy/random/randomgen/src/philox/LICENSE.md
index 4a9f6bb29..4a9f6bb29 100644
--- a/_randomgen/randomgen/src/philox/LICENSE.md
+++ b/numpy/random/randomgen/src/philox/LICENSE.md
diff --git a/_randomgen/randomgen/src/philox/philox-benchmark.c b/numpy/random/randomgen/src/philox/philox-benchmark.c
index 0cab04cf5..0cab04cf5 100644
--- a/_randomgen/randomgen/src/philox/philox-benchmark.c
+++ b/numpy/random/randomgen/src/philox/philox-benchmark.c
diff --git a/_randomgen/randomgen/src/philox/philox-test-data-gen.c b/numpy/random/randomgen/src/philox/philox-test-data-gen.c
index 442e18b55..442e18b55 100644
--- a/_randomgen/randomgen/src/philox/philox-test-data-gen.c
+++ b/numpy/random/randomgen/src/philox/philox-test-data-gen.c
diff --git a/_randomgen/randomgen/src/philox/philox.c b/numpy/random/randomgen/src/philox/philox.c
index 3382c60d6..3382c60d6 100644
--- a/_randomgen/randomgen/src/philox/philox.c
+++ b/numpy/random/randomgen/src/philox/philox.c
diff --git a/_randomgen/randomgen/src/philox/philox.h b/numpy/random/randomgen/src/philox/philox.h
index 411404b55..411404b55 100644
--- a/_randomgen/randomgen/src/philox/philox.h
+++ b/numpy/random/randomgen/src/philox/philox.h
diff --git a/_randomgen/randomgen/src/splitmix64/LICENSE.md b/numpy/random/randomgen/src/splitmix64/LICENSE.md
index 3c4d73b92..3c4d73b92 100644
--- a/_randomgen/randomgen/src/splitmix64/LICENSE.md
+++ b/numpy/random/randomgen/src/splitmix64/LICENSE.md
diff --git a/_randomgen/randomgen/src/splitmix64/splitmix64.c b/numpy/random/randomgen/src/splitmix64/splitmix64.c
index 79a845982..79a845982 100644
--- a/_randomgen/randomgen/src/splitmix64/splitmix64.c
+++ b/numpy/random/randomgen/src/splitmix64/splitmix64.c
diff --git a/_randomgen/randomgen/src/splitmix64/splitmix64.h b/numpy/random/randomgen/src/splitmix64/splitmix64.h
index 880132970..880132970 100644
--- a/_randomgen/randomgen/src/splitmix64/splitmix64.h
+++ b/numpy/random/randomgen/src/splitmix64/splitmix64.h
diff --git a/_randomgen/randomgen/src/splitmix64/splitmix64.orig.c b/numpy/random/randomgen/src/splitmix64/splitmix64.orig.c
index df6133aab..df6133aab 100644
--- a/_randomgen/randomgen/src/splitmix64/splitmix64.orig.c
+++ b/numpy/random/randomgen/src/splitmix64/splitmix64.orig.c
diff --git a/_randomgen/randomgen/src/threefry/LICENSE.md b/numpy/random/randomgen/src/threefry/LICENSE.md
index 4a9f6bb29..4a9f6bb29 100644
--- a/_randomgen/randomgen/src/threefry/LICENSE.md
+++ b/numpy/random/randomgen/src/threefry/LICENSE.md
diff --git a/_randomgen/randomgen/src/threefry/threefry-benchmark.c b/numpy/random/randomgen/src/threefry/threefry-benchmark.c
index 6d6239cd3..6d6239cd3 100644
--- a/_randomgen/randomgen/src/threefry/threefry-benchmark.c
+++ b/numpy/random/randomgen/src/threefry/threefry-benchmark.c
diff --git a/_randomgen/randomgen/src/threefry/threefry-orig.c b/numpy/random/randomgen/src/threefry/threefry-orig.c
index d27cfd797..d27cfd797 100644
--- a/_randomgen/randomgen/src/threefry/threefry-orig.c
+++ b/numpy/random/randomgen/src/threefry/threefry-orig.c
diff --git a/_randomgen/randomgen/src/threefry/threefry-test-data-gen.c b/numpy/random/randomgen/src/threefry/threefry-test-data-gen.c
index 328eb2575..328eb2575 100644
--- a/_randomgen/randomgen/src/threefry/threefry-test-data-gen.c
+++ b/numpy/random/randomgen/src/threefry/threefry-test-data-gen.c
diff --git a/_randomgen/randomgen/src/threefry/threefry.c b/numpy/random/randomgen/src/threefry/threefry.c
index 19c37df1b..19c37df1b 100644
--- a/_randomgen/randomgen/src/threefry/threefry.c
+++ b/numpy/random/randomgen/src/threefry/threefry.c
diff --git a/_randomgen/randomgen/src/threefry/threefry.h b/numpy/random/randomgen/src/threefry/threefry.h
index 297c1241a..297c1241a 100644
--- a/_randomgen/randomgen/src/threefry/threefry.h
+++ b/numpy/random/randomgen/src/threefry/threefry.h
diff --git a/_randomgen/randomgen/src/threefry32/LICENSE.md b/numpy/random/randomgen/src/threefry32/LICENSE.md
index 591cd75f4..591cd75f4 100644
--- a/_randomgen/randomgen/src/threefry32/LICENSE.md
+++ b/numpy/random/randomgen/src/threefry32/LICENSE.md
diff --git a/_randomgen/randomgen/src/threefry32/threefry32-test-data-gen.c b/numpy/random/randomgen/src/threefry32/threefry32-test-data-gen.c
index 0e6229995..0e6229995 100644
--- a/_randomgen/randomgen/src/threefry32/threefry32-test-data-gen.c
+++ b/numpy/random/randomgen/src/threefry32/threefry32-test-data-gen.c
diff --git a/_randomgen/randomgen/src/threefry32/threefry32.c b/numpy/random/randomgen/src/threefry32/threefry32.c
index 500e9482d..500e9482d 100644
--- a/_randomgen/randomgen/src/threefry32/threefry32.c
+++ b/numpy/random/randomgen/src/threefry32/threefry32.c
diff --git a/_randomgen/randomgen/src/threefry32/threefry32.h b/numpy/random/randomgen/src/threefry32/threefry32.h
index 74a85c42b..74a85c42b 100644
--- a/_randomgen/randomgen/src/threefry32/threefry32.h
+++ b/numpy/random/randomgen/src/threefry32/threefry32.h
diff --git a/_randomgen/randomgen/src/xoroshiro128/LICENSE.md b/numpy/random/randomgen/src/xoroshiro128/LICENSE.md
index 969430149..969430149 100644
--- a/_randomgen/randomgen/src/xoroshiro128/LICENSE.md
+++ b/numpy/random/randomgen/src/xoroshiro128/LICENSE.md
diff --git a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128-benchmark.c b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128-benchmark.c
index 108058eeb..9a7b52bfb 100644
--- a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128-benchmark.c
+++ b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128-benchmark.c
@@ -14,14 +14,16 @@
#define N 1000000000
-int main() {
+int main()
+{
uint64_t count = 0, sum = 0;
uint64_t seed = 0xDEADBEAF;
s[0] = splitmix64_next(&seed);
s[1] = splitmix64_next(&seed);
int i;
clock_t begin = clock();
- for (i = 0; i < N; i++) {
+ for (i = 0; i < N; i++)
+ {
sum += next();
count++;
}
diff --git a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128-test-data-gen.c b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128-test-data-gen.c
index d95260eca..d50e63f5e 100644
--- a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128-test-data-gen.c
+++ b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128-test-data-gen.c
@@ -21,50 +21,61 @@
#define N 1000
-int main() {
+int main()
+{
uint64_t sum = 0;
uint64_t state, seed = 0xDEADBEAF;
state = seed;
int i;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < 2; i++)
+ {
s[i] = splitmix64_next(&state);
}
uint64_t store[N];
- for (i = 0; i < N; i++) {
+ for (i = 0; i < N; i++)
+ {
store[i] = next();
}
FILE *fp;
fp = fopen("xoroshiro128-testset-1.csv", "w");
- if (fp == NULL) {
+ if (fp == NULL)
+ {
printf("Couldn't open file\n");
return -1;
}
fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
- for (i = 0; i < N; i++) {
+ for (i = 0; i < N; i++)
+ {
fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
- if (i == 999) {
+ if (i == 999)
+ {
printf("%d, 0x%" PRIx64 "\n", i, store[i]);
}
}
fclose(fp);
seed = state = 0;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < 2; i++)
+ {
s[i] = splitmix64_next(&state);
}
- for (i = 0; i < N; i++) {
+ for (i = 0; i < N; i++)
+ {
store[i] = next();
}
fp = fopen("xoroshiro128-testset-2.csv", "w");
- if (fp == NULL) {
+ if (fp == NULL)
+ {
printf("Couldn't open file\n");
return -1;
}
fprintf(fp, "seed, 0x%" PRIx64 "\n", seed);
- for (i = 0; i < N; i++) {
+ for (i = 0; i < N; i++)
+ {
fprintf(fp, "%d, 0x%" PRIx64 "\n", i, store[i]);
- if (i == 999) {
+ if (i == 999)
+ {
printf("%d, 0x%" PRIx64 "\n", i, store[i]);
}
}
diff --git a/numpy/random/randomgen/src/xoroshiro128/xoroshiro128.c b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128.c
new file mode 100644
index 000000000..060eb8a51
--- /dev/null
+++ b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128.c
@@ -0,0 +1,60 @@
+/* Written in 2016-2018 by David Blackman and Sebastiano Vigna (vigna@acm.org)
+
+To the extent possible under law, the author has dedicated all copyright
+and related and neighboring rights to this software to the public domain
+worldwide. This software is distributed without any warranty.
+
+See <http://creativecommons.org/publicdomain/zero/1.0/>. */
+
+/* This is xoroshiro128+ 1.0, our best and fastest small-state generator
+ for floating-point numbers. We suggest to use its upper bits for
+ floating-point generation, as it is slightly faster than
+ xoroshiro128**. It passes all tests we are aware of except for the four
+ lower bits, which might fail linearity tests (and just those), so if
+ low linear complexity is not considered an issue (as it is usually the
+ case) it can be used to generate 64-bit outputs, too; moreover, this
+ generator has a very mild Hamming-weight dependency making our test
+ (http://prng.di.unimi.it/hwd.php) fail after 5 TB of output; we believe
+ this slight bias cannot affect any application. If you are concerned,
+ use xoroshiro128** or xoshiro256+.
+
+ We suggest to use a sign test to extract a random Boolean value, and
+ right shifts to extract subsets of bits.
+
+ The state must be seeded so that it is not everywhere zero. If you have
+ a 64-bit seed, we suggest to seed a splitmix64 generator and use its
+ output to fill s.
+
+ NOTE: the parameters (a=24, b=16, c=37) of this version give slightly
+ better results in our test than the 2016 version (a=55, b=14, c=36).
+*/
+
+#include "xoroshiro128.h"
+
+extern INLINE uint64_t xoroshiro128_next64(xoroshiro128_state *state);
+
+extern INLINE uint32_t xoroshiro128_next32(xoroshiro128_state *state);
+
+void xoroshiro128_jump(xoroshiro128_state *state)
+{
+ int i, b;
+ uint64_t s0;
+ uint64_t s1;
+ static const uint64_t JUMP[] = {0xdf900294d8f554a5, 0x170865df4b3201fc};
+
+ s0 = 0;
+ s1 = 0;
+ for (i = 0; i < sizeof JUMP / sizeof *JUMP; i++)
+ for (b = 0; b < 64; b++)
+ {
+ if (JUMP[i] & UINT64_C(1) << b)
+ {
+ s0 ^= state->s[0];
+ s1 ^= state->s[1];
+ }
+ xoroshiro128_next(&state->s[0]);
+ }
+
+ state->s[0] = s0;
+ state->s[1] = s1;
+}
diff --git a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128.h b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128.h
index 40cb39218..0db82b173 100644
--- a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128.h
+++ b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128.h
@@ -14,35 +14,41 @@
#define INLINE inline
#endif
-typedef struct s_xoroshiro128_state {
+typedef struct s_xoroshiro128_state
+{
uint64_t s[2];
int has_uint32;
uint32_t uinteger;
} xoroshiro128_state;
-static INLINE uint64_t rotl(const uint64_t x, int k) {
+static INLINE uint64_t rotl(const uint64_t x, int k)
+{
return (x << k) | (x >> (64 - k));
}
-static INLINE uint64_t xoroshiro128_next(uint64_t *s) {
+static INLINE uint64_t xoroshiro128_next(uint64_t *s)
+{
const uint64_t s0 = s[0];
uint64_t s1 = s[1];
const uint64_t result = s0 + s1;
s1 ^= s0;
- s[0] = rotl(s0, 55) ^ s1 ^ (s1 << 14); // a, b
- s[1] = rotl(s1, 36); // c
+ s[0] = rotl(s0, 24) ^ s1 ^ (s1 << 16); // a, b
+ s[1] = rotl(s1, 37); // c
return result;
}
-static INLINE uint64_t xoroshiro128_next64(xoroshiro128_state *state) {
+static INLINE uint64_t xoroshiro128_next64(xoroshiro128_state *state)
+{
return xoroshiro128_next(&state->s[0]);
}
-static INLINE uint32_t xoroshiro128_next32(xoroshiro128_state *state) {
+static INLINE uint32_t xoroshiro128_next32(xoroshiro128_state *state)
+{
uint64_t next;
- if (state->has_uint32) {
+ if (state->has_uint32)
+ {
state->has_uint32 = 0;
return state->uinteger;
}
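A compact Python model of xoroshiro128+ with the updated parameters (a=24, b=16, c=37) and of the bit-by-bit jump loop from xoroshiro128.c above; values are masked to 64 bits because Python integers are unbounded. This is an illustration of the technique, not code from the patch:

MASK64 = (1 << 64) - 1

def rotl(x, k):
    return ((x << k) | (x >> (64 - k))) & MASK64

def next64(s):
    # xoroshiro128+ recurrence with the 2018 parameters a=24, b=16, c=37.
    s0, s1 = s
    result = (s0 + s1) & MASK64
    s1 ^= s0
    s[0] = rotl(s0, 24) ^ s1 ^ ((s1 << 16) & MASK64)
    s[1] = rotl(s1, 37)
    return result

def jump(s, poly=(0xdf900294d8f554a5, 0x170865df4b3201fc)):
    # XOR-accumulate the states visited at the set bits of the jump
    # polynomial, stepping the generator once per bit (128 steps total).
    s0 = s1 = 0
    for word in poly:
        for b in range(64):
            if word & (1 << b):
                s0 ^= s[0]
                s1 ^= s[1]
            next64(s)
    s[0], s[1] = s0, s1

s = [0x0123456789abcdef, 0xfedcba9876543210]   # any nonzero seed words
jump(s)                    # equivalent to 2**64 calls to next64 per the C comment
print(hex(next64(s)))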
diff --git a/numpy/random/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c
new file mode 100644
index 000000000..1b5f46e4b
--- /dev/null
+++ b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128plus.orig.c
@@ -0,0 +1,102 @@
+/* Written in 2016-2018 by David Blackman and Sebastiano Vigna (vigna@acm.org)
+
+To the extent possible under law, the author has dedicated all copyright
+and related and neighboring rights to this software to the public domain
+worldwide. This software is distributed without any warranty.
+
+See <http://creativecommons.org/publicdomain/zero/1.0/>. */
+
+#include <stdint.h>
+
+/* This is xoroshiro128+ 1.0, our best and fastest small-state generator
+ for floating-point numbers. We suggest to use its upper bits for
+ floating-point generation, as it is slightly faster than
+ xoroshiro128**. It passes all tests we are aware of except for the four
+ lower bits, which might fail linearity tests (and just those), so if
+ low linear complexity is not considered an issue (as it is usually the
+ case) it can be used to generate 64-bit outputs, too; moreover, this
+ generator has a very mild Hamming-weight dependency making our test
+ (http://prng.di.unimi.it/hwd.php) fail after 5 TB of output; we believe
+ this slight bias cannot affect any application. If you are concerned,
+ use xoroshiro128** or xoshiro256+.
+
+ We suggest to use a sign test to extract a random Boolean value, and
+ right shifts to extract subsets of bits.
+
+ The state must be seeded so that it is not everywhere zero. If you have
+ a 64-bit seed, we suggest to seed a splitmix64 generator and use its
+ output to fill s.
+
+ NOTE: the parameters (a=24, b=16, c=37) of this version give slightly
+ better results in our test than the 2016 version (a=55, b=14, c=36).
+*/
+
+uint64_t s[2];
+
+static inline uint64_t rotl(const uint64_t x, int k)
+{
+ return (x << k) | (x >> (64 - k));
+}
+
+uint64_t next(void)
+{
+ const uint64_t s0 = s[0];
+ uint64_t s1 = s[1];
+ const uint64_t result = s0 + s1;
+
+ s1 ^= s0;
+ s[0] = rotl(s0, 24) ^ s1 ^ (s1 << 16); // a, b
+ s[1] = rotl(s1, 37); // c
+
+ return result;
+}
+
+/* This is the jump function for the generator. It is equivalent
+ to 2^64 calls to next(); it can be used to generate 2^64
+ non-overlapping subsequences for parallel computations. */
+
+void jump(void)
+{
+ static const uint64_t JUMP[] = {0xdf900294d8f554a5, 0x170865df4b3201fc};
+
+ uint64_t s0 = 0;
+ uint64_t s1 = 0;
+ for (int i = 0; i < sizeof JUMP / sizeof *JUMP; i++)
+ for (int b = 0; b < 64; b++)
+ {
+ if (JUMP[i] & UINT64_C(1) << b)
+ {
+ s0 ^= s[0];
+ s1 ^= s[1];
+ }
+ next();
+ }
+ s[0] = s0;
+ s[1] = s1;
+}
+
+/* This is the long-jump function for the generator. It is equivalent to
+ 2^96 calls to next(); it can be used to generate 2^32 starting points,
+ from each of which jump() will generate 2^32 non-overlapping
+ subsequences for parallel distributed computations. */
+
+void long_jump(void)
+{
+ static const uint64_t LONG_JUMP[] = {0xd2a98b26625eee7b, 0xdddf9b1090aa7ac1};
+
+ uint64_t s0 = 0;
+ uint64_t s1 = 0;
+ for (int i = 0; i < sizeof LONG_JUMP / sizeof *LONG_JUMP; i++)
+ for (int b = 0; b < 64; b++)
+ {
+ if (LONG_JUMP[i] & UINT64_C(1) << b)
+ {
+ s0 ^= s[0];
+ s1 ^= s[1];
+ }
+ next();
+ }
+
+ s[0] = s0;
+ s[1] = s1;
+}
diff --git a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128plus.orig.h b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128plus.orig.h
index 20c96fe04..20c96fe04 100644
--- a/_randomgen/randomgen/src/xoroshiro128/xoroshiro128plus.orig.h
+++ b/numpy/random/randomgen/src/xoroshiro128/xoroshiro128plus.orig.h
diff --git a/_randomgen/randomgen/src/xorshift1024/LICENSE.md b/numpy/random/randomgen/src/xorshift1024/LICENSE.md
index 3ca8ed4b9..3ca8ed4b9 100644
--- a/_randomgen/randomgen/src/xorshift1024/LICENSE.md
+++ b/numpy/random/randomgen/src/xorshift1024/LICENSE.md
diff --git a/_randomgen/randomgen/src/xorshift1024/xorshift1024-benchmark.c b/numpy/random/randomgen/src/xorshift1024/xorshift1024-benchmark.c
index 0eef33537..0eef33537 100644
--- a/_randomgen/randomgen/src/xorshift1024/xorshift1024-benchmark.c
+++ b/numpy/random/randomgen/src/xorshift1024/xorshift1024-benchmark.c
diff --git a/_randomgen/randomgen/src/xorshift1024/xorshift1024-test-data-gen.c b/numpy/random/randomgen/src/xorshift1024/xorshift1024-test-data-gen.c
index a2ae08df4..a2ae08df4 100644
--- a/_randomgen/randomgen/src/xorshift1024/xorshift1024-test-data-gen.c
+++ b/numpy/random/randomgen/src/xorshift1024/xorshift1024-test-data-gen.c
diff --git a/_randomgen/randomgen/src/xorshift1024/xorshift1024.c b/numpy/random/randomgen/src/xorshift1024/xorshift1024.c
index 8737b5a82..8737b5a82 100644
--- a/_randomgen/randomgen/src/xorshift1024/xorshift1024.c
+++ b/numpy/random/randomgen/src/xorshift1024/xorshift1024.c
diff --git a/_randomgen/randomgen/src/xorshift1024/xorshift1024.h b/numpy/random/randomgen/src/xorshift1024/xorshift1024.h
index e0ef77826..e0ef77826 100644
--- a/_randomgen/randomgen/src/xorshift1024/xorshift1024.h
+++ b/numpy/random/randomgen/src/xorshift1024/xorshift1024.h
diff --git a/_randomgen/randomgen/src/xorshift1024/xorshift1024.orig.c b/numpy/random/randomgen/src/xorshift1024/xorshift1024.orig.c
index e4f899fb7..03c1c17fe 100644
--- a/_randomgen/randomgen/src/xorshift1024/xorshift1024.orig.c
+++ b/numpy/random/randomgen/src/xorshift1024/xorshift1024.orig.c
@@ -14,7 +14,7 @@ See <http://creativecommons.org/publicdomain/zero/1.0/>. */
linear dependencies from one of the lowest bits. The previous
multiplier was 1181783497276652981 (M_8 in the paper). If you need to
tell apart the two generators, you can refer to this generator as
- xorshift1024*φ and to the previous one as xorshift1024*M_8.
+ xorshift1024φ and to the previous one as xorshift1024*M_8.
This is a fast, high-quality generator. If 1024 bits of state are too
much, try a xoroshiro128+ generator.
@@ -36,7 +36,7 @@ int p;
uint64_t next(void) {
const uint64_t s0 = s[p];
uint64_t s1 = s[p = (p + 1) & 15];
- s1 ^= s1 << 31; // a
+ s1 ^= s1 << 31; // a
s[p] = s1 ^ s0 ^ (s1 >> 11) ^ (s0 >> 30); // b,c
return s[p] * 0x9e3779b97f4a7c13;
}
diff --git a/_randomgen/randomgen/src/xorshift1024/xorshift1024.orig.h b/numpy/random/randomgen/src/xorshift1024/xorshift1024.orig.h
index 9b7597967..9b7597967 100644
--- a/_randomgen/randomgen/src/xorshift1024/xorshift1024.orig.h
+++ b/numpy/random/randomgen/src/xorshift1024/xorshift1024.orig.h
diff --git a/_randomgen/randomgen/src/xoshiro256starstar/LICENSE.md b/numpy/random/randomgen/src/xoshiro256starstar/LICENSE.md
index d863f3b29..d863f3b29 100644
--- a/_randomgen/randomgen/src/xoshiro256starstar/LICENSE.md
+++ b/numpy/random/randomgen/src/xoshiro256starstar/LICENSE.md
diff --git a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar-test-data-gen.c b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar-test-data-gen.c
index 8522229dd..8522229dd 100644
--- a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar-test-data-gen.c
+++ b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar-test-data-gen.c
diff --git a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.c b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.c
index 30b6c7d85..30b6c7d85 100644
--- a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.c
+++ b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.c
diff --git a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.h b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.h
index 1d7d8ea40..1d7d8ea40 100644
--- a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.h
+++ b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.h
diff --git a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.c b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.c
index ecf87bab9..ecf87bab9 100644
--- a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.c
+++ b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.c
diff --git a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.h b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.h
index 3aa788ec9..3aa788ec9 100644
--- a/_randomgen/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.h
+++ b/numpy/random/randomgen/src/xoshiro256starstar/xoshiro256starstar.orig.h
diff --git a/_randomgen/randomgen/src/xoshiro512starstar/LICENSE.md b/numpy/random/randomgen/src/xoshiro512starstar/LICENSE.md
index aa34c1966..aa34c1966 100644
--- a/_randomgen/randomgen/src/xoshiro512starstar/LICENSE.md
+++ b/numpy/random/randomgen/src/xoshiro512starstar/LICENSE.md
diff --git a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar-test-data-gen.c b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar-test-data-gen.c
index bcc3574e4..bcc3574e4 100644
--- a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar-test-data-gen.c
+++ b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar-test-data-gen.c
diff --git a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.c b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.c
index a9f56699f..a9f56699f 100644
--- a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.c
+++ b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.c
diff --git a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.h b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.h
index 0fa0ba3cd..0fa0ba3cd 100644
--- a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.h
+++ b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.h
diff --git a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.c b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.c
index 0cf884edb..0cf884edb 100644
--- a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.c
+++ b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.c
diff --git a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.h b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.h
index 0b7892473..0b7892473 100644
--- a/_randomgen/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.h
+++ b/numpy/random/randomgen/src/xoshiro512starstar/xoshiro512starstar.orig.h
diff --git a/_randomgen/randomgen/tests/__init__.py b/numpy/random/randomgen/tests/__init__.py
index e69de29bb..e69de29bb 100644
--- a/_randomgen/randomgen/tests/__init__.py
+++ b/numpy/random/randomgen/tests/__init__.py
diff --git a/_randomgen/randomgen/tests/data/__init__.py b/numpy/random/randomgen/tests/data/__init__.py
index e69de29bb..e69de29bb 100644
--- a/_randomgen/randomgen/tests/data/__init__.py
+++ b/numpy/random/randomgen/tests/data/__init__.py
diff --git a/_randomgen/randomgen/tests/data/dSFMT-testset-1.csv b/numpy/random/randomgen/tests/data/dSFMT-testset-1.csv
index 9f3f68aee..9f3f68aee 100644
--- a/_randomgen/randomgen/tests/data/dSFMT-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/dSFMT-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/dSFMT-testset-2.csv b/numpy/random/randomgen/tests/data/dSFMT-testset-2.csv
index 2ec2d7a51..2ec2d7a51 100644
--- a/_randomgen/randomgen/tests/data/dSFMT-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/dSFMT-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/mt19937-testset-1.csv b/numpy/random/randomgen/tests/data/mt19937-testset-1.csv
index 3d4995840..3d4995840 100644
--- a/_randomgen/randomgen/tests/data/mt19937-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/mt19937-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/mt19937-testset-2.csv b/numpy/random/randomgen/tests/data/mt19937-testset-2.csv
index d2f6c156c..d2f6c156c 100644
--- a/_randomgen/randomgen/tests/data/mt19937-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/mt19937-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/pcg32-testset-1.csv b/numpy/random/randomgen/tests/data/pcg32-testset-1.csv
index 6bddc8d5c..6bddc8d5c 100644
--- a/_randomgen/randomgen/tests/data/pcg32-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/pcg32-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/pcg32-testset-2.csv b/numpy/random/randomgen/tests/data/pcg32-testset-2.csv
index 2d4c8aed1..2d4c8aed1 100644
--- a/_randomgen/randomgen/tests/data/pcg32-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/pcg32-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/pcg64-testset-1.csv b/numpy/random/randomgen/tests/data/pcg64-testset-1.csv
index da6d77d40..da6d77d40 100644
--- a/_randomgen/randomgen/tests/data/pcg64-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/pcg64-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/pcg64-testset-2.csv b/numpy/random/randomgen/tests/data/pcg64-testset-2.csv
index 779761d0f..779761d0f 100644
--- a/_randomgen/randomgen/tests/data/pcg64-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/pcg64-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/philox-testset-1.csv b/numpy/random/randomgen/tests/data/philox-testset-1.csv
index 64c1516cb..64c1516cb 100644
--- a/_randomgen/randomgen/tests/data/philox-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/philox-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/philox-testset-2.csv b/numpy/random/randomgen/tests/data/philox-testset-2.csv
index 1c2d4eba0..1c2d4eba0 100644
--- a/_randomgen/randomgen/tests/data/philox-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/philox-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/threefry-testset-1.csv b/numpy/random/randomgen/tests/data/threefry-testset-1.csv
index ddfa736dc..ddfa736dc 100644
--- a/_randomgen/randomgen/tests/data/threefry-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/threefry-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/threefry-testset-2.csv b/numpy/random/randomgen/tests/data/threefry-testset-2.csv
index 275604557..275604557 100644
--- a/_randomgen/randomgen/tests/data/threefry-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/threefry-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/threefry32-testset-1.csv b/numpy/random/randomgen/tests/data/threefry32-testset-1.csv
index 3aec7304d..3aec7304d 100644
--- a/_randomgen/randomgen/tests/data/threefry32-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/threefry32-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/threefry32-testset-2.csv b/numpy/random/randomgen/tests/data/threefry32-testset-2.csv
index b7753638f..b7753638f 100644
--- a/_randomgen/randomgen/tests/data/threefry32-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/threefry32-testset-2.csv
diff --git a/numpy/random/randomgen/tests/data/xoroshiro128-testset-1.csv b/numpy/random/randomgen/tests/data/xoroshiro128-testset-1.csv
new file mode 100644
index 000000000..4ef7172e1
--- /dev/null
+++ b/numpy/random/randomgen/tests/data/xoroshiro128-testset-1.csv
@@ -0,0 +1,1001 @@
+seed, 0xdeadbeaf
+0, 0x86f9f4feeebed928
+1, 0xb3617382bfd2bb33
+2, 0x4314c03ca1908f7a
+3, 0xfdbe2ea0213dab80
+4, 0x6076f6f829c64721
+5, 0x6587411cc85fa712
+6, 0x4778e74dc7f92125
+7, 0x6ada4530f4cf01c8
+8, 0xe0ddb30ce61b5172
+9, 0x2998c6e98e79ae50
+10, 0xfb8cb09917a0e99a
+11, 0x7df546933cbeabcc
+12, 0x972407f8132f16c2
+13, 0x6dcfab42a6d97aaa
+14, 0xcbd39869fb69f683
+15, 0xaa789636ea4daf4c
+16, 0xe364dbbff8064dbd
+17, 0xf639489e242feaca
+18, 0xa3454eb3b12942b7
+19, 0xbaa569d8f934bc14
+20, 0xbfe4a0166c493f06
+21, 0x96dec770408b339d
+22, 0xc75a3b26b2702eec
+23, 0x4752a021254c0915
+24, 0x35edf588263b9bbc
+25, 0xa97342f217e541c2
+26, 0xea9bc6a01b4b7d83
+27, 0x93cec444361979b5
+28, 0x8ed5719f5ba9a424
+29, 0x8e1dead734d410b1
+30, 0x1f63a442ce77d4db
+31, 0x3c36e0a05da986de
+32, 0xc6c10658893be094
+33, 0x196ed853b1167184
+34, 0x8b06218d6a34950a
+35, 0xac51e33319f103a4
+36, 0xdf47e4b0ef46c70
+37, 0xf34775455325aea0
+38, 0xf341953786525c76
+39, 0xe9b85d99c1115696
+40, 0x4432daff0305cfed
+41, 0x34cc1bfed7b9676f
+42, 0x87d19c7db528ca12
+43, 0xaa767030e19682b1
+44, 0xac39890dafd308ef
+45, 0x92a6d833ca6a68c2
+46, 0x6626523fdceed4ba
+47, 0xa609844feb22593c
+48, 0x3d68f39a561a7c78
+49, 0xee2b74bafbe9a80e
+50, 0x7f23ba8a58a0226a
+51, 0x8900ca1c9d5550b6
+52, 0x781ff936e2d8abfc
+53, 0x960fa9c6da5eeab3
+54, 0xdc316a7a5161cd
+55, 0x61f6096510d22bdd
+56, 0x1c1fc069b6643ce7
+57, 0x3105b8feea7651bb
+58, 0x4ea8c76afedfa6e5
+59, 0x55cce3ba46b2ff82
+60, 0x32ce23a4e59a9ddb
+61, 0x5d8b33b817a1e1c2
+62, 0x6861f95a5dbc833d
+63, 0x1af405acc0346f1e
+64, 0x4bea8e5e87e6b3cd
+65, 0x6a79d9021478eb80
+66, 0xbd1512cd44c0d03e
+67, 0x870719b2e322e44
+68, 0x10eaf80ea8e95a14
+69, 0xad85fdbe2e6d99ec
+70, 0x47e9f8f63db20254
+71, 0x69ca77aa6a12ff2d
+72, 0x385ec684f96ed9aa
+73, 0x573a97f8103f9ea8
+74, 0x57a94ba1ca505197
+75, 0xcb4bbe5444b0b4ae
+76, 0xcbe456413a8d1566
+77, 0x67a21dc37357827e
+78, 0x992ea6028ade725c
+79, 0x89cc89e9570792f2
+80, 0x2b4873ae2a6260c6
+81, 0xec8e07e9204eac1a
+82, 0x6bc898204729c23c
+83, 0x7d086557a49d391d
+84, 0xfa16b7ede631dfd9
+85, 0xf2710cc7516fd414
+86, 0x9aef67d70498456d
+87, 0x518f9fc570ea091e
+88, 0xe765ca35b2293f35
+89, 0xb06015f656a5ce97
+90, 0xc0154acff67d930
+91, 0x1cb3b41de66b3964
+92, 0x41a024e712b0d577
+93, 0x921572c63007b7a7
+94, 0xb2864274d855fbb9
+95, 0xe555774f7bbf0590
+96, 0x2d15198702e750fc
+97, 0x3fb422e738e347c8
+98, 0x8562515206baf50
+99, 0x13dcd174d6640321
+100, 0x2c2d1d739e2243c4
+101, 0xc41b5f458215c274
+102, 0xa6d7e86d348f1f4b
+103, 0x9d78f189583149c7
+104, 0x685ac4ec5ac7c326
+105, 0xfb998afb22666cfe
+106, 0x5b9df8c77816280b
+107, 0x2ebad315b4a6de5e
+108, 0xb70f296a67a1ee71
+109, 0x3b714a6ba151c3c0
+110, 0xa0b64d501736310f
+111, 0x17f827804629ad52
+112, 0x46ed287bdea3217b
+113, 0xb26d12305a6fb5eb
+114, 0x184247bba474368b
+115, 0x542b93986dd5ea3
+116, 0xb8a0cc8fbd9a193b
+117, 0x7a89dcac9c85a621
+118, 0x48d466d072a44c78
+119, 0xada1f7e65a517caa
+120, 0xf6febf549d553ec3
+121, 0x19cf94cb14dadd59
+122, 0x9087aeca4e923157
+123, 0x1afb1bb6e2fc9f3e
+124, 0x6192b1b315392ddc
+125, 0xedcd055d2840341f
+126, 0x396cfce39b432fa7
+127, 0x183319afd1d0cf9
+128, 0x917409d968c3620a
+129, 0x5868095709377c1b
+130, 0x84e1f92faddbb86a
+131, 0x45c5ecb0c1bf2a92
+132, 0xc5a7f84f16b52592
+133, 0xb389866b9cca1c35
+134, 0x7924255948cb4bb8
+135, 0x6b4a33d0810f1d99
+136, 0x749800294244b46
+137, 0x4ce2e9d74a34c543
+138, 0x3bf47c412961da25
+139, 0x35ecd46092b0d494
+140, 0x360703f0b4c8fd18
+141, 0xef4f039e6428f8bd
+142, 0xfc63479c9833ab9f
+143, 0xc660fc00994c3490
+144, 0x57a055abc0afd99c
+145, 0xe83bee603058ba87
+146, 0xa6c06409fab71942
+147, 0x8ee4675a90f2eded
+148, 0xef7240fbaaa4454e
+149, 0x73d0378b4855e043
+150, 0x4039e6cd0da81154
+151, 0x3e4261378314b8ef
+152, 0x5f891423c2a77d05
+153, 0xd57c43570ff9cc37
+154, 0x3c69a08bb46e066c
+155, 0xe265cba3cabb6fd8
+156, 0xd9bed8fe0179a58a
+157, 0x882255210140b153
+158, 0x7d212a20e03003a2
+159, 0x5821c48f435aa8db
+160, 0x3ff2fc230e4fc421
+161, 0x7617ac677d8703e2
+162, 0x306c8cf9c6be23ef
+163, 0xd7728cfebc614ab4
+164, 0xeddb425f79076eee
+165, 0x76e61bc49c5b61e7
+166, 0x803610661e501d33
+167, 0x52c2968fd8b30ba9
+168, 0x56c3d2813aabf019
+169, 0x895c1923a0f7f8b8
+170, 0x2bb50b203d52d465
+171, 0x6987baec87355c0c
+172, 0x1186c9e7e524b89a
+173, 0xd72695cdee9d1e41
+174, 0x1aed250f3b25eb65
+175, 0x7a98d55cc9b790cf
+176, 0xe3f84b1f0a3436a2
+177, 0x929f43e01cdb529b
+178, 0xee439860101d371f
+179, 0xc30f8f543ea1391
+180, 0x51470330a3a9555c
+181, 0x13cbcd8728edb72b
+182, 0xc2ff37a63d496acc
+183, 0x38a775bab54c9e5
+184, 0xd3b9a106f7cae82e
+185, 0x409dbb706362645f
+186, 0x6756c88f065e228d
+187, 0xfca59c508b33a615
+188, 0x4f1a85c2cf7f70b
+189, 0xde05e1f080183683
+190, 0x58cc04c78668d30e
+191, 0x29a6cdaabc128bd4
+192, 0x7fd48a7175d7080b
+193, 0xf62615d9f1a4c0e0
+194, 0xc0bcdabf943a9882
+195, 0x2e17eaa18c30cbe9
+196, 0x23b0a7bf11059cdf
+197, 0x980faafc56997e96
+198, 0x5df31ba1971bc9b
+199, 0xf2918ccd892a579b
+200, 0x863c265ba908fbd0
+201, 0x6c4d8604d73771f
+202, 0x8231891e28337ca1
+203, 0xcf1bca7019e17620
+204, 0xc2914444386dd7b2
+205, 0xb2940f2f54af1342
+206, 0x5f94a7742182144
+207, 0xdd84510e416c55a9
+208, 0xed6cabbc034639b6
+209, 0x8ec7d280a1dc818e
+210, 0xff9c27aafcdf6ad8
+211, 0xe4eb67cd34584e9e
+212, 0xde9094e8bd9372fc
+213, 0xf72c5d7a9f648bff
+214, 0xab84eec6625de3ab
+215, 0x31401a873557b3f0
+216, 0x3a92ea998fbe33e0
+217, 0x938f51440df1e372
+218, 0x1b8adb3266aa2b20
+219, 0x52614e3f539aa97e
+220, 0x8c3910405c2b6db5
+221, 0x5678fa580938f1d0
+222, 0xaecec4c57c393677
+223, 0x25387b15ce263e99
+224, 0xa2064849456b89fc
+225, 0xf3d6abe7c5ccbf89
+226, 0x71306e8ce5f15360
+227, 0x9100205404028ff
+228, 0x473f2c3ee60b5ff8
+229, 0xa98cccbcb30fd354
+230, 0x150886f18c2a8abe
+231, 0x73a10ec9d0a3e5b
+232, 0xa04d37f64652298d
+233, 0xc47af08c27df64bd
+234, 0x127ec96954fef972
+235, 0xd8ce70375554f482
+236, 0xdb2e3388772bb0b3
+237, 0x843d9952b8e93044
+238, 0xb4c50aa1b3ff8dc0
+239, 0x41d43b3662be19e4
+240, 0x5de18309d9cb944c
+241, 0xbfcbc63f45cf4998
+242, 0xc58ce67bd9f221ba
+243, 0xeb3c7f7349d6876b
+244, 0xbd38b8331d766037
+245, 0x5c0de7cf313bef77
+246, 0xe8984144d81c9ab8
+247, 0x42ba3a205f55572
+248, 0x316372b92fd6df46
+249, 0xe7c4b5535873e7fc
+250, 0xec4b062406fad890
+251, 0xcb9254c0853f3bc6
+252, 0x6fbb40d1758c5a4c
+253, 0xa17b63cb93c12094
+254, 0x3e00a27f4b51696a
+255, 0x7dcec0f01bead947
+256, 0x1a8b827e9d0658c4
+257, 0x75929c838a9fc9eb
+258, 0x165b162560d5bcc
+259, 0x2320fd4ab2fbe43
+260, 0xf0ca89e97c60154b
+261, 0x90a3b09348c0caf8
+262, 0xac1516105437f9d0
+263, 0x8f11e6454bea6007
+264, 0x19b8a0c5805ebeec
+265, 0x3105e78b4e2f11b0
+266, 0x8c365bc66d26d165
+267, 0xdccf55c3bf85167a
+268, 0x926c88470d3a703a
+269, 0xe5421ab183b63c89
+270, 0x9fbcfd19c800e684
+271, 0x618b70d50ad85d44
+272, 0xcdc7b4b5f83386e1
+273, 0xb03b91dccaff6125
+274, 0xf470624fdbde22b7
+275, 0xac2f659be06a08ea
+276, 0xa49f1af1a29abb77
+277, 0x5c2e94fe6a5d7978
+278, 0xf12b8c7cfa17f5f4
+279, 0xa8c4c5c917203a0e
+280, 0xd1ea9aa8c49fa551
+281, 0x7363288bd7b40258
+282, 0xb3668a32f1b36dfe
+283, 0xeebdb9e4377e3fc2
+284, 0xa07bf94365ce0a90
+285, 0x49a75731b3efe51b
+286, 0x29af2e86886df8db
+287, 0x84cab4d861631a0b
+288, 0xec45e2345d9c1114
+289, 0x9e0aca51ba3aef2b
+290, 0xef6db51f7d2239c0
+291, 0x6bd53260a01e4992
+292, 0x5ff887fc83732499
+293, 0xef39c88e15608f39
+294, 0x166883224e645f35
+295, 0xb296e3561ccc419c
+296, 0x342081199780e04b
+297, 0xdf24e8c78a472e
+298, 0xa64a89e743ae2617
+299, 0x6cc623876bd66b3d
+300, 0x45066f03a3fae72d
+301, 0x99030d380d113008
+302, 0x891b6637052d6ed2
+303, 0x8ac4de1d3bacdd5c
+304, 0x6bd4540970e747b5
+305, 0xb48125e69019055f
+306, 0xefc03dc62eee31b8
+307, 0xbb61f62afc671564
+308, 0xf941c67dc61e4f9
+309, 0x58dfbb0f8cdf6857
+310, 0xfdd423db3734d952
+311, 0xddbef0ea939712bc
+312, 0x64cb6c97a176d099
+313, 0x57d73985fa34a17e
+314, 0xa5241bd90d5a351c
+315, 0xc2669439928e2f29
+316, 0x4a00c61ffe77951b
+317, 0x562532cdc210a37
+318, 0xe2fb88acfe451d04
+319, 0x8b4053716affaa4a
+320, 0x208616d3e2ef242b
+321, 0xf830a95da5ba3676
+322, 0xbf01348d4133f007
+323, 0xd44cd6d38e4b2c1a
+324, 0xa577c236744b677e
+325, 0x1a08ee02ccfaf7d4
+326, 0x94b364baa2e88b22
+327, 0x1278a7e8f3886173
+328, 0x5e5c595c284d8555
+329, 0xe81240395bfc355f
+330, 0x5c72a5e76ba9777f
+331, 0x30c5587516d5dd3b
+332, 0x3a01ab159c61cc3
+333, 0x921a5153ca306f87
+334, 0x499d0410a755a3c3
+335, 0xe35523f8c25a0e36
+336, 0x923135834356ca2e
+337, 0xf0d3cfbca4b9c1f6
+338, 0x523ba92f35c890b5
+339, 0x32791ee78e661d2d
+340, 0xb8def8bec5d4833f
+341, 0x8ddf479407495133
+342, 0x32af2693e93bea72
+343, 0xfe7c577a150a526b
+344, 0x29f322d80b5d31e7
+345, 0x770f1c2f276aac75
+346, 0x710d900fa16454bc
+347, 0xad35a054b19a6419
+348, 0xf8543705d2221c4d
+349, 0x1a208f09dcd992e6
+350, 0xc6fe24f4478fae54
+351, 0x15af9b23589efa5c
+352, 0x77c3cdaf72c7974a
+353, 0x8b9ee62898b42805
+354, 0x24d86bd194b32df2
+355, 0x8947894a6f9e06bf
+356, 0x8f74894e2ebc9e42
+357, 0x30a460a64ac97b89
+358, 0x985f3b650b19bfd1
+359, 0x6b68d65a7decd3b8
+360, 0x33ca1f7082b77691
+361, 0xe16c9da462bcc4c2
+362, 0x85512fb1e448efc1
+363, 0xd23729cdc1fbf8af
+364, 0xb4c48c3dd40c6431
+365, 0xc408b5198e25d53d
+366, 0x9c3ede789980eac6
+367, 0xea44b85282535bfc
+368, 0x8a4ef27cc51a1368
+369, 0x92b1c10cd267b53c
+370, 0xe262b216c571e216
+371, 0x9ab945ad23c45d33
+372, 0xf8a6ee8859691e81
+373, 0xb4387e4124b6ede6
+374, 0x67057ebcb0458cba
+375, 0xcfea5995ea0d9036
+376, 0xf60e552368e88671
+377, 0x97a744db9ea2a465
+378, 0xf9bdfa8f7765704d
+379, 0x4a092a7483623007
+380, 0xd1ec15e72770f23d
+381, 0x9f4bddf29a1ec891
+382, 0x7574533dbfd85aef
+383, 0xf336b164aa32c8c1
+384, 0xaecebdd3bb1f6273
+385, 0xcdee4c8c59b621f3
+386, 0x4e62a96f422f045e
+387, 0x7954b5c0c33402d9
+388, 0x44284a788c4bd72b
+389, 0x3f7f331379400e90
+390, 0xbe47722adce724fa
+391, 0x202ec17f30c6dfd3
+392, 0x20e6bd8b12ae33ca
+393, 0x56ba93c9a6aa537d
+394, 0x2ffaed6a56011829
+395, 0x59bf8fb6387acb10
+396, 0x1d092715f75b4731
+397, 0x4f73c9b5945ea6c5
+398, 0x4f9965a9cef20ef1
+399, 0xcce9d598a9ee3128
+400, 0xad6bf86f41263232
+401, 0xb768dbb67c5e35c0
+402, 0xb5c06fa5f590c5c0
+403, 0x2849f01daca38cdb
+404, 0xe31daf6540d3c5a2
+405, 0xa462464c813059e0
+406, 0x98cd0da9a864a13c
+407, 0xa0bf6269bcb1c79b
+408, 0x354467e29dd2415b
+409, 0x8760ca9ca2ed96c1
+410, 0xdcbc0296a9d243e8
+411, 0x940f0679c02babd0
+412, 0x4f389795e70c9b32
+413, 0xcad57ca19c578c20
+414, 0x346870e71ed575ad
+415, 0x4dc47ca3f25bc1bf
+416, 0x636afe20b8e9f185
+417, 0xb47b4b4dbf6ec559
+418, 0x8634686a6ec85170
+419, 0xc176c9709a40fdc0
+420, 0xaf830009e390dfb0
+421, 0x65cb55acd3653031
+422, 0x52d53d142c15219b
+423, 0xffe9258ae4b63350
+424, 0xa118cfbbb64e3f6e
+425, 0xf0aa49900685c7bd
+426, 0xf4a63180c7a493e0
+427, 0xb219297f315c3905
+428, 0xedbe04b3e65cee75
+429, 0x17578b66c9422dcb
+430, 0x93341d13d6f3f307
+431, 0x4cb05e69da41bb0a
+432, 0x1f1dad0ac8c3b625
+433, 0x4cc0d69392282fe4
+434, 0xa415bcab7a9e9030
+435, 0x49c4bce139b17cd7
+436, 0x70921ec48102ba98
+437, 0xcce784ad8ecc8ef1
+438, 0x1394434e6983851
+439, 0xdceea9694050de81
+440, 0xdd34ce31271e0733
+441, 0xaa722f329463f189
+442, 0xca65b550f35dd0c9
+443, 0x1477d0711156ff76
+444, 0xeff9d7e1e525c056
+445, 0xf425a4a41487a26c
+446, 0xd7a62c18f2ce9451
+447, 0x178ad1414c520698
+448, 0xd5fb19755b8a9cd3
+449, 0xf9b662ac336df748
+450, 0x3ab374b95c09392
+451, 0xb1017e23e64343a2
+452, 0x4e38eaa252f0eda8
+453, 0xe50e9e51d4b9abce
+454, 0xaa5d9bfb8193c02c
+455, 0x823a22b374b70757
+456, 0x31cebbf89930801b
+457, 0xd7c29187bcea4c72
+458, 0xe9118fc76c45cbf1
+459, 0x9c71d746e81ad2de
+460, 0x61546ce34ed37e5
+461, 0x93cf2b2f08a9dd70
+462, 0xaa8a0e918d246f1a
+463, 0xbd35895f4913143c
+464, 0xdfe98084bcf724b5
+465, 0x3711d1a09d3b438c
+466, 0xef7a4589307c37f9
+467, 0x174de3c95aad64d
+468, 0xa66127748de17435
+469, 0xaa7789614da98e6d
+470, 0xa01a9a6570b63f02
+471, 0x52e4422529cdf289
+472, 0x5fa480a033fa2eaf
+473, 0x8d950f2a0362d44d
+474, 0x264fa25f7b50b19e
+475, 0x4f2ed2872e35635e
+476, 0x1173eaed49ff03d
+477, 0x7cbde5b17f7541b1
+478, 0x80913ce7ba0fd00f
+479, 0xae85c98e607af8ab
+480, 0xd4349f0a1a724b17
+481, 0x436510b9fdbb00b5
+482, 0x345d25f181b23831
+483, 0x360875f99d1dbc3f
+484, 0x77657a493413286e
+485, 0xdb45294b3f8dab13
+486, 0x25e84d1be5f5c6d6
+487, 0xbb44ba55be3f453
+488, 0x7d7b2c5077ddb248
+489, 0x4c7e02c08f79a63f
+490, 0xea589769295ebac7
+491, 0xcf1159ed1fbbabb2
+492, 0x5f0c199a69956db4
+493, 0xb2c190830b28ba75
+494, 0xfef7b55986f49a19
+495, 0x961eb7b425de477b
+496, 0xdb31045f05af950c
+497, 0x13bc7a5600b306b9
+498, 0xe4834efcc7d8515c
+499, 0x6d3ebda0630a9e64
+500, 0xcf453a9b686cbc0
+501, 0xe1048db43e9dc5ec
+502, 0x95b3b95608ff12fe
+503, 0xdaa8457f1d3bca37
+504, 0x3913e8473f5593ba
+505, 0x3afceee33004c5dc
+506, 0xd117393f5e9d11f6
+507, 0x7f462da9314f76e
+508, 0xa4fc522c19f1f8d6
+509, 0x7429b79b76acdcfd
+510, 0x5a570cb8d216a730
+511, 0x705c4c4af10c7ac7
+512, 0x4abf3a808087344e
+513, 0xe6313ab9845d433c
+514, 0xb038e24fbbfc7716
+515, 0x80a25d4531599d7c
+516, 0xb2b75e488f81693b
+517, 0x43b8d27a4dbba2a9
+518, 0xa611ff50d23f05e2
+519, 0x872da217d6fa41a6
+520, 0xb153855cda09b36f
+521, 0x3c6f5d7d21da31a7
+522, 0x59a63c7ad79be63f
+523, 0x27a4679e83422368
+524, 0x4e8bc4d771b0feaa
+525, 0x6719469e9cf2c3d9
+526, 0xb56b708a9a915da5
+527, 0x2594e558e515d19
+528, 0xd6df1e4a0bf1b153
+529, 0x4f7f25d38191e1fe
+530, 0xaaaf850ad5e538b0
+531, 0x6355175a813e200c
+532, 0xbe7544f56eef5ae9
+533, 0x5c516d87bbf779cb
+534, 0x23c180f962a3d2a5
+535, 0x5cb1ac1c4261b281
+536, 0x4717bb3567432de1
+537, 0xeb1285580fa935a0
+538, 0xc50894350d82d33a
+539, 0xcd0e2cfea37c3245
+540, 0x298bc40ad881a2b5
+541, 0x466df434870604fc
+542, 0xbc1eb2d8ebbd351f
+543, 0x357534396e12e369
+544, 0x305a159545ad695a
+545, 0x63775f2479bae51a
+546, 0x44e266968147590e
+547, 0xd733336da0cfd6b9
+548, 0x7b1668635266025e
+549, 0xe25f2c506843c3de
+550, 0xf9635d39029105fe
+551, 0x4098dbd2e7b717fe
+552, 0xd58848e50b96b32d
+553, 0xdf776a82ade5f937
+554, 0xb86fe57c82d76cf3
+555, 0x8cb4f08fb1f46b4e
+556, 0x5df5b1c00ab261ac
+557, 0x6726d97935f08d31
+558, 0x4cbc02697842c6ac
+559, 0x6c0440408a22bcae
+560, 0x2327f06b5ef97cf8
+561, 0xabf95d4d77e37c76
+562, 0xc53b4d7aa97f77d6
+563, 0x6964842fd4206b42
+564, 0xee45d852faa5932a
+565, 0x607e303bc41e73e8
+566, 0xe604d0952299ff3a
+567, 0xd762802eed4cb935
+568, 0x4cf13e77ae1782aa
+569, 0x6780f1ac226eb1e7
+570, 0xc102e38a0e8e8199
+571, 0xb97f0632dec2edb1
+572, 0x43b1a8890a987c2a
+573, 0x5603f8d9115551a
+574, 0xdd523847a2d2346a
+575, 0xd9a6c2e9be51ec7e
+576, 0x5c87bb42ff344def
+577, 0x1c08b83d807a9322
+578, 0x6c79b279737cd049
+579, 0xc75ee98ecd59cd3c
+580, 0x318284b03e77d76e
+581, 0x6737a1e79456ce1a
+582, 0xefe096a77d952f55
+583, 0x37f780c27a9fdd68
+584, 0xfefed1b3b932191e
+585, 0xdf552be0dfc09adf
+586, 0x4d210f71c1ccfe6a
+587, 0xf524993a9f48c96e
+588, 0x6fb8c3c46cb1e951
+589, 0x2ac8c28e67eb7b03
+590, 0xefcb311d060d2897
+591, 0x675c6ca4aba62734
+592, 0x1f5f1df09191b5ed
+593, 0x177d32b3b4fe46da
+594, 0x58f48456e4a88cf2
+595, 0xec0233251bedcbae
+596, 0x11046407a9ce0c19
+597, 0x50eccedfa2531ef9
+598, 0x5769c9369f18c53a
+599, 0x879442d615c8f67b
+600, 0xc7aee966f982a0a7
+601, 0xaadf9a353e6dffd
+602, 0x216d2fcfe81b00f7
+603, 0x8b17b3b2a61765b8
+604, 0x7cc969d82c53763e
+605, 0x1b1a5d88afda0c8e
+606, 0x21ea1e785061959c
+607, 0xbbbf45849572539e
+608, 0xf84972b569d342a6
+609, 0x85952fc81713400
+610, 0xf6bccc50b5741a48
+611, 0x35106a9ef28f5be
+612, 0x785370b833caca28
+613, 0xc6c5c3d1bbe5b4ef
+614, 0xda7658fec38bbb8c
+615, 0xd3d1e9de94c6f41e
+616, 0x73ad91859892dd7a
+617, 0x35621a8da5e3fd19
+618, 0x5530b00cd5c63fb2
+619, 0xfa36112a09c088cd
+620, 0x302b7f4fc815cd73
+621, 0x1fa4adb717689692
+622, 0x3a15fd5eb37c731a
+623, 0xcc854934e21d4cd7
+624, 0x1d11465a34be290e
+625, 0x213b3e59f10b1d60
+626, 0xf923efefe3fd28b8
+627, 0x81b56a961626ed7d
+628, 0xe4f41f8c283c8fba
+629, 0x374ade85b0260e4e
+630, 0x4cf71f967d36fcca
+631, 0x705b52b4a9d5d174
+632, 0xdc6f883cf909c428
+633, 0x44dd73ed064e8a3a
+634, 0xdcff5a374c2641c1
+635, 0xe3177de93b2197ad
+636, 0x71f40cde55876808
+637, 0x4c12e600bd6a1b3
+638, 0xc3940e86b962699c
+639, 0x133569f533cf1540
+640, 0xcba6db36e8054239
+641, 0xc7c92f14ee34a962
+642, 0x133c8d42f4690453
+643, 0x2a62456f39aa3030
+644, 0x35354ef813ee1dec
+645, 0x35e10f4c2f2fb795
+646, 0xf105e888f10c8397
+647, 0xaa22e206ac7652dd
+648, 0x65121135905afd4c
+649, 0xe2c49866de9626ca
+650, 0xeb6ae66e1a605d02
+651, 0x7379ba1f2f16b81e
+652, 0x4a3a91e2f22d4d19
+653, 0x30e4af3cd5e5a24
+654, 0xac59e67a483fa52
+655, 0xc4f027dd48c1e37d
+656, 0x91263160b58e9e0d
+657, 0xc7672b7fbd4ef6b2
+658, 0xf3262da8a7645caa
+659, 0x7a5f4990cab96f40
+660, 0xcec55da0937d86a4
+661, 0xd25017295c98b613
+662, 0xc2c9ad08c34fd189
+663, 0xfb6ca6018b1e019f
+664, 0x480ee3cc62324c8e
+665, 0xab37c56a10ab0519
+666, 0x13fff4e20b1eb15f
+667, 0xab25dc0f003e940e
+668, 0xdbadd5f2b73aec35
+669, 0xa7b4d6770d19f43
+670, 0xd28144880c1c5434
+671, 0xa435c41dce914dc5
+672, 0x9883a2e3cddd7ad
+673, 0xddb29b179c922b28
+674, 0xed3f6669842e0c39
+675, 0xb663238d46b213a7
+676, 0x6346ef1606873452
+677, 0xe7923ae257befe28
+678, 0x848ce090039c77
+679, 0xb77a6da428f4da49
+680, 0x6d9acffa8252ae03
+681, 0xd90d55fd8f8abf1c
+682, 0x28718a78a031f802
+683, 0x8305f9005a1b1d6d
+684, 0xd065b82167b53418
+685, 0xec65275154e9da5c
+686, 0x292c7a783bc2dc04
+687, 0x71aa213998c2c31a
+688, 0x114032c57e9fc4a0
+689, 0x67c3675a88faa9d3
+690, 0x7813f653eef4d4fc
+691, 0x50004f43e4f4c43c
+692, 0x43d3ac5d63714a93
+693, 0x4142e7030323e77a
+694, 0x5da693da8e656d6f
+695, 0xac696f7b0818c8b
+696, 0x910b5df8803af3fb
+697, 0x8d89168d50ded4d6
+698, 0x37c31ab0b2635495
+699, 0x66d97616af36f929
+700, 0x2ada02a3c0389bda
+701, 0x62eea272c6334ef
+702, 0xc8afae44f45ccc49
+703, 0x4978910fb289af22
+704, 0x64590f6a489183f9
+705, 0x594837052d1ee56f
+706, 0x8553a88dd84e460c
+707, 0x5c11e1d61832edfe
+708, 0x7d5b6fde3c05ef8d
+709, 0xfaf96bbdea0d6f11
+710, 0x2112b6f8f25fc3b7
+711, 0x6ce347dc5bd8d9f6
+712, 0xb072e2c4076aa185
+713, 0xf4162f4ab453ead3
+714, 0x369789462fc197c7
+715, 0xe732c5b207c55f3c
+716, 0x4689ce674194c32c
+717, 0x6bcf28130ebd7bbe
+718, 0x4d7a25def10edb97
+719, 0xc4a2283e380f5239
+720, 0xab31536a95f7e336
+721, 0x50c1ecd9e4dec3e4
+722, 0x1bbea15462cfde71
+723, 0x1e7c73d56d6e939d
+724, 0x7c46fb35982735db
+725, 0x83c23f93c4221593
+726, 0xddc566e8005e0e6e
+727, 0xd0551a666c088325
+728, 0x2c57b590ab686557
+729, 0xf2e9351a14724fe1
+730, 0x45d25cf2ebb2ee0d
+731, 0xbe23d2a8fc7aea1
+732, 0xc721cb7b65d8dd7b
+733, 0xe6642683775efcac
+734, 0x6c29ca0adc0a83e0
+735, 0xd0de3128954b2eef
+736, 0x7abea9b318f9a544
+737, 0x3a63475d59d64b22
+738, 0xb804c9cd589c817
+739, 0xfc4f880ac9dbc246
+740, 0x414d492c8870732f
+741, 0x3ee15c71660a8129
+742, 0x57f4ab3a25da00eb
+743, 0x5a1d89d6f9eaa29f
+744, 0x60139567a3d66313
+745, 0x5759ec448bbaba05
+746, 0x44d3088d8cf1cc1
+747, 0x77d8019fadba610e
+748, 0xcdc729417b13904e
+749, 0xdc77421f8b2bfb0e
+750, 0x47ae0c4222bc1d4a
+751, 0x22768d4b89156cbb
+752, 0xa60d3ef97eae8ddb
+753, 0x7aa22493dbfceff3
+754, 0x2ee0ee06bf9a5fb
+755, 0xd54b7701d7afc96f
+756, 0x1aa49ed985a53efb
+757, 0x97d6fad17caacdd3
+758, 0x1b2f6dcd1d10fe
+759, 0x46347f5bcca0f422
+760, 0xb9dc35c224242d3c
+761, 0xb5dd657190fa8a03
+762, 0x50ff9434c7862fae
+763, 0x7a05cd5c25bc1209
+764, 0xd5aa141a498560a1
+765, 0x73c62b8d0206e8b1
+766, 0x740f369af4ac9f51
+767, 0xe7479d9a0716b94e
+768, 0x8b3d0375452d633
+769, 0x6ed58c4d905dfe37
+770, 0xbefb7f1e9c79f6ed
+771, 0xe2cd7ee311d7a8c7
+772, 0x932cfb8178492b88
+773, 0x8e39205fbe47711c
+774, 0x149ea35973cc055e
+775, 0x96b73b6cfad8ad7c
+776, 0x572898ff1f967eef
+777, 0x795e8172b62cbf69
+778, 0x4e3d34c5bb921c28
+779, 0x7a4c623c7295f4c3
+780, 0x15b7ca7ef7179a7
+781, 0x48340589636b223f
+782, 0xfcd61c186913a7aa
+783, 0xf4f7f0cb49d78f5c
+784, 0xb9591798ca218218
+785, 0xe304bc438ae109a6
+786, 0xe65890c4bed537f4
+787, 0x54719032d537f085
+788, 0x927bbdd2931be349
+789, 0xfd4a852025d02c14
+790, 0x915a7c2bc713221c
+791, 0x4adac4a960ecdf9b
+792, 0x58133bde7f0edb25
+793, 0x73d00fa5f091794f
+794, 0xcb2fe411bfb56cf3
+795, 0x54a4f66f2c5f6220
+796, 0x125bce09ee493ea
+797, 0x766ba624e5f3b266
+798, 0x884478527221bba1
+799, 0x8a1920c18ba6676a
+800, 0xb0c08f7fbca3cdbb
+801, 0xd3b570c49c774405
+802, 0xae4a55264d8e012f
+803, 0x91a25b7c5e5872a9
+804, 0xeb65375cda8296ef
+805, 0x149f98de1b29f459
+806, 0xe00a81c67b8ba093
+807, 0xbd7da1f6c6be49f3
+808, 0x4ad7c327a630b482
+809, 0x7efc93c60449206a
+810, 0xff182d272189a04c
+811, 0x4e7892e8adc82e19
+812, 0x1327926bc36b7f99
+813, 0x9b6a8085d12fca4d
+814, 0x34a29cb661d313b9
+815, 0x7b3398923572c6a4
+816, 0x8b3ff461c821a464
+817, 0x8e5581286f82448e
+818, 0x82a8d223a7b6937a
+819, 0x1a0c750d6029237a
+820, 0xf19a0a7f578497a5
+821, 0x2e6a85391da4f651
+822, 0x98676879af572d0e
+823, 0x50110f1f738507a0
+824, 0xbe88faea0d4f8cf4
+825, 0x183bdc54555acc08
+826, 0x1d4dd72e0c7a82f1
+827, 0xef500f1dd19059f1
+828, 0xad98db5c386d33a8
+829, 0xa17bbcaea00a9361
+830, 0x8b8967126839c74d
+831, 0xcc9d0e484a9b1dfc
+832, 0x4216966d5af86872
+833, 0xdc3f8b825876e2ef
+834, 0x3ef820c11b63f9f9
+835, 0x78da1c113cdca011
+836, 0x7f74559d9177c87
+837, 0xfde51ee31804305a
+838, 0xc491d970fa5ce907
+839, 0x89b0ff390723a6ff
+840, 0x7452028822f2d7bd
+841, 0x3e55cee332d78047
+842, 0x5dabead1e04596ed
+843, 0xc4e878a6ba18aec7
+844, 0xa785fac229f7f353
+845, 0xd95155479c867ad0
+846, 0x678fdb174e3774e3
+847, 0x54106e733d27b887
+848, 0x60bdc0fa294764ec
+849, 0x55f1d4270179bd54
+850, 0x80165190a3df59ba
+851, 0x81b128a7508d2174
+852, 0x831d78b199fe132f
+853, 0x80ee7eba239ed866
+854, 0x359f1906550f62bc
+855, 0xe293dd490df5f745
+856, 0xf3362af4b0de9c01
+857, 0x9cdc46fbc7f9bee8
+858, 0xe577a13809850692
+859, 0x1490ed2b4ed8ce8c
+860, 0x63b861e371a125f4
+861, 0x49916e67be281c2e
+862, 0x1a3a8999e60fe603
+863, 0xa373c8ff642e222b
+864, 0x8112bea03196843c
+865, 0x29c507a4ee61f7c2
+866, 0x4eedd845cd786583
+867, 0x1d9bdbe51c1aa7c7
+868, 0x3e5d043d5ab768ad
+869, 0x8a3c0e9801e39bee
+870, 0xc49cd378bfb3c516
+871, 0x1b9ebe1f63af91d4
+872, 0xe44afa8dcf0f28f5
+873, 0xf5a7ab4f9a8d8cc5
+874, 0x8ba7cba3af03234
+875, 0xe79397a55e04d4b2
+876, 0xc49014ba09442ad4
+877, 0xe58a5dd949723f3b
+878, 0xd67c781ca27169dc
+879, 0x409f1435da244c9a
+880, 0x7ec9df0b04c17696
+881, 0x8a34c51bafd6e390
+882, 0x2f60cc0ebb4a781d
+883, 0x161283264abcb573
+884, 0x9c9db4bf55a46c8a
+885, 0x381e6106ff6053cd
+886, 0x6e8fd5a7b8ed1c18
+887, 0x89d0da00aecbae85
+888, 0x1baffa4542d298f9
+889, 0xbf53f2e1dc44d359
+890, 0x4c31d9bd148120a8
+891, 0xc36be4d6404a748b
+892, 0x400584c614a63b32
+893, 0x6622b75443cfa5dc
+894, 0xbbfcae44c8eec3d
+895, 0x28dbf6790e9ad12b
+896, 0x7779f5d56f2613c3
+897, 0xd221ad0b4c565a5f
+898, 0x4949752332a98b9
+899, 0x5bd9931a164b2717
+900, 0xb5108565cbec069b
+901, 0x2e8491298f41ecd8
+902, 0xc94483fba700a620
+903, 0x7c1299ec45d1e22
+904, 0xf37c3a7e7e020358
+905, 0x3635565fc484cbf6
+906, 0xa93b65e210af2a2b
+907, 0xcf18d773960a3667
+908, 0xa7529ce40290e679
+909, 0xd539e8afab9ff21f
+910, 0x44fa456fc4e2908a
+911, 0x138e0dfef16de572
+912, 0xb55ac8aa42abe21f
+913, 0xc8a7a9ed90a4920a
+914, 0xcc0f0dff8f4f1fc0
+915, 0x78c99cc82195feac
+916, 0xa7669ab9998bdb89
+917, 0x2bf510028d6ea80a
+918, 0x8995287d2a60326c
+919, 0xb3c5676e9772daa7
+920, 0xf210121d1f5cf3cf
+921, 0x3ec0fa808fe50e83
+922, 0x42f5269fd9717a58
+923, 0x7603ca20951ebe1a
+924, 0x7f75e4c3afca107
+925, 0xa08af524629c434d
+926, 0x1d144241418f216e
+927, 0x7cabc46fab0dfa3b
+928, 0x317172e8fe407c21
+929, 0x2694bf3be80d8b3c
+930, 0xdf18b4db02b875c5
+931, 0x5df0cb415bc5a2fd
+932, 0x954386c3df63e124
+933, 0xf0ad49aa400ee528
+934, 0x2a941df25bb38eb8
+935, 0x3b43af03f2d3eefe
+936, 0x7a58932cec64555d
+937, 0xabb56ea03deeaec1
+938, 0x33673826e58f9a52
+939, 0x8cb6fb8e42cd9f80
+940, 0xda88c439fe3b9dbe
+941, 0x31cb50c4a69d5883
+942, 0xe2164f69f02e57e4
+943, 0xb6ea04dd0ba2811f
+944, 0xb3458306841de334
+945, 0xbc6cd1a3cf526a19
+946, 0x9424b50438e687e2
+947, 0xa668fa546aecdd82
+948, 0xb8783bd3623d86f5
+949, 0x6d4341f1dd170d5c
+950, 0x1202c1b457913af9
+951, 0xf2b532602b908de1
+952, 0xb15f6354e6482108
+953, 0x4a542e16c973ef2f
+954, 0xcef0b8ef4bcbbf64
+955, 0xdd7090f21726ab28
+956, 0xd53de858192a0094
+957, 0x58e723302bf4d675
+958, 0xc3ffb98f745409ec
+959, 0x5489e4fa52210035
+960, 0x3a6a10b142c74d43
+961, 0x69436c7b12a2c4c7
+962, 0xccecdcc046f76f03
+963, 0xa6b9793a0660fc0f
+964, 0xf114cd63b38756a5
+965, 0xa44ac409c2246f07
+966, 0x65dd5dde54b6aa26
+967, 0x5df21b90d999494a
+968, 0xafc3d89336a6d356
+969, 0x1acc23065a7ba8bd
+970, 0x87ff903278b23e2f
+971, 0x58e4a44f7e4c012f
+972, 0xb2eb460bab7744a1
+973, 0x9b1aa5a17ba581c2
+974, 0x90c87a15edc021b4
+975, 0x43369d9b481b28a5
+976, 0xd05dc8b00763dc1
+977, 0x40f058f20d77b5e6
+978, 0x2502c9829f78bdb4
+979, 0xa5ef6729f601b2d7
+980, 0xab49116e5d404023
+981, 0x6b77c686cd653da8
+982, 0xd99e324ce1468143
+983, 0xb338c64071fd5469
+984, 0x94f67b1e04fb4267
+985, 0x16f34d11e280c73f
+986, 0x9a6c4cd947bed4e0
+987, 0xd1bf20f05cd068f0
+988, 0x2ced63b15eaa27e4
+989, 0x95989123251dec6a
+990, 0x38906e5a3cb4fb01
+991, 0x4b02f03a01180ba3
+992, 0x67d5842c2b13960a
+993, 0x45dc1d0f5981374e
+994, 0xe6dbf0961817185a
+995, 0xf5717f537c683578
+996, 0xf7a689617ffe5002
+997, 0xdbd1595a8ec1ac24
+998, 0x545db9592b492be4
+999, 0x9e1085dc2c3335ed
diff --git a/numpy/random/randomgen/tests/data/xoroshiro128-testset-2.csv b/numpy/random/randomgen/tests/data/xoroshiro128-testset-2.csv
new file mode 100644
index 000000000..2de341ecd
--- /dev/null
+++ b/numpy/random/randomgen/tests/data/xoroshiro128-testset-2.csv
@@ -0,0 +1,1001 @@
+seed, 0x0
+0, 0x509946a41cd733a3
+1, 0xd805fcac6824536e
+2, 0xdadc02f3e3cf7be3
+3, 0x622e4dd99d2720e5
+4, 0xaacfd52d630b52bd
+5, 0xa94fc32eb4128023
+6, 0x9ee359839e68f625
+7, 0xd9f180e03b686e4f
+8, 0xd6825e7d8fc65068
+9, 0x887f15071c20b9d
+10, 0x6dc39f8336eeaa66
+11, 0x13d17509661b69b
+12, 0xdbe703ea4e61caec
+13, 0x1a4deda7c51c5b7b
+14, 0xe2f2259fb30bafcc
+15, 0x7eb5a4d5f053fcbf
+16, 0x4704d55257921919
+17, 0xcfeb1c70eacd6734
+18, 0xed98c92a0d6b8b3e
+19, 0x4efb928a052188b7
+20, 0x15617edcea5e98ab
+21, 0x8ac022e71a4d1a40
+22, 0xe0ae2cdf81cc05bf
+23, 0x11ae6d329bc72f19
+24, 0x5369885a834c1073
+25, 0x7a865692c8495a12
+26, 0xaf752d7df50f6968
+27, 0x4b81c799c968e005
+28, 0x4104e06972751b34
+29, 0x8600214cf598d6f6
+30, 0x444545884a4b0a80
+31, 0x2d13243847e43cfe
+32, 0x6064921c3b70601c
+33, 0x1b2c2f204185130e
+34, 0xac1e21160f7e90f4
+35, 0xa718d564118e2bca
+36, 0x25fb8750f330bdc1
+37, 0xcdd8329cb365e06
+38, 0xfdcfbff05c3470e3
+39, 0xcbce143aec5155a5
+40, 0x1d17b5b1e2c3c21
+41, 0x68fe2fbabc30aa23
+42, 0x19086e8dbd448c02
+43, 0xdb7d8126e6f3d1c6
+44, 0x1865e34fb131a69f
+45, 0xce3be151debb3e9a
+46, 0xdf573313ce569b70
+47, 0x3a7fcf8ef4fd495a
+48, 0xe26450c5ec487bcc
+49, 0xe99eaeeb35354e
+50, 0x959e7e6cb8bf55d4
+51, 0x3ba4778a79b1b758
+52, 0x30e4f35a940c2e04
+53, 0x67717bb8a50f2c22
+54, 0xa9b3e9db4934cd8e
+55, 0xe22bc184e5d2ad8d
+56, 0x7390583f39dfbb76
+57, 0x19e7ba95b2482b72
+58, 0x549b0c65abc1615f
+59, 0x43989e0d7268118a
+60, 0x1376e3b4f7319b9c
+61, 0x41bc4dd69e4a3eca
+62, 0xdb5b777a0a90e830
+63, 0x4885cae86597a2fd
+64, 0xe472ab9f66c240b5
+65, 0x387e53bf7d31a3c0
+66, 0xd8826e1be0364bef
+67, 0x2a334c6d6f748f84
+68, 0x10c7d9da8f7ba2ce
+69, 0x7b23655caa5a3872
+70, 0x4e52d38a6128c877
+71, 0x581cf9ba515b9abc
+72, 0x464df6946cf89b19
+73, 0xaf0f20053d807827
+74, 0xddeb1fe3d90b8aa2
+75, 0xccb863176382287e
+76, 0x831e79b8d6d91e8b
+77, 0x88ed0822fceb3abc
+78, 0x66adaa8387e19785
+79, 0x23a5005fb1c9c598
+80, 0x4ab28f3b1585657b
+81, 0xd620ca461099e06f
+82, 0xf056f4fdf816bab5
+83, 0xeaef5b9b3cdb015c
+84, 0xee4f14793695313b
+85, 0xaa406259c23ccb33
+86, 0x9ec3e4585b6d933f
+87, 0xb5806dfe257e6c7c
+88, 0x7bee992cfb5fd41
+89, 0x91a70b316b42bd18
+90, 0x874df34eea24edb5
+91, 0x379a0a3ad79d7db2
+92, 0xeaea9f7eb0292235
+93, 0xf4742d169fbb5198
+94, 0x57a84e20592727d
+95, 0x5d8ec89195373de3
+96, 0x22eaeb51baa32533
+97, 0xc3cad6ca8be847bb
+98, 0xf316a8b9b6172611
+99, 0xb687d7988a8a2ee5
+100, 0x8635d3f011c6253a
+101, 0x2280ec837c98731b
+102, 0x2f815c82713ebd61
+103, 0xb2b4c124ac4ea1a9
+104, 0x5db6c0a6a90a1866
+105, 0x3cc317501c96e9f8
+106, 0xd38b689a10819dac
+107, 0x1b8a114bbc51341e
+108, 0xa276c85761cf5978
+109, 0xe6b3d7d5b3b6dc0c
+110, 0x14963fae33e6c2fa
+111, 0x88f83f53a67231d7
+112, 0x77aec607b4aacad8
+113, 0x33cddae181b93178
+114, 0xf1bfcef2a7493c7d
+115, 0xc4177359c975f669
+116, 0x9d603ef0b6bee8a2
+117, 0xc16ee77a4391d9b1
+118, 0xe93f0736cbd3f398
+119, 0x327500ca9afb0730
+120, 0xd8cba3672638e75d
+121, 0xd87f00175eea9770
+122, 0x6680cfd0f0651f47
+123, 0x13287cbd1981e44d
+124, 0x9da5fb61bd633e98
+125, 0x2d704f64c4ad5444
+126, 0x4c28b98c2f7349e
+127, 0x42d156862c609af0
+128, 0xcbd49a9595d2964e
+129, 0x8d54cf464a529131
+130, 0xd6b74f26dd0e313d
+131, 0x4ef8b45baf3ec3a7
+132, 0xfc8be973c860481c
+133, 0x6112312f08028018
+134, 0x78d492d0049b30bf
+135, 0x3160db98b853a1a5
+136, 0x81eb3fabead6d97a
+137, 0xfb54ee3224945380
+138, 0x3c62663cd2aa07dd
+139, 0xeaa2eff9e2752bb4
+140, 0xbdecb6e8041eccf9
+141, 0x9a135a78514e92a2
+142, 0xacdbb7139969ae66
+143, 0xf71fc98126f511ba
+144, 0x1bd6dc2853a20898
+145, 0x6fb80e8eff8b26a3
+146, 0xfff9ba38f9c3664f
+147, 0xa4224ddddbe3a700
+148, 0xd76b8f1bc09e35ad
+149, 0x1b6c5bdad062aae9
+150, 0xabc5a61088f2a3f4
+151, 0x5160b68fd92f30c
+152, 0xb2cd4c619e1cb446
+153, 0xceffe90f16c69c0a
+154, 0xd7845f2eb8b1bf67
+155, 0xb6ddd2d76e99be45
+156, 0xf6212b33d0bc1019
+157, 0xdebc75b6e2d6db50
+158, 0x7a6d61de4c4c3a9e
+159, 0x473933055a8727a8
+160, 0x83ca458dff43a0aa
+161, 0xde2b9e38b321aa3
+162, 0x78ba83864952e9de
+163, 0xdb4c6db1049e8406
+164, 0x9c3a30ffdcfac7ee
+165, 0xeab6e9a0cf1ecd0a
+166, 0x3617b147dd5ce2ca
+167, 0xe5c000907864b48e
+168, 0x7dcb724b2767b90e
+169, 0x4ecd7ad20e75e566
+170, 0xe03be67c421d2942
+171, 0x7e7a68988cd564d3
+172, 0xa8c25e5165919c51
+173, 0xa1d550ed4a39e690
+174, 0x6e7abdcf98453f72
+175, 0xe57eb7d34da3c5b
+176, 0x8da6eebbab5ef00a
+177, 0x7574363208ed2700
+178, 0xff06b2a934a953b9
+179, 0xf3c8951de92dcabf
+180, 0x78b817c0dee711db
+181, 0x358522c82c15f627
+182, 0x81d54c2d5ef396b8
+183, 0x1f98c21036a70b27
+184, 0x4d3692ad8d5e5112
+185, 0xb63674f55b06bd46
+186, 0xbf30a7aada9b1cc2
+187, 0x57f75205e81f6b47
+188, 0x37e9ab7e796bd0c9
+189, 0x34aad24654a70694
+190, 0x5602376e46ea14ea
+191, 0x3761258bc9e79732
+192, 0xffe7d79561680d75
+193, 0x35b82f78a688b86e
+194, 0x42d23cba46456a80
+195, 0xd64f0c226c84d855
+196, 0x6ef5d71859f03982
+197, 0xdb7dabdf5282c818
+198, 0x94ec7253c617acfe
+199, 0xcc118236ff2009fd
+200, 0x9f91eaee04579472
+201, 0xbf79aadb5a3a4a1e
+202, 0xf6ac29ee74fae107
+203, 0xc82643f14e42e045
+204, 0xb08f864a06e4db72
+205, 0x7a2a402f1a000aaf
+206, 0x2c2e03247fad91fd
+207, 0xe70bb051040fd7bf
+208, 0x8d42d479e23862ed
+209, 0x3b2b368d659b45f8
+210, 0x96c8d7c31b396bc5
+211, 0x41664c476575aeea
+212, 0x303ba0289cd281fa
+213, 0x2936193bbe462f68
+214, 0x4a63581937611f45
+215, 0x10f69bed29c2a652
+216, 0xcda3073cb7dd2082
+217, 0x374da8d58157bbdb
+218, 0xf3c040dd9a135d51
+219, 0x5ae628cef3e753da
+220, 0xafdfa06ac9ed9eda
+221, 0x94582756d1cc948b
+222, 0xce387a039a43baa5
+223, 0xd9aab74b36032cb4
+224, 0x720e30cbfc81765f
+225, 0xba42d487e461d31
+226, 0x445fa16350da585b
+227, 0x43a3b57501104e19
+228, 0x55571957e6267eb3
+229, 0x8c1f8cc37a83b2cc
+230, 0xdd433be6a0188876
+231, 0xdd0c1053757845fd
+232, 0x47d17129bdec523
+233, 0x5fdc39aa7f38cf97
+234, 0x92ab54d8c66e4417
+235, 0xf46a39cdbdee494a
+236, 0x6a226e83cc244891
+237, 0xdd2dde8767318719
+238, 0x794e882325646a7
+239, 0xf1d269b9fa82e09b
+240, 0x5871ab313f97bbde
+241, 0x30a0f742fe7a1746
+242, 0x8f3b8c2ef199341a
+243, 0xf280d28fd6ab1ade
+244, 0x8b5e8a112798cd0e
+245, 0x80cc043e4ace43b
+246, 0x1dcd69d6d8f6c527
+247, 0x467dc81c1f462ff8
+248, 0x47e98dba34af7440
+249, 0xae4599c86b11c6d5
+250, 0x4cc5574019676ca9
+251, 0x79b0a34fc332cfbb
+252, 0xc5c778c13974e8
+253, 0xa1773cddcb7f3bd
+254, 0xae20dcad57acc7e1
+255, 0x11e6e98c02b4ee9f
+256, 0xfedb58925c42929
+257, 0x2ab56b3fccf3c5b6
+258, 0x5740e0a90920bbdb
+259, 0xe02ea72778a4cc5c
+260, 0x7fa9448e7563e3e
+261, 0x907603f2ccd28776
+262, 0xc655d1fbe3fbf1e0
+263, 0x40bcc587212acc1b
+264, 0x1af8bcb6c4902043
+265, 0xd47a71193454c4ba
+266, 0x9e9cb523c3c9dfe9
+267, 0x4b9e107b36ba9f0b
+268, 0xc89d86427a63c956
+269, 0x2353f37179b7147
+270, 0x7c6d3c3d67f1f245
+271, 0xf008463da2875270
+272, 0x4494eb9f1d83aca9
+273, 0x84dc57b61ca36077
+274, 0x461480c6f708fec3
+275, 0x6256b05de4b8233c
+276, 0x2b02af1084a4dfd5
+277, 0xd4f3bb079fb41a61
+278, 0x83ee412671f4ef78
+279, 0x6c46e97c8f197f8c
+280, 0x4d949413ea0d1e9d
+281, 0xd7eef89a4d084d17
+282, 0x18f03d6a52592eec
+283, 0xaf6fc843c53e63fd
+284, 0x551f420f53de9097
+285, 0x4fa8dd599dd1365d
+286, 0x399727713519d072
+287, 0xbdf7dbcc18541feb
+288, 0x3f2336894ebad1fd
+289, 0x903a74b979250389
+290, 0x733313e457a65fe
+291, 0xd189b01b9258d1c5
+292, 0xb2d9533680f9a70b
+293, 0x2a0929d54aaae5c6
+294, 0x9c6b844de0367b34
+295, 0x341d37b0d1e75bac
+296, 0x5cd370014b87cc94
+297, 0x4bdb409173abcb35
+298, 0xafd38d4c9d91240f
+299, 0x76d7d551533f344
+300, 0x3779e62cbdef738d
+301, 0x211052148f86c129
+302, 0xf2f325e09a17da4e
+303, 0x1e874c70b2d62dec
+304, 0x412fb842edc1c3f0
+305, 0x23d9f5e6c9d83d27
+306, 0x8e58937e012d3c76
+307, 0xb0ab1175918a765
+308, 0xfc7991f83e0e06fd
+309, 0x1066d7c10f16cf5e
+310, 0x29a14ec418defe81
+311, 0x20f98e60c158d08f
+312, 0x463c0497605efae6
+313, 0xdd02ac91db3f0cb9
+314, 0x434cbbb353edfa66
+315, 0x892ea5a463774836
+316, 0x8e00064e77225923
+317, 0xca7ec8ebe244a404
+318, 0xa9146f68a99e0a77
+319, 0xc85ab0fd6c4c8a99
+320, 0x4a1104cb1287380
+321, 0x25a570b6e2b45828
+322, 0x3e3f5935137b0d61
+323, 0x499d6aa0407317b9
+324, 0x4ab08263445a3fee
+325, 0x2dcd45f060d8f5cf
+326, 0xa73225adf6418dd1
+327, 0x738ff6caaffb838c
+328, 0xa93e4a4d2330026e
+329, 0x47421b8206cf7ba8
+330, 0x5d9ad2876b1a4e84
+331, 0x6557edadf965aad3
+332, 0xaeffe33ca45ac0bc
+333, 0x2196b20f7074c7d2
+334, 0x351a0c784e1056b4
+335, 0xfefaa1eca46cba97
+336, 0xf58741e34d53876e
+337, 0x5942f6de49d5cade
+338, 0xe1b0d6514455ac99
+339, 0x456dc6a18b651d36
+340, 0xa8d240033f5c9074
+341, 0x7d758bc84ec678bf
+342, 0x21ce28f61ecde645
+343, 0x83b8f058c1b36557
+344, 0xeaf452c4467ea627
+345, 0x60bb8582e53d2f9f
+346, 0x9649572eaa40c725
+347, 0x59533356c226c99a
+348, 0xc06b7f790fd4fda1
+349, 0xdb7d827921aa5962
+350, 0xd9be204c05438878
+351, 0x67f903bed4fb0450
+352, 0xf8e583b98827118c
+353, 0x72c8508fca1e207a
+354, 0xcab1df54ae1542dc
+355, 0xaaa774d0c8833857
+356, 0x710c4b86e747bbcb
+357, 0x8ffc4dd34d5f12db
+358, 0x3b1d4fbe64743023
+359, 0x3ca88da03e8d8da2
+360, 0x970b522fdad62c7d
+361, 0x7596d74c3e598a71
+362, 0x1e9c86f3b5d93e5b
+363, 0x378a3fe78b730c3c
+364, 0xfbc82d6ace6346
+365, 0x1eddf6aca48b7ff8
+366, 0xed12c2c2e137a0c6
+367, 0xd2001d92384c365d
+368, 0x69a1bad8bc8742eb
+369, 0xe1b460d2e65e9a74
+370, 0xeff030a0954e3832
+371, 0x23ac5413d4b3e60
+372, 0x802fffd55c4d2279
+373, 0x1776b952e25fcacb
+374, 0x595f3f386b0f524
+375, 0x3f2d5e55b839c40e
+376, 0x145202db5650c14d
+377, 0xc28858131b702442
+378, 0xa1381d43a4f59fcc
+379, 0xb3088835a18600fc
+380, 0xca7830bf9187f705
+381, 0xa189dbff019ca64d
+382, 0x82ad4b1f88491340
+383, 0x27262f1b70bcc1c7
+384, 0xaa52ad0b4cdc95b9
+385, 0x6898a6e5a791cca8
+386, 0x4c892bd369fb7c7c
+387, 0x2c5040316ad789e4
+388, 0x25aceb42f6d853d4
+389, 0x8f3e09dd6e6fcacb
+390, 0x35f4e10c7b4e29cf
+391, 0x6156e9fcc26a6e83
+392, 0x8a8389e8a9c70fda
+393, 0x81219b723a3dd912
+394, 0x631f0c99c62650e
+395, 0x9cec1c4f650d6c4c
+396, 0x1d3b402d466479aa
+397, 0x6d2fc0877f6f8e46
+398, 0x2000b7178225c4c
+399, 0xb01c45dca932ffb2
+400, 0x61f25ea549d3b3ef
+401, 0xfc0733a134f7bb8c
+402, 0xea3ab2a0cc6a366d
+403, 0xe26bf2b8fe0db591
+404, 0x3186c9cdd8757ee3
+405, 0x9cb472c0c526cf7b
+406, 0xdafe18916dbd33d2
+407, 0xe0b15a3aed330dec
+408, 0x7079ae5641dd16cc
+409, 0x49b6b9756c347b90
+410, 0xdda875fe11e94d34
+411, 0x8c77fb380278f362
+412, 0x602904b0cd3bc464
+413, 0xd2dc40f56fc531be
+414, 0x753175bcc1a93ba0
+415, 0x333a71f4d2d756ea
+416, 0x7b862ff73b46e03b
+417, 0x9df539d017e9017e
+418, 0x4113e5be11f63f2c
+419, 0x422942050abc4fd6
+420, 0x737b754e2add8d6a
+421, 0x313e6c1ecefdca96
+422, 0x5436d70ed2ee4cdd
+423, 0x1db894fde99e34f6
+424, 0xd86bc0b79db9a96f
+425, 0x9d904f0aca534217
+426, 0xfb14afbeabfc04df
+427, 0x9c4ccba431333edb
+428, 0xc7de0af1a5760939
+429, 0x735669225566ce71
+430, 0xf5815dabb0665733
+431, 0xf0a6b7c00d4d569
+432, 0x1448e6fe1432b7af
+433, 0x2e0586f6c9e6e7b1
+434, 0x7b75aa00eb44d795
+435, 0x7ba5cfa018a44c87
+436, 0x5854a5f78e636c5e
+437, 0xdcbe856037d0228e
+438, 0xe8882d90f7259452
+439, 0xcb6ff056c4171c82
+440, 0x4a7bd2245f0e0e32
+441, 0x3e2a40308897a793
+442, 0xe404dfa4d3284167
+443, 0xab022bce6ad8cbc
+444, 0xbb5a145064db9976
+445, 0xedd82ddea103ab7e
+446, 0xcc906d55fb10a8cc
+447, 0x63ba976a36e0cf56
+448, 0xb3ef5ad3129eedba
+449, 0x409b01e4107e9dc4
+450, 0x41059d8141efd96e
+451, 0x10bc4a29ac5cd941
+452, 0xe2fd0fb5c7787046
+453, 0xba24bd0f8d018cb3
+454, 0xc9cf71f73e6979f5
+455, 0xd79a917354d39e89
+456, 0x44fac8764c14e096
+457, 0x29c2cdcce0ce515c
+458, 0x41c6704b232934ac
+459, 0x2ace8d883c6ed401
+460, 0x76d37e5aa3c57f87
+461, 0xc7b7ae6275c47624
+462, 0x33e426b3e22bc96d
+463, 0x77818a58fdc8b640
+464, 0x49c3b6b021037e35
+465, 0x8a941f067ca1c772
+466, 0x8dac8803caad398f
+467, 0x2478a7f23abb4332
+468, 0x98ef79938ccc8b65
+469, 0xdddd5e6776f61726
+470, 0x8d9412cdc85ab90d
+471, 0x901946d2c1a63b26
+472, 0xc93fbcced6bacc00
+473, 0xabc3dfbdcc9b8fc
+474, 0x6b4ba01186620ec0
+475, 0xbb32573515ef782b
+476, 0x174d712e47dc77ee
+477, 0xd0528205819fe3ee
+478, 0xab1f77e5dc7b0e95
+479, 0x7f86317fcf8bc84a
+480, 0xa7806c55ff0b4f49
+481, 0xe8cdce88ac77263
+482, 0x2e497636f939d7c1
+483, 0x9ff5e2c32edc3ee
+484, 0x71579e5276731bbf
+485, 0x565c679f3f2eb61c
+486, 0xc2a747df0c436c
+487, 0xfc30f2f9d9489081
+488, 0x74548f1d9581fed5
+489, 0xb5819230ffd9afeb
+490, 0x228ff1227ebe13cc
+491, 0x38ac137ff54ff158
+492, 0x41ed776d549ca7da
+493, 0xb4cfe4cc90297ff
+494, 0x17988d6ed8190a5d
+495, 0xe27817eb69723f90
+496, 0xbe1cee1533890e29
+497, 0x8ee48e99d9a74f22
+498, 0xa31a5dceb1db5438
+499, 0xeecbbf998e1c0d43
+500, 0x6f8e0b0b2b361b9b
+501, 0x2a102fca177728ae
+502, 0x55a27f350de42380
+503, 0xc45ace761d5cf37b
+504, 0xe14d0182a002d8a6
+505, 0xc05841ad2de5d64
+506, 0xca6b7b7131476892
+507, 0xe4a92da10eada512
+508, 0xf7a33c11f8927959
+509, 0x7b47639e2bcd8c44
+510, 0xaed8ec8dc551d755
+511, 0xba4b5ffd28ad78b7
+512, 0xc30ddd4d1df6ce2b
+513, 0xe1b9bd0020b56548
+514, 0x8f73edbced893634
+515, 0x738098d32669cab4
+516, 0x28c03717c5adc3c0
+517, 0xbc044ebe07a8f4f3
+518, 0xc3083814802950fb
+519, 0x8639da9ccdd4068
+520, 0x2ac89cfb975e2c41
+521, 0x8e163ccdbc863461
+522, 0x4a60169f9a3648fe
+523, 0x6694ab36d7d02548
+524, 0x6b4e5764db952413
+525, 0xbf842329b9a13bfa
+526, 0x1c8639cae82e7d92
+527, 0xd5669e818fb34170
+528, 0x1f4df6bc59f9f6aa
+529, 0x8b192245d457c5a0
+530, 0xdff62af9d9eb696d
+531, 0x53dcf9276ae1ab0f
+532, 0xc1c4052d4c9d3c16
+533, 0x5c5f7b33e6aa6e0e
+534, 0x482c8e4be2a5d704
+535, 0xc5d1632e532ddf97
+536, 0x92a41d90396b49c6
+537, 0xf895429c172ec71c
+538, 0xab3ed20fad9ae896
+539, 0xbecd1ee462ba9dee
+540, 0x29e020d2bf854671
+541, 0x4a31b52b2b48d795
+542, 0x14b2c4bf2ff453a2
+543, 0xbd49a5992f3deac2
+544, 0xfe19fe4becf1b1c8
+545, 0xa90ede0ced2811cb
+546, 0x409de5d1234b16fb
+547, 0x4eb18dd87fdd6cd7
+548, 0x52387faf2214a168
+549, 0x18678b302a911d42
+550, 0x484ccf18cb491bbe
+551, 0x8610462c7e48b54d
+552, 0xb2b2712e35cc4282
+553, 0x754abdb493e3ce4f
+554, 0x352745881ade5eea
+555, 0x37d4c7cc6c238692
+556, 0xe7a8061b7c0259d2
+557, 0x187e5ee097b24be1
+558, 0x41af64f7cecc63e0
+559, 0x33612ca0ca35a2bf
+560, 0xc8b652dc6cdd0829
+561, 0xd050306acf3314b4
+562, 0x7bb7c4114d5d2347
+563, 0xd583132ce17b2f9c
+564, 0x1473fcb33448ece2
+565, 0x5f9d56e869d06300
+566, 0x45c27eae73dd6391
+567, 0x15164b3b33d2c145
+568, 0x32991907d6370c8
+569, 0x9445ff1373a9635b
+570, 0xf33ffa711ebc9d97
+571, 0x38dc80e03d8badcf
+572, 0xf346f6f42e3c396e
+573, 0x47bae2fa3827f514
+574, 0xde0e4fc698e6a6d1
+575, 0xd26d4b4097367afd
+576, 0x16dea3bef70fe858
+577, 0x226decb65f433fa0
+578, 0x2e4b7f4915de64c7
+579, 0x4f31a10935fcd415
+580, 0x5e3e420e134d2191
+581, 0x52bf5207327dfe09
+582, 0xd8c4ab9ec015b93a
+583, 0x55154818bf1ca7c9
+584, 0xc121666af28dcc9a
+585, 0x9904729e1a01bd3c
+586, 0x6e9cae3d292339bc
+587, 0xf6fb78d385e10840
+588, 0x8fb67f5e56ee1e0b
+589, 0xba17083a33230c28
+590, 0x9994a47b97c3dc9f
+591, 0x53391314bd23bebb
+592, 0x9ad473ee0eacee3b
+593, 0xaec807e5cb1f0f18
+594, 0x5d5838c5e16e82ba
+595, 0x7c810095640d10df
+596, 0x898b9da105d90061
+597, 0x9296de76fe275a73
+598, 0xb118361c5dad2c6d
+599, 0x4b9051df7724b504
+600, 0xd91789023183a552
+601, 0xc35ca6285eea0aaf
+602, 0xb6fb918229e8bb05
+603, 0x4f3f41b3fe26df66
+604, 0xb63885de73089f64
+605, 0xc55aad297e8db9cc
+606, 0x7a5ebc6cbb977bf9
+607, 0x122478e8d6b4b5fa
+608, 0x52f69dc782aba318
+609, 0xce068981160e9756
+610, 0x303897ea358b700b
+611, 0x9963ff7db9effb75
+612, 0xa3e4224b2372dc4a
+613, 0x68d78fde1f0b1e
+614, 0xb895f75785c0ec92
+615, 0x3df2981af65f3be6
+616, 0x88b17d18c7584a58
+617, 0x560834beafb27138
+618, 0xfa1d9ee07edbf359
+619, 0xc27c98d528ba33f8
+620, 0x58873114fbc61614
+621, 0x3f8112bff34dd5fc
+622, 0xbe7fbc694b26e7a1
+623, 0x323d8907780f85fb
+624, 0x7e77f48feec1f69a
+625, 0xf6d8ac3573ac4ba4
+626, 0xf013633aaba2cd2c
+627, 0x5c3153cd6f9f2fd8
+628, 0x4c3ae3906dc4e92a
+629, 0xd2f375cec67af24d
+630, 0x31943d0c1139dced
+631, 0x95ee9d16c2320163
+632, 0x1c0f03c058441f3b
+633, 0xa4dd49a2abbb39a5
+634, 0xcf6c4c9c695783ec
+635, 0xbb0ea4c9a55af9ac
+636, 0xb6a7a4c82fb232d5
+637, 0xd090cc06191a5d2f
+638, 0x653c0a506097397e
+639, 0x5a5af47067bba201
+640, 0x23df206d3d6f105
+641, 0x8501560fac79fa17
+642, 0x2b95d59621a424c8
+643, 0xb20ca1d29061c6cd
+644, 0x9824922790be5c12
+645, 0xdee7448af6c82ce
+646, 0xb57c8ab1b2b0ddb1
+647, 0x9241c7effe12d339
+648, 0xf69967036e960af9
+649, 0xe2e14558fcf89166
+650, 0x23a16c73c276d451
+651, 0x9fdd05ed8828875b
+652, 0xc3466fd3814d3253
+653, 0xdfc9c839dc99a11d
+654, 0x16693a83f78664fe
+655, 0x65da2039561d5402
+656, 0x20b0d78000a063fa
+657, 0x6b1346e833500ca1
+658, 0x7aa4a72cf75d077b
+659, 0x378c2101d36355d8
+660, 0x95910003849a5839
+661, 0x4ad588ff7fe780cc
+662, 0xd64d44efcf333e82
+663, 0x2f16c1742dcd9e7
+664, 0xd52ee978f72d63c2
+665, 0xaebda4339041c968
+666, 0x909d2433eedf9e81
+667, 0x670d7dbb7420f9da
+668, 0x2880a01109e20500
+669, 0x7b48c2a4e918f6a1
+670, 0xf38fac2caf78d1c
+671, 0x426944a0a0fcca7f
+672, 0x24331c63647d4d36
+673, 0xc8e11bd52e232844
+674, 0xe7fb6b0ccc6a867a
+675, 0x5c797fb7a7603019
+676, 0x2f6b2971284d996a
+677, 0x96a89cf3747fd01f
+678, 0x9aaedf8572e12afe
+679, 0xdf9e5a2214174223
+680, 0x163ed5bedfd06b59
+681, 0x6c45e87d73677bae
+682, 0x97b415906449e5ce
+683, 0x53f30cd13d0bca1c
+684, 0x86b204c8a1775e1d
+685, 0x7ab03915913dbaa3
+686, 0x30767dc8d5a8e96
+687, 0x4b4fd100a4d86d59
+688, 0x65a5dfabb1a06ea1
+689, 0x59632b7fec7ad10e
+690, 0x2d436b149509d8
+691, 0x37a45927a3898861
+692, 0x396db74b149f86d4
+693, 0xa1fdf757db1de83
+694, 0x3a08d99d4a60dae3
+695, 0x9df8a778bfd97996
+696, 0xc7196b2c8db56799
+697, 0x9378d20ec50eeffb
+698, 0xb9ecc104b558e25c
+699, 0x2929a6ddc011e01d
+700, 0x5c8e297d48eaa335
+701, 0x9e000149b1821081
+702, 0xa8d080481a874776
+703, 0xedb2e0fcc8695de1
+704, 0x31c38628250d2d1f
+705, 0xd92b4c99893c21a0
+706, 0xa56a77e01dffa3e6
+707, 0xa607e4ebc9c39fb5
+708, 0x6c8f5f7df2cddeaa
+709, 0x1180c33d565487aa
+710, 0xf4c66f402b7c1a21
+711, 0x4bd81bbcbe186a4d
+712, 0x623e742bf4cfc10c
+713, 0x84074e36e58825dc
+714, 0xaa70f6dfdd617ae3
+715, 0xe305ea5aaf5aea74
+716, 0xc4726917aa5914ec
+717, 0x317bbc6430cf6442
+718, 0x5b8af46f34f146a2
+719, 0xe4552970afbf97bd
+720, 0x20d7a393f8176838
+721, 0x5e4a65ab657c7d2b
+722, 0x1e430b0ad9e6fe49
+723, 0xa51866b0155c88d4
+724, 0xf1e2cdf07c51638f
+725, 0x50f57c27c4e00a44
+726, 0x23bd9255fbb896d0
+727, 0xa91748820079892f
+728, 0xb4d156ae147d6fab
+729, 0xb3a474a3480c38a9
+730, 0x45dbbb715f1e3085
+731, 0x585986863049a87c
+732, 0x436045cd7d1a9172
+733, 0x236972e814d5a4d
+734, 0x2249b5f676f29b8a
+735, 0x67fdcd55de80a8a9
+736, 0xd4fe890341189ee6
+737, 0x70e1eac3eb0a498d
+738, 0xce1c2beb72f7cff3
+739, 0x50d28189d52b5785
+740, 0x93c740175c287808
+741, 0xf29621c38e8a1044
+742, 0x32d50b2824a59d70
+743, 0x8d595966ab119908
+744, 0xa5750cc7ceb0823
+745, 0xbcdefc996aed9ceb
+746, 0xc1d70bb5480e2778
+747, 0x942b3f26a50fec6d
+748, 0xa7d4851f6990be3d
+749, 0x4086348def6e7c11
+750, 0x18aa95009f903221
+751, 0x3010f2c49ca532ad
+752, 0xe9e9b21cd5552b31
+753, 0xd90541d86fbd9566
+754, 0x9240f2d8ffffd945
+755, 0xc7815330b2fd5f62
+756, 0x89040a5ec01115f3
+757, 0x4da5e5bb136d77ec
+758, 0xc6a489d50839194b
+759, 0x37839dcfaa606c7f
+760, 0x8177b7be1443adb8
+761, 0xf588b929a63b0790
+762, 0x900a6482fa22b6de
+763, 0x845502c244d08f04
+764, 0xc0a8f114df2a3608
+765, 0x5e201627c73573b9
+766, 0xa371ef9c7fc8ac6c
+767, 0xca8a07e82c615463
+768, 0xba00e6e8d1c033db
+769, 0xcd76dbe8a10cf399
+770, 0x959fe93180800aec
+771, 0x8e77fa85404e4cce
+772, 0x7b34e8983b9be1b4
+773, 0x81c0125be3d132bf
+774, 0xfdbc9bb181a67f5c
+775, 0xf2d7962c98584eaa
+776, 0x8922b4291b6d0d41
+777, 0xb8235b21de4093bf
+778, 0xc94518b4e632edb7
+779, 0x757f43c099ff5783
+780, 0xc063132013dafb63
+781, 0xfd579036a7030019
+782, 0xa5f0638c9ead0004
+783, 0x7494b34172659deb
+784, 0x481772ff25eadcfe
+785, 0x72e37428f1e21d99
+786, 0x5cf98e5c40aa77e1
+787, 0xb3ce6c54df0aedf1
+788, 0xf00af8c613bcd8f8
+789, 0xd1237f23a07b0e3a
+790, 0xa8fe00d99f32f731
+791, 0x8b85f312af567228
+792, 0xdc2515684772c84d
+793, 0x7d11b82c9e31766f
+794, 0xf09c8697b3ff95c4
+795, 0xd35ebc77a86212eb
+796, 0xadb5a1e95afb5f6d
+797, 0x6ed845ef3fcadff1
+798, 0xaeb029f4caacb130
+799, 0x7cce6f1bf0ed8e7c
+800, 0x23b6201003d49e50
+801, 0x6dfbf0e3c21a03de
+802, 0x4729d4f0e6a9240c
+803, 0x40af60788c357e6
+804, 0xcd17f1e93dca508b
+805, 0x24a823f6424d2821
+806, 0x35194e11b535d3ef
+807, 0x948f055d9436932b
+808, 0x4e733969108a5551
+809, 0x3c0816162700c63e
+810, 0x7370a331ce8096a2
+811, 0xfcf5caf742e23baf
+812, 0xe960bb3fe0308e95
+813, 0x8ef061808248efc7
+814, 0x16c6c5da0fcf1296
+815, 0x14a05c065cffe433
+816, 0x568dd4ba989a423
+817, 0xd20a156a56768914
+818, 0x9872a06bbf694ad8
+819, 0x8ac764e367433878
+820, 0x2453eb53416ca0c4
+821, 0xa59ef657a7de7140
+822, 0x43cb5c3119ddabac
+823, 0x645ebee1c5d62133
+824, 0xacf017344a2a6031
+825, 0xc22ebb7b220dba01
+826, 0x9048e327d43fc69c
+827, 0xca2319dcd6c49370
+828, 0x63844574971006d8
+829, 0x7ce248cd860d2997
+830, 0x4d5780b45f802359
+831, 0x99798ec46c6536c5
+832, 0x4a42d4a45bdc0a1c
+833, 0x75f126405fa990ba
+834, 0xa1cf7cf0ee32ac82
+835, 0x12b722bce6d8b9a6
+836, 0x85ace663a1f92677
+837, 0x5f0514135be46137
+838, 0xb86863169f76d2f4
+839, 0x1dfc6f087c8721df
+840, 0xde984a38824ac47b
+841, 0x249504789c3f7704
+842, 0xaab5d4d12f9df445
+843, 0x863caa50cd8764c9
+844, 0x24cf6ca7a6a8e5ab
+845, 0xf293f7488a738c5d
+846, 0x2936a321fe93cce5
+847, 0xf5b2504862ce0521
+848, 0x9d6f9350f3a2b4f3
+849, 0x5093102345eb9ef0
+850, 0x20aaace8135cecbb
+851, 0x252a8e893ad79698
+852, 0x2c68c7a18c5bb936
+853, 0xf973af891f51cfc0
+854, 0xe5c661b55596bcfd
+855, 0x98b08b4904602dbd
+856, 0x9fcde37c43372b73
+857, 0xa5d05483d489e6ce
+858, 0x8b359f723ae63264
+859, 0xadaa0de5bdbd2d33
+860, 0xa4976d2755a6096
+861, 0x7174d708c2537633
+862, 0x24d86478fd44e33e
+863, 0x8a0abcdb74f29fcb
+864, 0x1fbf39da74328bcd
+865, 0x2c5973fdfcbbf09f
+866, 0xe23b300ec45a7b8b
+867, 0x69019e93b3633c1d
+868, 0x749053f7f30d6029
+869, 0x84aa9ded82b4a5c1
+870, 0xb6bb6cb827d5bcb8
+871, 0x503002036e031d34
+872, 0xba06a59f171023a1
+873, 0x733ccfc01e97abba
+874, 0xa34cc599a30202ea
+875, 0x7581c12df8a4174
+876, 0x8ee2efea87ff8766
+877, 0x2cd79614de9ff639
+878, 0xb190669d3052a8f0
+879, 0x9f3d98c2c3fc3266
+880, 0x48555e89c5b6184e
+881, 0x4b9c73be9c8e8ec2
+882, 0xeee8586bdb309974
+883, 0x823a9e3bb2741bbd
+884, 0x94a1a50e42fed547
+885, 0x2d7fcb9382eb1ba1
+886, 0xece0e31c5bb89719
+887, 0x440c75600472ddb2
+888, 0x28990d7882d9563c
+889, 0x4e9b55cfdbe05ae9
+890, 0x4dba7e062bc24994
+891, 0x71faedf4414cbab1
+892, 0xb12901b28a65ce11
+893, 0xc0834509da822274
+894, 0x7daf95e13d676f29
+895, 0x6bc8df584cd07431
+896, 0xc614bbb95c749cd6
+897, 0x11d888ab3d6e9f38
+898, 0x8f4b7c7b0bda401b
+899, 0x5eae46c2079e6f7d
+900, 0x9c6f616b61328d61
+901, 0x9415dd3fea046eeb
+902, 0x2b04d5dc9a25c2b2
+903, 0x402fd8a16781cf56
+904, 0xdc0be7170faaf41e
+905, 0x23d4fe72e8f2fa1d
+906, 0x18909afc53a4bce1
+907, 0xc8cfb6a2c1f230bb
+908, 0x8268ee65c393a138
+909, 0x9c6b4210f409a598
+910, 0xe3122eb7e28e1c8a
+911, 0xe3f0903f892e2aee
+912, 0xc51ead0ad0dd1fb8
+913, 0xb2e7343037d7e6f0
+914, 0x89376733a7d3d9b7
+915, 0x13e1f3b9da7cc130
+916, 0xe1911b0a3fa4089b
+917, 0xfdc131f18d761b91
+918, 0x782dbb406f0453f9
+919, 0xa61c1d244fdbea55
+920, 0xa4d2ed4dfd8bf85a
+921, 0x3459f746e0a71313
+922, 0xa4f67e188e38d8c9
+923, 0x271dd484aee01e22
+924, 0x1907c912ddab4c06
+925, 0xed295346066663cc
+926, 0xbebf878973ec93bb
+927, 0x464b6c605cf80b2f
+928, 0x924f8c5d8af46c76
+929, 0x8a705a5045a54c51
+930, 0xbe630deef4598083
+931, 0x63a782885bf2ef56
+932, 0x5c408ad85ab683f8
+933, 0x5a35bf59ca6db7f0
+934, 0x995b786bc77fcae8
+935, 0x93ac6c1d806cfe6a
+936, 0xdc8ad969faae9220
+937, 0x67eda7e6d2b41375
+938, 0x21d2eeb2f58da10e
+939, 0x4209dff5fec899a2
+940, 0x1b30fe5b2d96eddd
+941, 0x3959011cb1541a05
+942, 0xfd0400e18394ce3e
+943, 0xfff052e033e0ce86
+944, 0x569bb5da57a3cf2e
+945, 0x45e0ef9753a2731e
+946, 0xf6c64d69371ef3ea
+947, 0xff6e5d50e2b29841
+948, 0x57334a6acad31efd
+949, 0x3f39b0989b465114
+950, 0x9bf7bda3bc70b5dd
+951, 0x44adb420df4b19ae
+952, 0xa32ca7df58be9881
+953, 0x1af3b91f5078f255
+954, 0x9b1c0f815dba0781
+955, 0x29a5f5869108b99f
+956, 0x890ebd600b286b45
+957, 0x4fdbfbba80a094ba
+958, 0xbb42ae41c9679296
+959, 0xf51a153b2e4ea0d2
+960, 0xcb01bcb495a01869
+961, 0x1005c4deb506d28e
+962, 0x3e1213bfd6496f47
+963, 0x388f29b4151fb7aa
+964, 0xe75b0d72872db802
+965, 0xc764bfae67627d2f
+966, 0xb86fd279622fb937
+967, 0x3fc887ebd2afa4e6
+968, 0x850b7ec2436195dc
+969, 0x11495c0c0e4d1d34
+970, 0xc98421a7c82ced
+971, 0x8337132f8c2eea5a
+972, 0x77eb95949a98f2f3
+973, 0xcb325cf4d527f0e3
+974, 0x483192546ec89241
+975, 0x957fba4dd4238c59
+976, 0x6b12c9edf75d9ac6
+977, 0x9e959f3749b97cc4
+978, 0x1d77ee83f6b337c1
+979, 0xf6cf70e9db6bee2a
+980, 0x87155a5e5746a82b
+981, 0x552b032dc590447e
+982, 0xbb939df7cb2dc42d
+983, 0x1db106ff15b953c7
+984, 0xcee301b609e43399
+985, 0xe9babbea0fc4b81c
+986, 0x8ea4ec5562e67027
+987, 0x422d3637cfb0c29
+988, 0x534c6604cd9cc6c
+989, 0x301f4f55a0fdac48
+990, 0xf6c4cc1ea05c27a5
+991, 0xa1f4a4d5b999fbb1
+992, 0x343425e806758ccd
+993, 0x9641ccb506ca4b0f
+994, 0xa94166fa9641d3f5
+995, 0xf344ca329bff56e1
+996, 0xbc49cac1233860fb
+997, 0x9087c97dba7f230
+998, 0xf2acda7714a3d1f6
+999, 0x4d076fb8ea7d9b9a
diff --git a/_randomgen/randomgen/tests/data/xorshift1024-testset-1.csv b/numpy/random/randomgen/tests/data/xorshift1024-testset-1.csv
index 661b157b1..661b157b1 100644
--- a/_randomgen/randomgen/tests/data/xorshift1024-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/xorshift1024-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/xorshift1024-testset-2.csv b/numpy/random/randomgen/tests/data/xorshift1024-testset-2.csv
index ef10e6872..ef10e6872 100644
--- a/_randomgen/randomgen/tests/data/xorshift1024-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/xorshift1024-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/xoshiro256starstar-testset-1.csv b/numpy/random/randomgen/tests/data/xoshiro256starstar-testset-1.csv
index 534799b04..534799b04 100644
--- a/_randomgen/randomgen/tests/data/xoshiro256starstar-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/xoshiro256starstar-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/xoshiro256starstar-testset-2.csv b/numpy/random/randomgen/tests/data/xoshiro256starstar-testset-2.csv
index b688fcb2a..b688fcb2a 100644
--- a/_randomgen/randomgen/tests/data/xoshiro256starstar-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/xoshiro256starstar-testset-2.csv
diff --git a/_randomgen/randomgen/tests/data/xoshiro512starstar-testset-1.csv b/numpy/random/randomgen/tests/data/xoshiro512starstar-testset-1.csv
index 78fb903ee..78fb903ee 100644
--- a/_randomgen/randomgen/tests/data/xoshiro512starstar-testset-1.csv
+++ b/numpy/random/randomgen/tests/data/xoshiro512starstar-testset-1.csv
diff --git a/_randomgen/randomgen/tests/data/xoshiro512starstar-testset-2.csv b/numpy/random/randomgen/tests/data/xoshiro512starstar-testset-2.csv
index 264308f1a..264308f1a 100644
--- a/_randomgen/randomgen/tests/data/xoshiro512starstar-testset-2.csv
+++ b/numpy/random/randomgen/tests/data/xoshiro512starstar-testset-2.csv
diff --git a/_randomgen/randomgen/tests/test_against_numpy.py b/numpy/random/randomgen/tests/test_against_numpy.py
index ea2656e83..431c7bd85 100644
--- a/_randomgen/randomgen/tests/test_against_numpy.py
+++ b/numpy/random/randomgen/tests/test_against_numpy.py
@@ -1,11 +1,12 @@
import numpy as np
import numpy.random
-from numpy.testing import assert_allclose, assert_array_equal, assert_equal
+from numpy.testing import (assert_allclose, assert_array_equal, assert_equal,
+ suppress_warnings)
-import randomgen
-from randomgen import RandomGenerator, MT19937
-from randomgen._testing import suppress_warnings
-from randomgen.legacy import LegacyGenerator
+import pytest
+
+from numpy.random.randomgen import RandomGenerator, MT19937, generator
+from numpy.random import RandomState
def compare_0_input(f1, f2):
@@ -95,7 +96,7 @@ class TestAgainstNumPy(object):
cls.brng = MT19937
cls.seed = [2 ** 21 + 2 ** 16 + 2 ** 5 + 1]
cls.rg = RandomGenerator(cls.brng(*cls.seed))
- cls.lg = LegacyGenerator(cls.brng(*cls.seed))
+ cls.rs = RandomState(cls.brng(*cls.seed))
cls.nprs = cls.np.RandomState(*cls.seed)
cls.initial_state = cls.rg.state
cls._set_common_state()
@@ -113,7 +114,7 @@ class TestAgainstNumPy(object):
@classmethod
def _set_common_state_legacy(cls):
- state = cls.lg.state
+ state = cls.rs.get_state(legacy=False)
st = [[]] * 5
st[0] = 'MT19937'
st[1] = state['state']['key']
@@ -130,7 +131,7 @@ class TestAgainstNumPy(object):
def _is_state_common_legacy(self):
state = self.nprs.get_state()
- state2 = self.lg.state
+ state2 = self.rs.get_state(legacy=False)
assert (state[1] == state2['state']['key']).all()
assert (state[2] == state2['state']['pos'])
assert (state[3] == state2['has_gauss'])
@@ -165,21 +166,21 @@ class TestAgainstNumPy(object):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_0_input(self.nprs.standard_normal,
- self.lg.standard_normal)
+ self.rs.standard_normal)
self._is_state_common_legacy()
def test_standard_cauchy(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_0_input(self.nprs.standard_cauchy,
- self.lg.standard_cauchy)
+ self.rs.standard_cauchy)
self._is_state_common_legacy()
def test_standard_exponential(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_0_input(self.nprs.standard_exponential,
- self.lg.standard_exponential)
+ self.rs.standard_exponential)
self._is_state_common_legacy()
def test_tomaxint(self):
@@ -407,13 +408,14 @@ class TestAgainstNumPy(object):
assert (len(nprs_d.difference(rs_d)) == 0)
npmod = dir(numpy.random)
- mod = dir(randomgen.generator)
+ mod = dir(generator)
known_exlcuded = ['__all__', '__cached__', '__path__', 'Tester',
'info', 'bench', '__RandomState_ctor', 'mtrand',
'test', '__warningregistry__', '_numpy_tester',
'division', 'get_state', 'set_state', 'seed',
'ranf', 'random', 'sample', 'absolute_import',
- 'print_function', 'RandomState']
+ 'print_function', 'RandomState', 'randomgen',
+ 'tests', 'Lock']
mod += known_exlcuded
diff = set(npmod).difference(mod)
assert_equal(len(diff), 0)
@@ -423,112 +425,112 @@ class TestAgainstNumPy(object):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_1_input(self.nprs.chisquare,
- self.lg.chisquare)
+ self.rs.chisquare)
self._is_state_common_legacy()
def test_standard_gamma(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_1_input(self.nprs.standard_gamma,
- self.lg.standard_gamma)
+ self.rs.standard_gamma)
self._is_state_common_legacy()
def test_standard_t(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_1_input(self.nprs.standard_t,
- self.lg.standard_t)
+ self.rs.standard_t)
self._is_state_common_legacy()
def test_pareto(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_1_input(self.nprs.pareto,
- self.lg.pareto)
+ self.rs.pareto)
self._is_state_common_legacy()
def test_power(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_1_input(self.nprs.power,
- self.lg.power)
+ self.rs.power)
self._is_state_common_legacy()
def test_weibull(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_1_input(self.nprs.weibull,
- self.lg.weibull)
+ self.rs.weibull)
self._is_state_common_legacy()
def test_beta(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.beta,
- self.lg.beta)
+ self.rs.beta)
self._is_state_common_legacy()
def test_exponential(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_1_input(self.nprs.exponential,
- self.lg.exponential)
+ self.rs.exponential)
self._is_state_common_legacy()
def test_f(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.f,
- self.lg.f)
+ self.rs.f)
self._is_state_common_legacy()
def test_gamma(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.gamma,
- self.lg.gamma)
+ self.rs.gamma)
self._is_state_common_legacy()
def test_lognormal(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.lognormal,
- self.lg.lognormal)
+ self.rs.lognormal)
self._is_state_common_legacy()
def test_noncentral_chisquare(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.noncentral_chisquare,
- self.lg.noncentral_chisquare)
+ self.rs.noncentral_chisquare)
self._is_state_common_legacy()
def test_normal(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.normal,
- self.lg.normal)
+ self.rs.normal)
self._is_state_common_legacy()
def test_wald(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.wald,
- self.lg.wald)
+ self.rs.wald)
self._is_state_common_legacy()
def test_negative_binomial(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_2_input(self.nprs.negative_binomial,
- self.lg.negative_binomial,
+ self.rs.negative_binomial,
is_np=True)
self._is_state_common_legacy()
def test_randn(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
- f = self.lg.randn
+ f = self.rs.randn
g = self.nprs.randn
assert_allclose(f(10), g(10))
assert_allclose(f(3, 4, 5), g(3, 4, 5))
@@ -537,7 +539,7 @@ class TestAgainstNumPy(object):
def test_dirichlet(self):
self._set_common_state_legacy()
self._is_state_common_legacy()
- f = self.lg.dirichlet
+ f = self.rs.dirichlet
g = self.nprs.dirichlet
a = [3, 4, 5, 6, 7, 10]
assert_allclose(f(a), g(a))
@@ -549,7 +551,7 @@ class TestAgainstNumPy(object):
self._set_common_state_legacy()
self._is_state_common_legacy()
compare_3_input(self.nprs.noncentral_f,
- self.lg.noncentral_f)
+ self.rs.noncentral_f)
self._is_state_common_legacy()
def test_multivariate_normal(self):
@@ -557,7 +559,7 @@ class TestAgainstNumPy(object):
self._is_state_common_legacy()
mu = [1, 2, 3]
cov = [[1, .2, .3], [.2, 4, 1], [.3, 1, 10]]
- f = self.lg.multivariate_normal
+ f = self.rs.multivariate_normal
g = self.nprs.multivariate_normal
assert_allclose(f(mu, cov), g(mu, cov))
assert_allclose(f(np.array(mu), cov), g(np.array(mu), cov))
@@ -566,3 +568,25 @@ class TestAgainstNumPy(object):
assert_allclose(f(np.array(mu), np.array(cov), size=(7, 31)),
g(np.array(mu), np.array(cov), size=(7, 31)))
self._is_state_common_legacy()
+
+
+funcs = [generator.zipf,
+ generator.logseries,
+ generator.poisson]
+ids = [f.__name__ for f in funcs]
+
+
+@pytest.mark.filterwarnings('ignore:invalid value encountered:RuntimeWarning')
+@pytest.mark.parametrize('func', funcs, ids=ids)
+def test_nan_guard(func):
+ with pytest.raises(ValueError):
+ func([np.nan])
+ with pytest.raises(ValueError):
+ func(np.nan)
+
+
+def test_cons_gte1_nan_guard():
+ with pytest.raises(ValueError):
+ generator.hypergeometric(10, 10, [np.nan])
+ with pytest.raises(ValueError):
+ generator.hypergeometric(10, 10, np.nan)
diff --git a/_randomgen/randomgen/tests/test_direct.py b/numpy/random/randomgen/tests/test_direct.py
index 3f84c4cb6..6e856de41 100644
--- a/_randomgen/randomgen/tests/test_direct.py
+++ b/numpy/random/randomgen/tests/test_direct.py
@@ -7,9 +7,24 @@ from numpy.testing import assert_equal, assert_allclose, assert_array_equal, \
assert_raises
import pytest
-from randomgen import RandomGenerator, MT19937, DSFMT, ThreeFry32, ThreeFry, \
+from ...randomgen import RandomGenerator, MT19937, DSFMT, ThreeFry32, ThreeFry, \
PCG32, PCG64, Philox, Xoroshiro128, Xorshift1024, Xoshiro256StarStar, \
- Xoshiro512StarStar
+ Xoshiro512StarStar, RandomState
+from ...randomgen.common import interface
+
+try:
+ import cffi # noqa: F401
+
+ MISSING_CFFI = False
+except ImportError:
+ MISSING_CFFI = True
+
+try:
+ import ctypes # noqa: F401
+
+ MISSING_CTYPES = False
+except ImportError:
+    MISSING_CTYPES = True
if (sys.version_info > (3, 0)):
long = int
@@ -17,6 +32,16 @@ if (sys.version_info > (3, 0)):
pwd = os.path.dirname(os.path.abspath(__file__))
+def assert_state_equal(actual, target):
+ for key in actual:
+ if isinstance(actual[key], dict):
+ assert_state_equal(actual[key], target[key])
+ elif isinstance(actual[key], np.ndarray):
+ assert_array_equal(actual[key], target[key])
+ else:
+ assert actual[key] == target[key]
+
+
def uniform32_from_uint64(x):
x = np.uint64(x)
upper = np.array(x >> np.uint64(32), dtype=np.uint32)
@@ -26,14 +51,6 @@ def uniform32_from_uint64(x):
out = (joined >> np.uint32(9)) * (1.0 / 2 ** 23)
return out.astype(np.float32)
-
-def uniform32_from_uint63(x):
- x = np.uint64(x)
- x = np.uint32(x >> np.uint64(32))
- out = (x >> np.uint32(9)) * (1.0 / 2 ** 23)
- return out.astype(np.float32)
-
-
def uniform32_from_uint53(x):
x = np.uint64(x) >> np.uint64(16)
x = np.uint32(x & np.uint64(0xffffffff))
@@ -48,8 +65,6 @@ def uniform32_from_uint32(x):
def uniform32_from_uint(x, bits):
if bits == 64:
return uniform32_from_uint64(x)
- elif bits == 63:
- return uniform32_from_uint63(x)
elif bits == 53:
return uniform32_from_uint53(x)
elif bits == 32:
@@ -77,16 +92,6 @@ def uniform_from_uint32(x):
out[i // 2] = (a * 67108864.0 + b) / 9007199254740992.0
return out
-
-def uint64_from_uint63(x):
- out = np.empty(len(x) // 2, dtype=np.uint64)
- for i in range(0, len(x), 2):
- a = x[i] & np.uint64(0xffffffff00000000)
- b = x[i + 1] >> np.uint64(32)
- out[i // 2] = a | b
- return out
-
-
def uniform_from_dsfmt(x):
return x.view(np.double) - 1.0
@@ -126,6 +131,8 @@ class Base(object):
cls.bits = 64
cls.dtype = np.uint64
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = []
+ cls.invalid_seed_values = []
@classmethod
def _read_csv(cls, filename):
@@ -139,23 +146,33 @@ class Base(object):
return {'seed': seed, 'data': np.array(data, dtype=cls.dtype)}
def test_raw(self):
- rs = RandomGenerator(self.brng(*self.data1['seed']))
- uints = rs.random_raw(1000)
+ brng = self.brng(*self.data1['seed'])
+ uints = brng.random_raw(1000)
assert_equal(uints, self.data1['data'])
- rs = RandomGenerator(self.brng(*self.data2['seed']))
- uints = rs.random_raw(1000)
+ brng = self.brng(*self.data1['seed'])
+ uints = brng.random_raw()
+ assert_equal(uints, self.data1['data'][0])
+
+ brng = self.brng(*self.data2['seed'])
+ uints = brng.random_raw(1000)
assert_equal(uints, self.data2['data'])
- @pytest.mark.skip(reason='Polar transform no longer supported')
+ def test_random_raw(self):
+ brng = self.brng(*self.data1['seed'])
+ uints = brng.random_raw(output=False)
+ assert uints is None
+ uints = brng.random_raw(1000, output=False)
+ assert uints is None
+
def test_gauss_inv(self):
n = 25
- rs = RandomGenerator(self.brng(*self.data1['seed']))
+ rs = RandomState(self.brng(*self.data1['seed']))
gauss = rs.standard_normal(n)
assert_allclose(gauss,
gauss_from_uint(self.data1['data'], n, self.bits))
- rs = RandomGenerator(self.brng(*self.data2['seed']))
+ rs = RandomState(self.brng(*self.data2['seed']))
gauss = rs.standard_normal(25)
assert_allclose(gauss,
gauss_from_uint(self.data2['data'], n, self.bits))
@@ -214,6 +231,87 @@ class Base(object):
assert_raises(ValueError, rs.seed, [2 ** (2 * self.bits + 1)])
assert_raises(ValueError, rs.seed, [-1])
+ def test_repr(self):
+ rs = RandomGenerator(self.brng(*self.data1['seed']))
+ assert 'RandomGenerator' in rs.__repr__()
+ assert str(hex(id(rs)))[2:].upper() in rs.__repr__()
+
+ def test_str(self):
+ rs = RandomGenerator(self.brng(*self.data1['seed']))
+ assert 'RandomGenerator' in str(rs)
+ assert str(self.brng.__name__) in str(rs)
+ assert str(hex(id(rs)))[2:].upper() not in str(rs)
+
+ def test_generator(self):
+ brng = self.brng(*self.data1['seed'])
+ assert isinstance(brng.generator, RandomGenerator)
+
+ def test_pickle(self):
+ import pickle
+
+ brng = self.brng(*self.data1['seed'])
+ state = brng.state
+ brng_pkl = pickle.dumps(brng)
+ reloaded = pickle.loads(brng_pkl)
+ reloaded_state = reloaded.state
+ assert_array_equal(brng.generator.standard_normal(1000),
+ reloaded.generator.standard_normal(1000))
+ assert brng is not reloaded
+ assert_state_equal(reloaded_state, state)
+
+ def test_invalid_state_type(self):
+ brng = self.brng(*self.data1['seed'])
+ with pytest.raises(TypeError):
+ brng.state = {'1'}
+
+ def test_invalid_state_value(self):
+ brng = self.brng(*self.data1['seed'])
+ state = brng.state
+ state['brng'] = 'otherBRNG'
+ with pytest.raises(ValueError):
+ brng.state = state
+
+ def test_invalid_seed_type(self):
+ brng = self.brng(*self.data1['seed'])
+ for st in self.invalid_seed_types:
+ with pytest.raises(TypeError):
+ brng.seed(*st)
+
+ def test_invalid_seed_values(self):
+ brng = self.brng(*self.data1['seed'])
+ for st in self.invalid_seed_values:
+ with pytest.raises(ValueError):
+ brng.seed(*st)
+
+ def test_benchmark(self):
+ brng = self.brng(*self.data1['seed'])
+ brng._benchmark(1)
+ brng._benchmark(1, 'double')
+ with pytest.raises(ValueError):
+ brng._benchmark(1, 'int32')
+
+ @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')
+ def test_cffi(self):
+ brng = self.brng(*self.data1['seed'])
+ cffi_interface = brng.cffi
+ assert isinstance(cffi_interface, interface)
+ other_cffi_interface = brng.cffi
+ assert other_cffi_interface is cffi_interface
+
+ @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')
+ def test_ctypes(self):
+ brng = self.brng(*self.data1['seed'])
+ ctypes_interface = brng.ctypes
+ assert isinstance(ctypes_interface, interface)
+ other_ctypes_interface = brng.ctypes
+ assert other_ctypes_interface is ctypes_interface
+
+ def test_getstate(self):
+ brng = self.brng(*self.data1['seed'])
+ state = brng.state
+ alt_state = brng.__getstate__()
+ assert_state_equal(state, alt_state)
+
class TestXoroshiro128(Base):
@classmethod
@@ -226,6 +324,8 @@ class TestXoroshiro128(Base):
cls.data2 = cls._read_csv(
join(pwd, './data/xoroshiro128-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = [('apple',), (2 + 3j,), (3.1,)]
+ cls.invalid_seed_values = [(-2,), (np.empty((2, 2), dtype=np.int64),)]
class TestXoshiro256StarStar(Base):
@@ -239,6 +339,8 @@ class TestXoshiro256StarStar(Base):
cls.data2 = cls._read_csv(
join(pwd, './data/xoshiro256starstar-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = [('apple',), (2 + 3j,), (3.1,)]
+ cls.invalid_seed_values = [(-2,), (np.empty((2, 2), dtype=np.int64),)]
class TestXoshiro512StarStar(Base):
@@ -252,6 +354,8 @@ class TestXoshiro512StarStar(Base):
cls.data2 = cls._read_csv(
join(pwd, './data/xoshiro512starstar-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = [('apple',), (2 + 3j,), (3.1,)]
+ cls.invalid_seed_values = [(-2,), (np.empty((2, 2), dtype=np.int64),)]
class TestXorshift1024(Base):
@@ -265,6 +369,8 @@ class TestXorshift1024(Base):
cls.data2 = cls._read_csv(
join(pwd, './data/xorshift1024-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = [('apple',), (2 + 3j,), (3.1,)]
+ cls.invalid_seed_values = [(-2,), (np.empty((2, 2), dtype=np.int64),)]
class TestThreeFry(Base):
@@ -278,6 +384,16 @@ class TestThreeFry(Base):
cls.data2 = cls._read_csv(
join(pwd, './data/threefry-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = []
+ cls.invalid_seed_values = [(1, None, 1), (-1,), (2 ** 257 + 1,),
+ (None, None, 2 ** 257 + 1)]
+
+ def test_set_key(self):
+ brng = self.brng(*self.data1['seed'])
+ state = brng.state
+ keyed = self.brng(counter=state['state']['counter'],
+ key=state['state']['key'])
+ assert_state_equal(brng.state, keyed.state)
class TestPCG64(Base):
@@ -289,6 +405,10 @@ class TestPCG64(Base):
cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = [(np.array([1, 2]),), (3.2,),
+ (None, np.zeros(1))]
+ cls.invalid_seed_values = [(-1,), (2 ** 129 + 1,), (None, -1),
+ (None, 2 ** 129 + 1)]
def test_seed_float_array(self):
rs = RandomGenerator(self.brng(*self.data1['seed']))
@@ -317,6 +437,16 @@ class TestPhilox(Base):
cls.data2 = cls._read_csv(
join(pwd, './data/philox-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = []
+ cls.invalid_seed_values = [(1, None, 1), (-1,), (2 ** 257 + 1,),
+ (None, None, 2 ** 257 + 1)]
+
+ def test_set_key(self):
+ brng = self.brng(*self.data1['seed'])
+ state = brng.state
+ keyed = self.brng(counter=state['state']['counter'],
+ key=state['state']['key'])
+ assert_state_equal(brng.state, keyed.state)
class TestMT19937(Base):
@@ -328,6 +458,8 @@ class TestMT19937(Base):
cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
cls.seed_error_type = ValueError
+ cls.invalid_seed_types = []
+ cls.invalid_seed_values = [(-1,), np.array([2 ** 33])]
def test_seed_out_of_range(self):
# GH #82
@@ -359,6 +491,19 @@ class TestMT19937(Base):
assert_raises(TypeError, rs.seed, [np.pi])
assert_raises(TypeError, rs.seed, [0, np.pi])
+ def test_state_tuple(self):
+ rs = RandomGenerator(self.brng(*self.data1['seed']))
+ state = rs.state
+ desired = rs.randint(2 ** 16)
+ tup = (state['brng'], state['state']['key'], state['state']['pos'])
+ rs.state = tup
+ actual = rs.randint(2 ** 16)
+ assert_equal(actual, desired)
+ tup = tup + (0, 0.0)
+ rs.state = tup
+ actual = rs.randint(2 ** 16)
+ assert_equal(actual, desired)
+
class TestDSFMT(Base):
@classmethod
@@ -369,6 +514,9 @@ class TestDSFMT(Base):
cls.data1 = cls._read_csv(join(pwd, './data/dSFMT-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/dSFMT-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = []
+ cls.invalid_seed_values = [(-1,), np.array([2 ** 33]),
+ (np.array([2 ** 33, 2 ** 33]),)]
def test_uniform_double(self):
rs = RandomGenerator(self.brng(*self.data1['seed']))
@@ -379,15 +527,14 @@ class TestDSFMT(Base):
assert_equal(uniform_from_dsfmt(self.data2['data']),
rs.random_sample(1000))
- @pytest.mark.skip(reason='Polar transform no longer supported')
def test_gauss_inv(self):
n = 25
- rs = RandomGenerator(self.brng(*self.data1['seed']))
+ rs = RandomState(self.brng(*self.data1['seed']))
gauss = rs.standard_normal(n)
assert_allclose(gauss,
gauss_from_uint(self.data1['data'], n, 'dsfmt'))
- rs = RandomGenerator(self.brng(*self.data2['seed']))
+ rs = RandomState(self.brng(*self.data2['seed']))
gauss = rs.standard_normal(25)
assert_allclose(gauss,
gauss_from_uint(self.data2['data'], n, 'dsfmt'))
@@ -445,6 +592,16 @@ class TestThreeFry32(Base):
cls.data1 = cls._read_csv(join(pwd, './data/threefry32-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/threefry32-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = []
+ cls.invalid_seed_values = [(1, None, 1), (-1,), (2 ** 257 + 1,),
+ (None, None, 2 ** 129 + 1)]
+
+ def test_set_key(self):
+ brng = self.brng(*self.data1['seed'])
+ state = brng.state
+ keyed = self.brng(counter=state['state']['counter'],
+ key=state['state']['key'])
+ assert_state_equal(brng.state, keyed.state)
class TestPCG32(TestPCG64):
@@ -456,3 +613,7 @@ class TestPCG32(TestPCG64):
cls.data1 = cls._read_csv(join(pwd, './data/pcg32-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/pcg32-testset-2.csv'))
cls.seed_error_type = TypeError
+ cls.invalid_seed_types = [(np.array([1, 2]),), (3.2,),
+ (None, np.zeros(1))]
+ cls.invalid_seed_values = [(-1,), (2 ** 129 + 1,), (None, -1),
+ (None, 2 ** 129 + 1)]
diff --git a/_randomgen/randomgen/tests/test_numpy_mt19937.py b/numpy/random/randomgen/tests/test_generator_mt19937.py
index a19693d81..cad3ef4d6 100644
--- a/_randomgen/randomgen/tests/test_numpy_mt19937.py
+++ b/numpy/random/randomgen/tests/test_generator_mt19937.py
@@ -1,5 +1,3 @@
-from __future__ import division, absolute_import, print_function
-
import sys
import warnings
@@ -7,14 +5,11 @@ import numpy as np
from numpy.testing import (
assert_, assert_raises, assert_equal,
assert_warns, assert_no_warnings, assert_array_equal,
- assert_array_almost_equal)
+ assert_array_almost_equal, suppress_warnings)
-from randomgen._testing import suppress_warnings
-from randomgen import RandomGenerator, MT19937
-from randomgen.legacy import LegacyGenerator
+from ...randomgen import RandomGenerator, MT19937
-random = mt19937 = RandomGenerator(MT19937())
-legacy = LegacyGenerator(MT19937())
+random = RandomGenerator(MT19937())
class TestSeed(object):
@@ -47,6 +42,9 @@ class TestSeed(object):
assert_raises(ValueError, MT19937, [1, 2, 4294967296])
assert_raises(ValueError, MT19937, [1, -2, 4294967296])
+ def test_noninstantized_brng(self):
+ assert_raises(ValueError, RandomGenerator, MT19937)
+
class TestBinomial(object):
def test_n_zero(self):
@@ -80,17 +78,20 @@ class TestMultinomial(object):
def test_size(self):
# gh-3173
p = [0.5, 0.5]
- assert_equal(mt19937.multinomial(1, p, np.uint32(1)).shape, (1, 2))
- assert_equal(mt19937.multinomial(1, p, np.uint32(1)).shape, (1, 2))
- assert_equal(mt19937.multinomial(1, p, np.uint32(1)).shape, (1, 2))
- assert_equal(mt19937.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
- assert_equal(mt19937.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
- assert_equal(mt19937.multinomial(1, p, np.array((2, 2))).shape,
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
(2, 2, 2))
- assert_raises(TypeError, mt19937.multinomial, 1, p,
+ assert_raises(TypeError, random.multinomial, 1, p,
float(1))
+ def test_invalid_prob(self):
+ assert_raises(ValueError, random.multinomial, 100, [1.1, 0.2])
+
class TestSetState(object):
def setup(self):
@@ -125,19 +126,6 @@ class TestSetState(object):
new = self.brng.standard_normal(size=3)
assert_(np.all(old == new))
- def test_backwards_compatibility(self):
- # Make sure we can accept old state tuples that do not have the
- # cached Gaussian value.
- old_state = self.legacy_state
- legacy.state = old_state
- x1 = legacy.standard_normal(size=16)
- legacy.state = old_state
- x2 = legacy.standard_normal(size=16)
- legacy.state = old_state + (0, 0.0)
- x3 = legacy.standard_normal(size=16)
- assert_(np.all(x1 == x2))
- assert_(np.all(x1 == x3))
-
def test_negative_binomial(self):
# Ensure that the negative binomial results take floating point
# arguments without truncation.
@@ -145,7 +133,7 @@ class TestSetState(object):
class TestRandint(object):
- rfunc = mt19937.randint
+ rfunc = random.randint
# valid integer/boolean types
itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
@@ -257,7 +245,7 @@ class TestRandint(object):
def test_in_bounds_fuzz(self):
# Don't use fixed seed
- mt19937.seed()
+ random.seed()
for dt in self.itype[1:]:
for ubnd in [4, 8, 16]:
@@ -276,13 +264,13 @@ class TestRandint(object):
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
size = 1000
- mt19937.seed(1234)
+ random.seed(1234)
scalar = self.rfunc(lbnd, ubnd, size=size, dtype=dt)
- mt19937.seed(1234)
+ random.seed(1234)
scalar_array = self.rfunc([lbnd], [ubnd], size=size, dtype=dt)
- mt19937.seed(1234)
+ random.seed(1234)
array = self.rfunc([lbnd] * size, [ubnd] *
size, size=size, dtype=dt)
assert_array_equal(scalar, scalar_array)
@@ -304,7 +292,7 @@ class TestRandint(object):
'uint8': '27dd30c4e08a797063dffac2490b0be6'}
for dt in self.itype[1:]:
- mt19937.seed(1234)
+ random.seed(1234)
# view as little endian for hash
if sys.byteorder == 'little':
@@ -316,7 +304,7 @@ class TestRandint(object):
assert_(tgt[np.dtype(dt).name] == res)
# bools do not depend on endianness
- mt19937.seed(1234)
+ random.seed(1234)
val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
res = hashlib.md5(val).hexdigest()
assert_(tgt[np.dtype(bool).name] == res)
@@ -329,15 +317,15 @@ class TestRandint(object):
np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
# view as little endian for hash
- mt19937.seed(1234)
+ random.seed(1234)
val = self.rfunc(lbnd, ubnd, size=1000, dtype=dt)
- mt19937.seed(1234)
+ random.seed(1234)
val_bc = self.rfunc([lbnd] * 1000, ubnd, dtype=dt)
assert_array_equal(val, val_bc)
- mt19937.seed(1234)
+ random.seed(1234)
val_bc = self.rfunc([lbnd] * 1000, [ubnd] * 1000, dtype=dt)
assert_array_equal(val, val_bc)
@@ -362,7 +350,7 @@ class TestRandint(object):
# None of these function calls should
# generate a ValueError now.
- actual = mt19937.randint(lbnd, ubnd, dtype=dt)
+ actual = random.randint(lbnd, ubnd, dtype=dt)
assert_equal(actual, tgt)
def test_respect_dtype_singleton(self):
@@ -413,24 +401,34 @@ class TestRandomDist(object):
self.seed = 1234567890
def test_rand(self):
- mt19937.seed(self.seed)
- actual = mt19937.rand(3, 2)
+ random.seed(self.seed)
+ actual = random.rand(3, 2)
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
+ def test_rand_singleton(self):
+ random.seed(self.seed)
+ actual = random.rand()
+ desired = 0.61879477158567997
+ assert_array_almost_equal(actual, desired, decimal=15)
+
def test_randn(self):
- legacy.seed(self.seed)
- actual = legacy.randn(3, 2)
- desired = np.array([[1.34016345771863121, 1.73759122771936081],
- [1.498988344300628, -0.2286433324536169],
- [2.031033998682787, 2.17032494605655257]])
+ random.seed(self.seed)
+ actual = random.randn(3, 2)
+ desired = np.array([[-3.472754000610961, -0.108938564229143],
+ [-0.245965753396411, -0.704101550261701],
+ [0.360102487116356, 0.127832101772367]])
assert_array_almost_equal(actual, desired, decimal=15)
+ random.seed(self.seed)
+ actual = random.randn()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
def test_randint(self):
- mt19937.seed(self.seed)
- actual = mt19937.randint(-99, 99, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.randint(-99, 99, size=(3, 2))
desired = np.array([[31, 3],
[-52, 41],
[-48, -66]])
@@ -439,9 +437,9 @@ class TestRandomDist(object):
def test_randint_masked(self):
# Test masked rejection sampling algorithm to generate array of
# uint32 in an interval.
- mt19937.seed(self.seed)
- actual = mt19937.randint(0, 99, size=(3, 2), dtype=np.uint32,
- use_masked=True)
+ random.seed(self.seed)
+ actual = random.randint(0, 99, size=(3, 2), dtype=np.uint32,
+ use_masked=True)
desired = np.array([[2, 47],
[12, 51],
[33, 43]], dtype=np.uint32)
@@ -449,9 +447,9 @@ class TestRandomDist(object):
def test_randint_lemire_32(self):
# Test lemire algorithm to generate array of uint32 in an interval.
- mt19937.seed(self.seed)
- actual = mt19937.randint(0, 99, size=(3, 2), dtype=np.uint32,
- use_masked=False)
+ random.seed(self.seed)
+ actual = random.randint(0, 99, size=(3, 2), dtype=np.uint32,
+ use_masked=False)
desired = np.array([[61, 33],
[58, 14],
[87, 23]], dtype=np.uint32)
@@ -459,19 +457,19 @@ class TestRandomDist(object):
def test_randint_lemire_64(self):
# Test lemire algorithm to generate array of uint64 in an interval.
- mt19937.seed(self.seed)
- actual = mt19937.randint(0, 99 + 0xFFFFFFFFF, size=(3, 2),
- dtype=np.uint64, use_masked=False)
+ random.seed(self.seed)
+ actual = random.randint(0, 99 + 0xFFFFFFFFF, size=(3, 2),
+ dtype=np.uint64, use_masked=False)
desired = np.array([[42523252834, 40656066204],
[61069871386, 61274051182],
[31443797706, 53476677934]], dtype=np.uint64)
assert_array_equal(actual, desired)
def test_random_integers(self):
- mt19937.seed(self.seed)
+ random.seed(self.seed)
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
- actual = mt19937.random_integers(-99, 99, size=(3, 2))
+ actual = random.random_integers(-99, 99, size=(3, 2))
assert_(len(w) == 1)
desired = np.array([[31, 3],
[-52, 41],
@@ -486,8 +484,8 @@ class TestRandomDist(object):
# to generate this integer.
with suppress_warnings() as sup:
w = sup.record(DeprecationWarning)
- actual = mt19937.random_integers(np.iinfo('l').max,
- np.iinfo('l').max)
+ actual = random.random_integers(np.iinfo('l').max,
+ np.iinfo('l').max)
assert_(len(w) == 1)
desired = np.iinfo('l').max
@@ -499,55 +497,76 @@ class TestRandomDist(object):
# DeprecationWarning raised with high == None
assert_raises(DeprecationWarning,
- mt19937.random_integers,
+ random.random_integers,
np.iinfo('l').max)
# DeprecationWarning raised with high != None
assert_raises(DeprecationWarning,
- mt19937.random_integers,
+ random.random_integers,
np.iinfo('l').max, np.iinfo('l').max)
def test_random_sample(self):
- mt19937.seed(self.seed)
- actual = mt19937.random_sample((3, 2))
+ random.seed(self.seed)
+ actual = random.random_sample((3, 2))
desired = np.array([[0.61879477158567997, 0.59162362775974664],
[0.88868358904449662, 0.89165480011560816],
[0.4575674820298663, 0.7781880808593471]])
assert_array_almost_equal(actual, desired, decimal=15)
+ random.seed(self.seed)
+ actual = random.random_sample()
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_random_sample_float(self):
+ random.seed(self.seed)
+ actual = random.random_sample((3, 2))
+ desired = np.array([[0.6187948, 0.5916236],
+ [0.8886836, 0.8916548],
+ [0.4575675, 0.7781881]])
+ assert_array_almost_equal(actual, desired, decimal=7)
+
+ def test_random_sample_float_scalar(self):
+ random.seed(self.seed)
+ actual = random.random_sample(dtype=np.float32)
+ desired = 0.6187948
+ assert_array_almost_equal(actual, desired, decimal=7)
+
+ def test_random_sample_unsupported_type(self):
+ assert_raises(TypeError, random.random_sample, dtype='int32')
+
def test_choice_uniform_replace(self):
- mt19937.seed(self.seed)
- actual = mt19937.choice(4, 4)
+ random.seed(self.seed)
+ actual = random.choice(4, 4)
desired = np.array([2, 3, 2, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_replace(self):
- mt19937.seed(self.seed)
- actual = mt19937.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+ random.seed(self.seed)
+ actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
desired = np.array([1, 1, 2, 2])
assert_array_equal(actual, desired)
def test_choice_uniform_noreplace(self):
- mt19937.seed(self.seed)
- actual = mt19937.choice(4, 3, replace=False)
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False)
desired = np.array([0, 1, 3])
assert_array_equal(actual, desired)
def test_choice_nonuniform_noreplace(self):
- mt19937.seed(self.seed)
- actual = mt19937.choice(4, 3, replace=False,
- p=[0.1, 0.3, 0.5, 0.1])
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False,
+ p=[0.1, 0.3, 0.5, 0.1])
desired = np.array([2, 3, 1])
assert_array_equal(actual, desired)
def test_choice_noninteger(self):
- mt19937.seed(self.seed)
- actual = mt19937.choice(['a', 'b', 'c', 'd'], 4)
+ random.seed(self.seed)
+ actual = random.choice(['a', 'b', 'c', 'd'], 4)
desired = np.array(['c', 'd', 'c', 'd'])
assert_array_equal(actual, desired)
def test_choice_exceptions(self):
- sample = mt19937.choice
+ sample = random.choice
assert_raises(ValueError, sample, -1, 3)
assert_raises(ValueError, sample, 3., 3)
assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
@@ -568,57 +587,57 @@ class TestRandomDist(object):
def test_choice_return_shape(self):
p = [0.1, 0.9]
# Check scalar
- assert_(np.isscalar(mt19937.choice(2, replace=True)))
- assert_(np.isscalar(mt19937.choice(2, replace=False)))
- assert_(np.isscalar(mt19937.choice(2, replace=True, p=p)))
- assert_(np.isscalar(mt19937.choice(2, replace=False, p=p)))
- assert_(np.isscalar(mt19937.choice([1, 2], replace=True)))
- assert_(mt19937.choice([None], replace=True) is None)
+ assert_(np.isscalar(random.choice(2, replace=True)))
+ assert_(np.isscalar(random.choice(2, replace=False)))
+ assert_(np.isscalar(random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(random.choice([1, 2], replace=True)))
+ assert_(random.choice([None], replace=True) is None)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
- assert_(mt19937.choice(arr, replace=True) is a)
+ assert_(random.choice(arr, replace=True) is a)
# Check 0-d array
s = tuple()
- assert_(not np.isscalar(mt19937.choice(2, s, replace=True)))
- assert_(not np.isscalar(mt19937.choice(2, s, replace=False)))
- assert_(not np.isscalar(mt19937.choice(2, s, replace=True, p=p)))
- assert_(not np.isscalar(mt19937.choice(2, s, replace=False, p=p)))
- assert_(not np.isscalar(mt19937.choice([1, 2], s, replace=True)))
- assert_(mt19937.choice([None], s, replace=True).ndim == 0)
+ assert_(not np.isscalar(random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
+ assert_(random.choice([None], s, replace=True).ndim == 0)
a = np.array([1, 2])
arr = np.empty(1, dtype=object)
arr[0] = a
- assert_(mt19937.choice(arr, s, replace=True).item() is a)
+ assert_(random.choice(arr, s, replace=True).item() is a)
# Check multi dimensional array
s = (2, 3)
p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
- assert_(mt19937.choice(6, s, replace=True).shape, s)
- assert_(mt19937.choice(6, s, replace=False).shape, s)
- assert_(mt19937.choice(6, s, replace=True, p=p).shape, s)
- assert_(mt19937.choice(6, s, replace=False, p=p).shape, s)
- assert_(mt19937.choice(np.arange(6), s, replace=True).shape, s)
+ assert_(random.choice(6, s, replace=True).shape, s)
+ assert_(random.choice(6, s, replace=False).shape, s)
+ assert_(random.choice(6, s, replace=True, p=p).shape, s)
+ assert_(random.choice(6, s, replace=False, p=p).shape, s)
+ assert_(random.choice(np.arange(6), s, replace=True).shape, s)
# Check zero-size
- assert_equal(mt19937.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
- assert_equal(mt19937.randint(0, -10, size=0).shape, (0,))
- assert_equal(mt19937.randint(10, 10, size=0).shape, (0,))
- assert_equal(mt19937.choice(0, size=0).shape, (0,))
- assert_equal(mt19937.choice([], size=(0,)).shape, (0,))
- assert_equal(mt19937.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(random.randint(0, -10, size=0).shape, (0,))
+ assert_equal(random.randint(10, 10, size=0).shape, (0,))
+ assert_equal(random.choice(0, size=0).shape, (0,))
+ assert_equal(random.choice([], size=(0,)).shape, (0,))
+ assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
(3, 0, 4))
- assert_raises(ValueError, mt19937.choice, [], 10)
+ assert_raises(ValueError, random.choice, [], 10)
def test_choice_nan_probabilities(self):
a = np.array([42, 1, 2])
p = [None, None, None]
- assert_raises(ValueError, mt19937.choice, a, p=p)
+ assert_raises(ValueError, random.choice, a, p=p)
def test_bytes(self):
- mt19937.seed(self.seed)
- actual = mt19937.bytes(10)
+ random.seed(self.seed)
+ actual = random.bytes(10)
desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
assert_equal(actual, desired)
@@ -642,9 +661,9 @@ class TestRandomDist(object):
lambda x: np.asarray([(i, i) for i in x],
[("a", object, 1),
("b", np.int32, 1)])]:
- mt19937.seed(self.seed)
+ random.seed(self.seed)
alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
- mt19937.shuffle(alist)
+ random.shuffle(alist)
actual = alist
desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
assert_array_equal(actual, desired)
@@ -656,16 +675,28 @@ class TestRandomDist(object):
a_orig = a.copy()
b_orig = b.copy()
for i in range(50):
- mt19937.shuffle(a)
+ random.shuffle(a)
assert_equal(
sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
- mt19937.shuffle(b)
+ random.shuffle(b)
assert_equal(
sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+ def test_permutation(self):
+ random.seed(self.seed)
+ alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
+ actual = random.permutation(alist)
+ desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
+ actual = random.permutation(arr_2d)
+ assert_array_equal(actual, np.atleast_2d(desired).T)
+
def test_beta(self):
- mt19937.seed(self.seed)
- actual = mt19937.beta(.1, .9, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.beta(.1, .9, size=(3, 2))
desired = np.array(
[[1.45341850513746058e-02, 5.31297615662868145e-04],
[1.85366619058432324e-06, 4.19214516800110563e-03],
@@ -673,173 +704,178 @@ class TestRandomDist(object):
assert_array_almost_equal(actual, desired, decimal=15)
def test_binomial(self):
- mt19937.seed(self.seed)
- actual = mt19937.binomial(100.123, .456, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.binomial(100.123, .456, size=(3, 2))
desired = np.array([[37, 43],
[42, 48],
[46, 45]])
assert_array_equal(actual, desired)
def test_chisquare(self):
- legacy.seed(self.seed)
- actual = legacy.chisquare(50, size=(3, 2))
- desired = np.array([[63.87858175501090585, 68.68407748911370447],
- [65.77116116901505904, 47.09686762438974483],
- [72.3828403199695174, 74.18408615260374006]])
+ random.seed(self.seed)
+ actual = random.chisquare(50, size=(3, 2))
+ desired = np.array([[22.2534560369812, 46.9302393710074],
+ [52.9974164611614, 85.3559029505718],
+ [46.1580841240719, 36.1933148548090]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_dirichlet(self):
- legacy.seed(self.seed)
+ random.seed(self.seed)
alpha = np.array([51.72840233779265162, 39.74494232180943953])
- actual = legacy.dirichlet(alpha, size=(3, 2))
- desired = np.array([[[0.54539444573611562, 0.45460555426388438],
- [0.62345816822039413, 0.37654183177960598]],
- [[0.55206000085785778, 0.44793999914214233],
- [0.58964023305154301, 0.41035976694845688]],
- [[0.59266909280647828, 0.40733090719352177],
- [0.56974431743975207, 0.43025568256024799]]])
+ actual = random.dirichlet(alpha, size=(3, 2))
+ desired = np.array([[[0.444382290764855, 0.555617709235145],
+ [0.468440809291970, 0.531559190708030]],
+ [[0.613461427360549, 0.386538572639451],
+ [0.529103072088183, 0.470896927911817]],
+ [[0.513490650101800, 0.486509349898200],
+ [0.558550925712797, 0.441449074287203]]])
assert_array_almost_equal(actual, desired, decimal=15)
bad_alpha = np.array([5.4e-01, -1.0e-16])
- assert_raises(ValueError, mt19937.dirichlet, bad_alpha)
+ assert_raises(ValueError, random.dirichlet, bad_alpha)
+
+ random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha)
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
def test_dirichlet_size(self):
# gh-3173
p = np.array([51.72840233779265162, 39.74494232180943953])
- assert_equal(mt19937.dirichlet(p, np.uint32(1)).shape, (1, 2))
- assert_equal(mt19937.dirichlet(p, np.uint32(1)).shape, (1, 2))
- assert_equal(mt19937.dirichlet(p, np.uint32(1)).shape, (1, 2))
- assert_equal(mt19937.dirichlet(p, [2, 2]).shape, (2, 2, 2))
- assert_equal(mt19937.dirichlet(p, (2, 2)).shape, (2, 2, 2))
- assert_equal(mt19937.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
- assert_raises(TypeError, mt19937.dirichlet, p, float(1))
+ assert_raises(TypeError, random.dirichlet, p, float(1))
def test_dirichlet_bad_alpha(self):
# gh-2089
alpha = np.array([5.4e-01, -1.0e-16])
- assert_raises(ValueError, mt19937.dirichlet, alpha)
+ assert_raises(ValueError, random.dirichlet, alpha)
def test_exponential(self):
- legacy.seed(self.seed)
- actual = legacy.exponential(1.1234, size=(3, 2))
- desired = np.array([[1.08342649775011624, 1.00607889924557314],
- [2.46628830085216721, 2.49668106809923884],
- [0.68717433461363442, 1.69175666993575979]])
+ random.seed(self.seed)
+ actual = random.exponential(1.1234, size=(3, 2))
+ desired = np.array([[5.350682337747634, 1.152307441755771],
+ [3.867015473358779, 1.538765912839396],
+ [0.347846818048527, 2.715656549872026]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_exponential_0(self):
- assert_equal(mt19937.exponential(scale=0), 0)
- assert_raises(ValueError, mt19937.exponential, scale=-0.)
+ assert_equal(random.exponential(scale=0), 0)
+ assert_raises(ValueError, random.exponential, scale=-0.)
def test_f(self):
- legacy.seed(self.seed)
- actual = legacy.f(12, 77, size=(3, 2))
- desired = np.array([[1.21975394418575878, 1.75135759791559775],
- [1.44803115017146489, 1.22108959480396262],
- [1.02176975757740629, 1.34431827623300415]])
+ random.seed(self.seed)
+ actual = random.f(12, 77, size=(3, 2))
+ desired = np.array([[0.809498839488467, 2.867222762455471],
+ [0.588036831639353, 1.012185639664636],
+ [1.147554281917365, 1.150886518432105]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gamma(self):
- legacy.seed(self.seed)
- actual = legacy.gamma(5, 3, size=(3, 2))
- desired = np.array([[24.60509188649287182, 28.54993563207210627],
- [26.13476110204064184, 12.56988482927716078],
- [31.71863275789960568, 33.30143302795922011]])
+ random.seed(self.seed)
+ actual = random.gamma(5, 3, size=(3, 2))
+ desired = np.array([[12.46569350177219, 16.46580642087044],
+ [43.65744473309084, 11.98722785682592],
+ [6.50371499559955, 7.48465689751638]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_gamma_0(self):
- assert_equal(mt19937.gamma(shape=0, scale=0), 0)
- assert_raises(ValueError, mt19937.gamma, shape=-0., scale=-0.)
+ assert_equal(random.gamma(shape=0, scale=0), 0)
+ assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
def test_geometric(self):
- mt19937.seed(self.seed)
- actual = mt19937.geometric(.123456789, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.geometric(.123456789, size=(3, 2))
desired = np.array([[8, 7],
[17, 17],
[5, 12]])
assert_array_equal(actual, desired)
def test_gumbel(self):
- mt19937.seed(self.seed)
- actual = mt19937.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.19591898743416816, 0.34405539668096674],
[-1.4492522252274278, -1.47374816298446865],
[1.10651090478803416, -0.69535848626236174]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_gumbel_0(self):
- assert_equal(mt19937.gumbel(scale=0), 0)
- assert_raises(ValueError, mt19937.gumbel, scale=-0.)
+ assert_equal(random.gumbel(scale=0), 0)
+ assert_raises(ValueError, random.gumbel, scale=-0.)
def test_hypergeometric(self):
- mt19937.seed(self.seed)
- actual = mt19937.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
desired = np.array([[10, 10],
[10, 10],
[9, 9]])
assert_array_equal(actual, desired)
# Test nbad = 0
- actual = mt19937.hypergeometric(5, 0, 3, size=4)
+ actual = random.hypergeometric(5, 0, 3, size=4)
desired = np.array([3, 3, 3, 3])
assert_array_equal(actual, desired)
- actual = mt19937.hypergeometric(15, 0, 12, size=4)
+ actual = random.hypergeometric(15, 0, 12, size=4)
desired = np.array([12, 12, 12, 12])
assert_array_equal(actual, desired)
# Test ngood = 0
- actual = mt19937.hypergeometric(0, 5, 3, size=4)
+ actual = random.hypergeometric(0, 5, 3, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
- actual = mt19937.hypergeometric(0, 15, 12, size=4)
+ actual = random.hypergeometric(0, 15, 12, size=4)
desired = np.array([0, 0, 0, 0])
assert_array_equal(actual, desired)
def test_laplace(self):
- mt19937.seed(self.seed)
- actual = mt19937.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[0.66599721112760157, 0.52829452552221945],
[3.12791959514407125, 3.18202813572992005],
[-0.05391065675859356, 1.74901336242837324]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_laplace_0(self):
- assert_equal(mt19937.laplace(scale=0), 0)
- assert_raises(ValueError, mt19937.laplace, scale=-0.)
+ assert_equal(random.laplace(scale=0), 0)
+ assert_raises(ValueError, random.laplace, scale=-0.)
def test_logistic(self):
- mt19937.seed(self.seed)
- actual = mt19937.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
desired = np.array([[1.09232835305011444, 0.8648196662399954],
[4.27818590694950185, 4.33897006346929714],
[-0.21682183359214885, 2.63373365386060332]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_lognormal(self):
- legacy.seed(self.seed)
- actual = legacy.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
- desired = np.array([[16.50698631688883822, 36.54846706092654784],
- [22.67886599981281748, 0.71617561058995771],
- [65.72798501792723869, 86.84341601437161273]])
+ random.seed(self.seed)
+ actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+ desired = np.array([[1.0894838661036e-03, 9.0990021488311e-01],
+ [6.9178869932225e-01, 2.7672077560016e-01],
+ [2.3248645126975e+00, 1.4609997951330e+00]])
assert_array_almost_equal(actual, desired, decimal=13)
def test_lognormal_0(self):
- assert_equal(mt19937.lognormal(sigma=0), 1)
- assert_raises(ValueError, mt19937.lognormal, sigma=-0.)
+ assert_equal(random.lognormal(sigma=0), 1)
+ assert_raises(ValueError, random.lognormal, sigma=-0.)
def test_logseries(self):
- mt19937.seed(self.seed)
- actual = mt19937.logseries(p=.923456789, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.logseries(p=.923456789, size=(3, 2))
desired = np.array([[2, 2],
[6, 17],
[3, 6]])
assert_array_equal(actual, desired)
def test_multinomial(self):
- mt19937.seed(self.seed)
- actual = mt19937.multinomial(20, [1 / 6.] * 6, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.multinomial(20, [1 / 6.] * 6, size=(3, 2))
desired = np.array([[[4, 3, 5, 4, 2, 2],
[5, 2, 8, 2, 2, 1]],
[[3, 4, 3, 6, 0, 4],
@@ -849,96 +885,112 @@ class TestRandomDist(object):
assert_array_equal(actual, desired)
def test_multivariate_normal(self):
- legacy.seed(self.seed)
+ random.seed(self.seed)
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
- actual = legacy.multivariate_normal(mean, cov, size)
- desired = np.array([[[1.463620246718631, 11.73759122771936],
- [1.622445133300628, 9.771356667546383]],
- [[2.154490787682787, 12.170324946056553],
- [1.719909438201865, 9.230548443648306]],
- [[0.689515026297799, 9.880729819607714],
- [-0.023054015651998, 9.201096623542879]]])
+ actual = random.multivariate_normal(mean, cov, size)
+ desired = np.array([[[-3.34929721161096100, 9.891061435770858],
+ [-0.12250896439641100, 9.295898449738300]],
+ [[0.48355927611635563, 10.127832101772366],
+ [3.11093021424924300, 10.283109168794352]],
+ [[-0.20332082341774727, 9.868532121697195],
+ [-1.33806889550667330, 9.813657233804179]]])
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
- actual = legacy.multivariate_normal(mean, cov)
- desired = np.array([0.895289569463708, 9.17180864067987])
+ actual = random.multivariate_normal(mean, cov)
+ desired = np.array([-1.097443117192574, 10.535787051184261])
assert_array_almost_equal(actual, desired, decimal=15)
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
mean = [0, 0]
cov = [[1, 2], [2, 1]]
- assert_warns(RuntimeWarning, mt19937.multivariate_normal, mean, cov)
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
- assert_no_warnings(mt19937.multivariate_normal, mean, cov,
+ assert_no_warnings(random.multivariate_normal, mean, cov,
check_valid='ignore')
# and that it raises with RuntimeWarning check_valid='raises'
- assert_raises(ValueError, mt19937.multivariate_normal, mean, cov,
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ mu = np.zeros(2)
+ cov = np.eye(2)
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='other')
+ assert_raises(ValueError, random.multivariate_normal,
+ np.zeros((2, 1, 1)), cov)
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.empty((3, 2)))
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.eye(3))
+
def test_negative_binomial(self):
- legacy.seed(self.seed)
- actual = legacy.negative_binomial(n=100, p=.12345, size=(3, 2))
- desired = np.array([[848, 841],
- [892, 611],
- [779, 647]])
+ random.seed(self.seed)
+ actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[521, 736],
+ [665, 690],
+ [723, 751]])
assert_array_equal(actual, desired)
def test_noncentral_chisquare(self):
- legacy.seed(self.seed)
- actual = legacy.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
- desired = np.array([[23.91905354498517511, 13.35324692733826346],
- [31.22452661329736401, 16.60047399466177254],
- [5.03461598262724586, 17.94973089023519464]])
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[9.47783251920357, 10.02066178260461],
+ [3.15869984192364, 10.5581565031544],
+ [5.01652540543548, 13.7689551218441]])
assert_array_almost_equal(actual, desired, decimal=14)
- actual = legacy.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
- desired = np.array([[1.47145377828516666, 0.15052899268012659],
- [0.00943803056963588, 1.02647251615666169],
- [0.332334982684171, 0.15451287602753125]])
+ actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[0.00145153051285, 0.22432468724778],
+ [0.02956713468556, 0.00207192946898],
+ [1.41985055641800, 0.15451287602753]])
assert_array_almost_equal(actual, desired, decimal=14)
- legacy.seed(self.seed)
- actual = legacy.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
- desired = np.array([[9.597154162763948, 11.725484450296079],
- [10.413711048138335, 3.694475922923986],
- [13.484222138963087, 14.377255424602957]])
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[3.64881368071039, 5.48224544747803],
+ [20.41999842025404, 3.44075915187367],
+ [1.29765160605552, 1.64125033268606]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_noncentral_f(self):
- legacy.seed(self.seed)
- actual = legacy.noncentral_f(dfnum=5, dfden=2, nonc=1,
+ random.seed(self.seed)
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
size=(3, 2))
- desired = np.array([[1.40598099674926669, 0.34207973179285761],
- [3.57715069265772545, 7.92632662577829805],
- [0.43741599463544162, 1.1774208752428319]])
+ desired = np.array([[1.22680230963236, 2.56457837623956],
+ [2.7653304499494, 7.4336268865443],
+ [1.16362730891403, 2.54104276581491]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_normal(self):
- legacy.seed(self.seed)
- actual = legacy.normal(loc=.123456789, scale=2.0, size=(3, 2))
- desired = np.array([[2.80378370443726244, 3.59863924443872163],
- [3.121433477601256, -0.33382987590723379],
- [4.18552478636557357, 4.46410668111310471]])
+ random.seed(self.seed)
+ actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[-6.822051212221923, -0.094420339458285],
+ [-0.368474717792823, -1.284746311523402],
+ [0.843661763232711, 0.379120992544734]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_normal_0(self):
- assert_equal(mt19937.normal(scale=0), 0)
- assert_raises(ValueError, mt19937.normal, scale=-0.)
+ assert_equal(random.normal(scale=0), 0)
+ assert_raises(ValueError, random.normal, scale=-0.)
def test_pareto(self):
- legacy.seed(self.seed)
- actual = legacy.pareto(a=.123456789, size=(3, 2))
- desired = np.array(
- [[2.46852460439034849e+03, 1.41286880810518346e+03],
- [5.28287797029485181e+07, 6.57720981047328785e+07],
- [1.40840323350391515e+02, 1.98390255135251704e+05]])
+ random.seed(self.seed)
+ actual = random.pareto(a=.123456789, size=(3, 2))
+ desired = np.array([[5.6883528121891552e+16, 4.0569373841667057e+03],
+ [1.2854967019379475e+12, 6.5833156486851483e+04],
+ [1.1281132447159091e+01, 3.1895968171107006e+08]])
# For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
# matrix differs by 24 nulps. Discussion:
# http://mail.scipy.org/pipermail/numpy-discussion/2012-September/063801.html
@@ -948,8 +1000,8 @@ class TestRandomDist(object):
np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
def test_poisson(self):
- mt19937.seed(self.seed)
- actual = mt19937.poisson(lam=.123456789, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.poisson(lam=.123456789, size=(3, 2))
desired = np.array([[0, 0],
[1, 0],
[0, 0]])
@@ -958,87 +1010,131 @@ class TestRandomDist(object):
def test_poisson_exceptions(self):
lambig = np.iinfo('l').max
lamneg = -1
- assert_raises(ValueError, mt19937.poisson, lamneg)
- assert_raises(ValueError, mt19937.poisson, [lamneg] * 10)
- assert_raises(ValueError, mt19937.poisson, lambig)
- assert_raises(ValueError, mt19937.poisson, [lambig] * 10)
+ assert_raises(ValueError, random.poisson, lamneg)
+ assert_raises(ValueError, random.poisson, [lamneg] * 10)
+ assert_raises(ValueError, random.poisson, lambig)
+ assert_raises(ValueError, random.poisson, [lambig] * 10)
def test_power(self):
- legacy.seed(self.seed)
- actual = legacy.power(a=.123456789, size=(3, 2))
- desired = np.array([[0.02048932883240791, 0.01424192241128213],
- [0.38446073748535298, 0.39499689943484395],
- [0.00177699707563439, 0.13115505880863756]])
+ random.seed(self.seed)
+ actual = random.power(a=.123456789, size=(3, 2))
+ desired = np.array([[9.328833342693975e-01, 2.742250409261003e-02],
+ [7.684513237993961e-01, 9.297548209160028e-02],
+ [2.214811188828573e-05, 4.693448360603472e-01]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_rayleigh(self):
- mt19937.seed(self.seed)
- actual = mt19937.rayleigh(scale=10, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.rayleigh(scale=10, size=(3, 2))
desired = np.array([[13.8882496494248393, 13.383318339044731],
[20.95413364294492098, 21.08285015800712614],
[11.06066537006854311, 17.35468505778271009]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_rayleigh_0(self):
- assert_equal(mt19937.rayleigh(scale=0), 0)
- assert_raises(ValueError, mt19937.rayleigh, scale=-0.)
+ assert_equal(random.rayleigh(scale=0), 0)
+ assert_raises(ValueError, random.rayleigh, scale=-0.)
def test_standard_cauchy(self):
- legacy.seed(self.seed)
- actual = legacy.standard_cauchy(size=(3, 2))
- desired = np.array([[0.77127660196445336, -6.55601161955910605],
- [0.93582023391158309, -2.07479293013759447],
- [-4.74601644297011926, 0.18338989290760804]])
+ random.seed(self.seed)
+ actual = random.standard_cauchy(size=(3, 2))
+ desired = np.array([[31.87809592667601, 0.349332782046838],
+ [2.816995747731641, 10.552372563459114],
+ [2.485608017991235, 7.843211273201831]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_standard_exponential(self):
- mt19937.seed(self.seed)
- actual = mt19937.standard_exponential(size=(3, 2), method='inv')
+ random.seed(self.seed)
+ actual = random.standard_exponential(size=(3, 2), method='inv')
desired = np.array([[0.96441739162374596, 0.89556604882105506],
[2.1953785836319808, 2.22243285392490542],
[0.6116915921431676, 1.50592546727413201]])
assert_array_almost_equal(actual, desired, decimal=15)
+ def test_standard_expoential_type_error(self):
+ assert_raises(TypeError, random.standard_exponential, dtype=np.int32)
+
def test_standard_gamma(self):
- legacy.seed(self.seed)
- actual = legacy.standard_gamma(shape=3, size=(3, 2))
- desired = np.array([[5.50841531318455058, 6.62953470301903103],
- [5.93988484943779227, 2.31044849402133989],
- [7.54838614231317084, 8.012756093271868]])
+ random.seed(self.seed)
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[2.28483515569645, 3.29899524967824],
+ [11.12492298902645, 2.16784417297277],
+ [0.92121813690910, 1.12853552328470]])
assert_array_almost_equal(actual, desired, decimal=14)
+ def test_standard_gammma_scalar_float(self):
+ random.seed(self.seed)
+ actual = random.standard_gamma(3, dtype=np.float32)
+ desired = 1.3877466
+ assert_array_almost_equal(actual, desired, decimal=6)
+
+ def test_standard_gamma_float(self):
+ random.seed(self.seed)
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[2.2848352, 3.2989952],
+ [11.124923, 2.1678442],
+ [0.9212181, 1.1285355]])
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ def test_standard_gammma_float_out(self):
+ actual = np.zeros((3, 2), dtype=np.float32)
+ random.seed(self.seed)
+ random.standard_gamma(10.0, out=actual, dtype=np.float32)
+ desired = np.array([[6.9824033, 7.3731737],
+ [14.860578, 7.5327270],
+ [11.767487, 6.2320185]], dtype=np.float32)
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ random.seed(self.seed)
+ random.standard_gamma(10.0, out=actual, size=(3, 2), dtype=np.float32)
+ assert_array_almost_equal(actual, desired, decimal=5)
+
+ def test_standard_gamma_unknown_type(self):
+ assert_raises(TypeError, random.standard_gamma, 1.,
+ dtype='int32')
+
+ def test_out_size_mismatch(self):
+ out = np.zeros(10)
+ assert_raises(ValueError, random.standard_gamma, 10.0, size=20,
+ out=out)
+ assert_raises(ValueError, random.standard_gamma, 10.0, size=(10, 1),
+ out=out)
+
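# Editorial sketch, not part of the patch: the out=/dtype= pattern the new
# standard_gamma tests above cover -- draws are written into a preallocated
# float32 buffer, and an explicit size must match out.shape or a ValueError
# is raised (see test_out_size_mismatch above).
import numpy as np
from randomgen import RandomGenerator, MT19937
rg = RandomGenerator(MT19937(12345))
buf = np.empty((3, 2), dtype=np.float32)
rg.standard_gamma(10.0, out=buf, dtype=np.float32)   # fills buf in place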
def test_standard_gamma_0(self):
- assert_equal(mt19937.standard_gamma(shape=0), 0)
- assert_raises(ValueError, mt19937.standard_gamma, shape=-0.)
+ assert_equal(random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, random.standard_gamma, shape=-0.)
def test_standard_normal(self):
- legacy.seed(self.seed)
- actual = legacy.standard_normal(size=(3, 2))
- desired = np.array([[1.34016345771863121, 1.73759122771936081],
- [1.498988344300628, -0.2286433324536169],
- [2.031033998682787, 2.17032494605655257]])
+ random.seed(self.seed)
+ actual = random.standard_normal(size=(3, 2))
+ desired = np.array([[-3.472754000610961, -0.108938564229143],
+ [-0.245965753396411, -0.704101550261701],
+ [0.360102487116356, 0.127832101772367]])
assert_array_almost_equal(actual, desired, decimal=15)
+ def test_standard_normal_unsupported_type(self):
+ assert_raises(TypeError, random.standard_normal, dtype=np.int32)
+
def test_standard_t(self):
- legacy.seed(self.seed)
- actual = legacy.standard_t(df=10, size=(3, 2))
- desired = np.array([[0.97140611862659965, -0.08830486548450577],
- [1.36311143689505321, -0.55317463909867071],
- [-0.18473749069684214, 0.61181537341755321]])
+ random.seed(self.seed)
+ actual = random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[-3.68722108185508, -0.672031186266171],
+ [2.900224996448669, -0.199656996187739],
+ [-1.12179956985969, 1.85668262342106]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_triangular(self):
- mt19937.seed(self.seed)
- actual = mt19937.triangular(left=5.12, mode=10.23, right=20.34,
- size=(3, 2))
+ random.seed(self.seed)
+ actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
desired = np.array([[12.68117178949215784, 12.4129206149193152],
[16.20131377335158263, 16.25692138747600524],
[11.20400690911820263, 14.4978144835829923]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_uniform(self):
- mt19937.seed(self.seed)
- actual = mt19937.uniform(low=1.23, high=10.54, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
desired = np.array([[6.99097932346268003, 6.73801597444323974],
[9.50364421400426274, 9.53130618907631089],
[5.48995325769805476, 8.47493103280052118]])
@@ -1048,7 +1144,7 @@ class TestRandomDist(object):
fmin = np.finfo('float').min
fmax = np.finfo('float').max
- func = mt19937.uniform
+ func = random.uniform
assert_raises(OverflowError, func, -np.inf, 0)
assert_raises(OverflowError, func, 0, np.inf)
assert_raises(OverflowError, func, fmin, fmax)
@@ -1058,7 +1154,7 @@ class TestRandomDist(object):
# (fmax / 1e17) - fmin is within range, so this should not throw
# account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
# DBL_MAX by increasing fmin a bit
- mt19937.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+ random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
def test_scalar_exception_propagation(self):
# Tests that exceptions are correctly propagated in distributions
@@ -1072,7 +1168,7 @@ class TestRandomDist(object):
raise TypeError
throwing_float = np.array(1.0).view(ThrowingFloat)
- assert_raises(TypeError, mt19937.uniform,
+ assert_raises(TypeError, random.uniform,
throwing_float, throwing_float)
class ThrowingInteger(np.ndarray):
@@ -1080,11 +1176,11 @@ class TestRandomDist(object):
raise TypeError
throwing_int = np.array(1).view(ThrowingInteger)
- assert_raises(TypeError, mt19937.hypergeometric, throwing_int, 1, 1)
+ assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
def test_vonmises(self):
- mt19937.seed(self.seed)
- actual = mt19937.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
desired = np.array([[2.28567572673902042, 2.89163838442285037],
[0.38198375564286025, 2.57638023113890746],
[1.19153771588353052, 1.83509849681825354]])
@@ -1092,34 +1188,34 @@ class TestRandomDist(object):
def test_vonmises_small(self):
# check infinite loop, gh-4720
- mt19937.seed(self.seed)
- r = mt19937.vonmises(mu=0., kappa=1.1e-8, size=10 ** 6)
+ random.seed(self.seed)
+ r = random.vonmises(mu=0., kappa=1.1e-8, size=10 ** 6)
assert_(np.isfinite(r).all())
def test_wald(self):
- legacy.seed(self.seed)
- actual = legacy.wald(mean=1.23, scale=1.54, size=(3, 2))
- desired = np.array([[3.82935265715889983, 5.13125249184285526],
- [0.35045403618358717, 1.50832396872003538],
- [0.24124319895843183, 0.22031101461955038]])
+ random.seed(self.seed)
+ actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+ desired = np.array([[0.10653278160339, 0.98771068102461],
+ [0.89276055317879, 0.13640126419923],
+ [0.9194319091599, 0.36037816317472]])
assert_array_almost_equal(actual, desired, decimal=14)
def test_weibull(self):
- legacy.seed(self.seed)
- actual = legacy.weibull(a=1.23, size=(3, 2))
- desired = np.array([[0.97097342648766727, 0.91422896443565516],
- [1.89517770034962929, 1.91414357960479564],
- [0.67057783752390987, 1.39494046635066793]])
+ random.seed(self.seed)
+ actual = random.weibull(a=1.23, size=(3, 2))
+ desired = np.array([[3.557276979846361, 1.020870580998542],
+ [2.731847777612348, 1.29148068905082],
+ [0.385531483942839, 2.049551716717254]])
assert_array_almost_equal(actual, desired, decimal=15)
def test_weibull_0(self):
- mt19937.seed(self.seed)
- assert_equal(mt19937.weibull(a=0, size=12), np.zeros(12))
- assert_raises(ValueError, mt19937.weibull, a=-0.)
+ random.seed(self.seed)
+ assert_equal(random.weibull(a=0, size=12), np.zeros(12))
+ assert_raises(ValueError, random.weibull, a=-0.)
def test_zipf(self):
- mt19937.seed(self.seed)
- actual = mt19937.zipf(a=1.23, size=(3, 2))
+ random.seed(self.seed)
+ actual = random.zipf(a=1.23, size=(3, 2))
desired = np.array([[66, 29],
[1, 1],
[3, 13]])
@@ -1134,7 +1230,6 @@ class TestBroadcast(object):
def set_seed(self):
random.seed(self.seed)
- legacy.seed(self.seed)
def test_uniform(self):
low = [0]
@@ -1156,126 +1251,112 @@ class TestBroadcast(object):
loc = [0]
scale = [1]
bad_scale = [-1]
- normal = legacy.normal
- desired = np.array([2.2129019979039612,
- 2.1283977976520019,
- 1.8417114045748335])
+ normal = random.normal
+ desired = np.array([0.454879818179180,
+ -0.62749179463661,
+ -0.06063266769872])
self.set_seed()
actual = normal(loc * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc * 3, bad_scale)
- assert_raises(ValueError, mt19937.normal, loc * 3, bad_scale)
+ assert_raises(ValueError, random.normal, loc * 3, bad_scale)
self.set_seed()
actual = normal(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, normal, loc, bad_scale * 3)
- assert_raises(ValueError, mt19937.normal, loc, bad_scale * 3)
+ assert_raises(ValueError, random.normal, loc, bad_scale * 3)
def test_beta(self):
a = [1]
b = [2]
bad_a = [-1]
bad_b = [-2]
- beta = legacy.beta
- desired = np.array([0.19843558305989056,
- 0.075230336409423643,
- 0.24976865978980844])
+ beta = random.beta
+ desired = np.array([0.63222080311226,
+ 0.33310522220774,
+ 0.64494078460190])
self.set_seed()
actual = beta(a * 3, b)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, beta, bad_a * 3, b)
assert_raises(ValueError, beta, a * 3, bad_b)
- assert_raises(ValueError, mt19937.beta, bad_a * 3, b)
- assert_raises(ValueError, mt19937.beta, a * 3, bad_b)
self.set_seed()
actual = beta(a, b * 3)
assert_array_almost_equal(actual, desired, decimal=14)
- assert_raises(ValueError, mt19937.beta, bad_a, b * 3)
- assert_raises(ValueError, mt19937.beta, a, bad_b * 3)
def test_exponential(self):
scale = [1]
bad_scale = [-1]
- exponential = legacy.exponential
- desired = np.array([0.76106853658845242,
- 0.76386282278691653,
- 0.71243813125891797])
+ exponential = random.exponential
+ desired = np.array([1.68591211640990,
+ 3.14186859487914,
+ 0.67717375919228])
self.set_seed()
actual = exponential(scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, exponential, bad_scale * 3)
- assert_raises(ValueError, mt19937.exponential, bad_scale * 3)
def test_standard_gamma(self):
shape = [1]
bad_shape = [-1]
- std_gamma = legacy.standard_gamma
- desired = np.array([0.76106853658845242,
- 0.76386282278691653,
- 0.71243813125891797])
+ std_gamma = random.standard_gamma
+ desired = np.array([1.68591211640990,
+ 3.14186859487914,
+ 0.67717375919228])
self.set_seed()
actual = std_gamma(shape * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, std_gamma, bad_shape * 3)
- assert_raises(ValueError, mt19937.standard_gamma, bad_shape * 3)
def test_gamma(self):
shape = [1]
scale = [2]
bad_shape = [-1]
bad_scale = [-2]
- gamma = legacy.gamma
- desired = np.array([1.5221370731769048,
- 1.5277256455738331,
- 1.4248762625178359])
+ gamma = random.gamma
+ desired = np.array([3.37182423281980,
+ 6.28373718975827,
+ 1.35434751838456])
self.set_seed()
actual = gamma(shape * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape * 3, scale)
assert_raises(ValueError, gamma, shape * 3, bad_scale)
- assert_raises(ValueError, mt19937.gamma, bad_shape * 3, scale)
- assert_raises(ValueError, mt19937.gamma, shape * 3, bad_scale)
self.set_seed()
actual = gamma(shape, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, gamma, bad_shape, scale * 3)
assert_raises(ValueError, gamma, shape, bad_scale * 3)
- assert_raises(ValueError, mt19937.gamma, bad_shape, scale * 3)
- assert_raises(ValueError, mt19937.gamma, shape, bad_scale * 3)
def test_f(self):
dfnum = [1]
dfden = [2]
bad_dfnum = [-1]
bad_dfden = [-2]
- f = legacy.f
- desired = np.array([0.80038951638264799,
- 0.86768719635363512,
- 2.7251095168386801])
+ f = random.f
+ desired = np.array([0.84207044881810,
+ 3.08607209903483,
+ 3.12823105933169])
self.set_seed()
actual = f(dfnum * 3, dfden)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum * 3, dfden)
assert_raises(ValueError, f, dfnum * 3, bad_dfden)
- assert_raises(ValueError, mt19937.f, bad_dfnum * 3, dfden)
- assert_raises(ValueError, mt19937.f, dfnum * 3, bad_dfden)
self.set_seed()
actual = f(dfnum, dfden * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, f, bad_dfnum, dfden * 3)
assert_raises(ValueError, f, dfnum, bad_dfden * 3)
- assert_raises(ValueError, mt19937.f, bad_dfnum, dfden * 3)
- assert_raises(ValueError, mt19937.f, dfnum, bad_dfden * 3)
def test_noncentral_f(self):
dfnum = [2]
@@ -1284,20 +1365,18 @@ class TestBroadcast(object):
bad_dfnum = [0]
bad_dfden = [-1]
bad_nonc = [-2]
- nonc_f = legacy.noncentral_f
- desired = np.array([9.1393943263705211,
- 13.025456344595602,
- 8.8018098359100545])
+ nonc_f = random.noncentral_f
+ desired = np.array([3.83710578542563,
+ 8.74926819712029,
+ 0.48892943835401])
self.set_seed()
actual = nonc_f(dfnum * 3, dfden, nonc)
+ mt_nonc_f = random.noncentral_f
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
- assert_raises(ValueError, mt19937.noncentral_f, bad_dfnum * 3, dfden, nonc)
- assert_raises(ValueError, mt19937.noncentral_f, dfnum * 3, bad_dfden, nonc)
- assert_raises(ValueError, mt19937.noncentral_f, dfnum * 3, dfden, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden * 3, nonc)
@@ -1305,9 +1384,6 @@ class TestBroadcast(object):
assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
- assert_raises(ValueError, mt19937.noncentral_f, bad_dfnum, dfden * 3, nonc)
- assert_raises(ValueError, mt19937.noncentral_f, dfnum, bad_dfden * 3, nonc)
- assert_raises(ValueError, mt19937.noncentral_f, dfnum, dfden * 3, bad_nonc)
self.set_seed()
actual = nonc_f(dfnum, dfden, nonc * 3)
@@ -1315,9 +1391,9 @@ class TestBroadcast(object):
assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
- assert_raises(ValueError, mt19937.noncentral_f, bad_dfnum, dfden, nonc * 3)
- assert_raises(ValueError, mt19937.noncentral_f, dfnum, bad_dfden, nonc * 3)
- assert_raises(ValueError, mt19937.noncentral_f, dfnum, dfden, bad_nonc * 3)
+ assert_raises(ValueError, mt_nonc_f, bad_dfnum, dfden, nonc * 3)
+ assert_raises(ValueError, mt_nonc_f, dfnum, bad_dfden, nonc * 3)
+ assert_raises(ValueError, mt_nonc_f, dfnum, dfden, bad_nonc * 3)
def test_chisquare(self):
df = [1]
@@ -1337,40 +1413,41 @@ class TestBroadcast(object):
nonc = [2]
bad_df = [-1]
bad_nonc = [-2]
- nonc_chi = legacy.noncentral_chisquare
- desired = np.array([9.0015599467913763,
- 4.5804135049718742,
- 6.0872302432834564])
+ nonc_chi = random.noncentral_chisquare
+ desired = np.array([2.20478739452297,
+ 1.45177405755115,
+ 1.00418921695354])
self.set_seed()
actual = nonc_chi(df * 3, nonc)
+ mt_nonc_chi2 = random.noncentral_chisquare
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
- assert_raises(ValueError, mt19937.noncentral_chisquare, bad_df * 3, nonc)
- assert_raises(ValueError, mt19937.noncentral_chisquare, df * 3, bad_nonc)
+ assert_raises(ValueError, mt_nonc_chi2, bad_df * 3, nonc)
+ assert_raises(ValueError, mt_nonc_chi2, df * 3, bad_nonc)
self.set_seed()
actual = nonc_chi(df, nonc * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
- assert_raises(ValueError, mt19937.noncentral_chisquare, bad_df, nonc * 3)
- assert_raises(ValueError, mt19937.noncentral_chisquare, df, bad_nonc * 3)
+ assert_raises(ValueError, mt_nonc_chi2, bad_df, nonc * 3)
+ assert_raises(ValueError, mt_nonc_chi2, df, bad_nonc * 3)
def test_standard_t(self):
df = [1]
bad_df = [-1]
- t = legacy.standard_t
- desired = np.array([3.0702872575217643,
- 5.8560725167361607,
- 1.0274791436474273])
+ t = random.standard_t
+ desired = np.array([0.60081050724244,
+ -0.90380889829210,
+ -0.64499590504117])
self.set_seed()
actual = t(df * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, t, bad_df * 3)
- assert_raises(ValueError, mt19937.standard_t, bad_df * 3)
+ assert_raises(ValueError, random.standard_t, bad_df * 3)
def test_vonmises(self):
mu = [2]
@@ -1394,44 +1471,44 @@ class TestBroadcast(object):
def test_pareto(self):
a = [1]
bad_a = [-1]
- pareto = legacy.pareto
- desired = np.array([1.1405622680198362,
- 1.1465519762044529,
- 1.0389564467453547])
+ pareto = random.pareto
+ desired = np.array([4.397371719158540,
+ 22.14707898642946,
+ 0.968306954322200])
self.set_seed()
actual = pareto(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, pareto, bad_a * 3)
- assert_raises(ValueError, mt19937.pareto, bad_a * 3)
+ assert_raises(ValueError, random.pareto, bad_a * 3)
def test_weibull(self):
a = [1]
bad_a = [-1]
- weibull = legacy.weibull
- desired = np.array([0.76106853658845242,
- 0.76386282278691653,
- 0.71243813125891797])
+ weibull = random.weibull
+ desired = np.array([1.68591211640990,
+ 3.14186859487914,
+ 0.67717375919228])
self.set_seed()
actual = weibull(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, weibull, bad_a * 3)
- assert_raises(ValueError, mt19937.weibull, bad_a * 3)
+ assert_raises(ValueError, random.weibull, bad_a * 3)
def test_power(self):
a = [1]
bad_a = [-1]
- power = legacy.power
- desired = np.array([0.53283302478975902,
- 0.53413660089041659,
- 0.50955303552646702])
+ power = random.power
+ desired = np.array([0.81472463783615,
+ 0.95679800459547,
+ 0.49194916077287])
self.set_seed()
actual = power(a * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, power, bad_a * 3)
- assert_raises(ValueError, mt19937.power, bad_a * 3)
+ assert_raises(ValueError, random.power, bad_a * 3)
def test_laplace(self):
loc = [0]
@@ -1489,28 +1566,28 @@ class TestBroadcast(object):
actual = logistic(loc, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, logistic, loc, bad_scale * 3)
- assert_equal(mt19937.logistic(1.0, 0.0), 1.0)
+ assert_equal(random.logistic(1.0, 0.0), 1.0)
def test_lognormal(self):
mean = [0]
sigma = [1]
bad_sigma = [-1]
- lognormal = legacy.lognormal
- desired = np.array([9.1422086044848427,
- 8.4013952870126261,
- 6.3073234116578671])
+ lognormal = random.lognormal
+ desired = np.array([1.57598396702930,
+ 0.53392932731280,
+ 0.94116889802361])
self.set_seed()
actual = lognormal(mean * 3, sigma)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
- assert_raises(ValueError, mt19937.lognormal, mean * 3, bad_sigma)
+ assert_raises(ValueError, random.lognormal, mean * 3, bad_sigma)
self.set_seed()
actual = lognormal(mean, sigma * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
- assert_raises(ValueError, mt19937.lognormal, mean, bad_sigma * 3)
+ assert_raises(ValueError, random.lognormal, mean, bad_sigma * 3)
def test_rayleigh(self):
scale = [1]
@@ -1530,26 +1607,26 @@ class TestBroadcast(object):
scale = [1]
bad_mean = [0]
bad_scale = [-2]
- wald = legacy.wald
- desired = np.array([0.11873681120271318,
- 0.12450084820795027,
- 0.9096122728408238])
+ wald = random.wald
+ desired = np.array([0.36297361471752,
+ 0.52190135028254,
+ 0.55111022040727])
self.set_seed()
actual = wald(mean * 3, scale)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean * 3, scale)
assert_raises(ValueError, wald, mean * 3, bad_scale)
- assert_raises(ValueError, mt19937.wald, bad_mean * 3, scale)
- assert_raises(ValueError, mt19937.wald, mean * 3, bad_scale)
+ assert_raises(ValueError, random.wald, bad_mean * 3, scale)
+ assert_raises(ValueError, random.wald, mean * 3, bad_scale)
self.set_seed()
actual = wald(mean, scale * 3)
assert_array_almost_equal(actual, desired, decimal=14)
assert_raises(ValueError, wald, bad_mean, scale * 3)
assert_raises(ValueError, wald, mean, bad_scale * 3)
- assert_raises(ValueError, mt19937.wald, bad_mean, scale * 3)
- assert_raises(ValueError, mt19937.wald, mean, bad_scale * 3)
+ assert_raises(ValueError, random.wald, bad_mean, scale * 3)
+ assert_raises(ValueError, random.wald, mean, bad_scale * 3)
def test_triangular(self):
left = [1]
@@ -1587,6 +1664,10 @@ class TestBroadcast(object):
assert_raises(ValueError, triangular, bad_left_two,
bad_mode_two, right * 3)
+ assert_raises(ValueError, triangular, 10., 0., 20.)
+ assert_raises(ValueError, triangular, 10., 25., 20.)
+ assert_raises(ValueError, triangular, 10., 10., 10.)
+
def test_binomial(self):
n = [1]
p = [0.5]
@@ -1616,8 +1697,8 @@ class TestBroadcast(object):
bad_n = [-1]
bad_p_one = [-1]
bad_p_two = [1.5]
- neg_binom = legacy.negative_binomial
- desired = np.array([1, 0, 1])
+ neg_binom = random.negative_binomial
+ desired = np.array([3, 1, 2], dtype=np.int64)
self.set_seed()
actual = neg_binom(n * 3, p)
@@ -1625,9 +1706,6 @@ class TestBroadcast(object):
assert_raises(ValueError, neg_binom, bad_n * 3, p)
assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
- assert_raises(ValueError, mt19937.negative_binomial, bad_n * 3, p)
- assert_raises(ValueError, mt19937.negative_binomial, n * 3, bad_p_one)
- assert_raises(ValueError, mt19937.negative_binomial, n * 3, bad_p_two)
self.set_seed()
actual = neg_binom(n, p * 3)
@@ -1635,9 +1713,6 @@ class TestBroadcast(object):
assert_raises(ValueError, neg_binom, bad_n, p * 3)
assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
- assert_raises(ValueError, mt19937.negative_binomial, bad_n, p * 3)
- assert_raises(ValueError, mt19937.negative_binomial, n, bad_p_one * 3)
- assert_raises(ValueError, mt19937.negative_binomial, n, bad_p_two * 3)
def test_poisson(self):
max_lam = random.poisson_lam_max
@@ -1713,6 +1788,11 @@ class TestBroadcast(object):
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+ assert_raises(ValueError, hypergeom, -1, 10, 20)
+ assert_raises(ValueError, hypergeom, 10, -1, 20)
+ assert_raises(ValueError, hypergeom, 10, 10, 0)
+ assert_raises(ValueError, hypergeom, 10, 10, 25)
+
def test_logseries(self):
p = [0.5]
bad_p_one = [2]
@@ -1783,14 +1863,14 @@ class TestSingleEltArrayInput(object):
self.tgtShape = (1,)
def test_one_arg_funcs(self):
- funcs = (mt19937.exponential, mt19937.standard_gamma,
- mt19937.chisquare, mt19937.standard_t,
- mt19937.pareto, mt19937.weibull,
- mt19937.power, mt19937.rayleigh,
- mt19937.poisson, mt19937.zipf,
- mt19937.geometric, mt19937.logseries)
+ funcs = (random.exponential, random.standard_gamma,
+ random.chisquare, random.standard_t,
+ random.pareto, random.weibull,
+ random.power, random.rayleigh,
+ random.poisson, random.zipf,
+ random.geometric, random.logseries)
- probfuncs = (mt19937.geometric, mt19937.logseries)
+ probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
@@ -1802,15 +1882,15 @@ class TestSingleEltArrayInput(object):
assert_equal(out.shape, self.tgtShape)
def test_two_arg_funcs(self):
- funcs = (mt19937.uniform, mt19937.normal,
- mt19937.beta, mt19937.gamma,
- mt19937.f, mt19937.noncentral_chisquare,
- mt19937.vonmises, mt19937.laplace,
- mt19937.gumbel, mt19937.logistic,
- mt19937.lognormal, mt19937.wald,
- mt19937.binomial, mt19937.negative_binomial)
+ funcs = (random.uniform, random.normal,
+ random.beta, random.gamma,
+ random.f, random.noncentral_chisquare,
+ random.vonmises, random.laplace,
+ random.gumbel, random.logistic,
+ random.lognormal, random.wald,
+ random.binomial, random.negative_binomial)
- probfuncs = (mt19937.binomial, mt19937.negative_binomial)
+ probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
@@ -1828,26 +1908,26 @@ class TestSingleEltArrayInput(object):
out = func(self.argOne, argTwo[0])
assert_equal(out.shape, self.tgtShape)
- def test_randint(self):
- itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
- np.int32, np.uint32, np.int64, np.uint64]
- func = mt19937.randint
- high = np.array([1])
- low = np.array([0])
-
- for dt in itype:
- out = func(low, high, dtype=dt)
- assert_equal(out.shape, self.tgtShape)
+ def test_randint(self):
+ itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+ func = random.randint
+ high = np.array([1])
+ low = np.array([0])
+
+ for dt in itype:
+ out = func(low, high, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
- out = func(low[0], high, dtype=dt)
- assert_equal(out.shape, self.tgtShape)
+ out = func(low[0], high, dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
- out = func(low, high[0], dtype=dt)
- assert_equal(out.shape, self.tgtShape)
+ out = func(low, high[0], dtype=dt)
+ assert_equal(out.shape, self.tgtShape)
def test_three_arg_funcs(self):
- funcs = [mt19937.noncentral_f, mt19937.triangular,
- mt19937.hypergeometric]
+ funcs = [random.noncentral_f, random.triangular,
+ random.hypergeometric]
for func in funcs:
out = func(self.argOne, self.argTwo, self.argThree)
diff --git a/_randomgen/randomgen/tests/test_numpy_mt19937_regressions.py b/numpy/random/randomgen/tests/test_generator_mt19937_regressions.py
index bc644e122..75064720f 100644
--- a/_randomgen/randomgen/tests/test_numpy_mt19937_regressions.py
+++ b/numpy/random/randomgen/tests/test_generator_mt19937_regressions.py
@@ -1,11 +1,9 @@
-from __future__ import division, absolute_import, print_function
-
import sys
from numpy.testing import (assert_, assert_array_equal)
from numpy.compat import long
import numpy as np
import pytest
-from randomgen import RandomGenerator, MT19937
+from ...randomgen import RandomGenerator, MT19937
mt19937 = RandomGenerator(MT19937())
diff --git a/numpy/random/randomgen/tests/test_randomstate.py b/numpy/random/randomgen/tests/test_randomstate.py
new file mode 100644
index 000000000..167d1b0aa
--- /dev/null
+++ b/numpy/random/randomgen/tests/test_randomstate.py
@@ -0,0 +1,1808 @@
+import pickle
+import sys
+import warnings
+
+import numpy as np
+from numpy.testing import (
+ assert_, assert_raises, assert_equal, assert_warns,
+ assert_no_warnings, assert_array_equal, assert_array_almost_equal,
+ suppress_warnings
+ )
+
+from ...randomgen import MT19937, Xoshiro256StarStar, mtrand as random
+
+
+def assert_mt19937_state_equal(a, b):
+ assert_equal(a['brng'], b['brng'])
+ assert_array_equal(a['state']['key'], b['state']['key'])
+ assert_array_equal(a['state']['pos'], b['state']['pos'])
+ assert_equal(a['has_gauss'], b['has_gauss'])
+ assert_equal(a['gauss'], b['gauss'])
+
+
+class TestSeed(object):
+ def test_scalar(self):
+ s = random.RandomState(0)
+ assert_equal(s.randint(1000), 684)
+ s = random.RandomState(4294967295)
+ assert_equal(s.randint(1000), 419)
+
+ def test_array(self):
+ s = random.RandomState(range(10))
+ assert_equal(s.randint(1000), 468)
+ s = random.RandomState(np.arange(10))
+ assert_equal(s.randint(1000), 468)
+ s = random.RandomState([0])
+ assert_equal(s.randint(1000), 973)
+ s = random.RandomState([4294967295])
+ assert_equal(s.randint(1000), 265)
+
+ def test_invalid_scalar(self):
+ # seed must be an unsigned 32 bit integer
+ assert_raises(TypeError, random.RandomState, -0.5)
+ assert_raises(ValueError, random.RandomState, -1)
+
+ def test_invalid_array(self):
+ # seed must be an unsigned 32 bit integer
+ assert_raises(TypeError, random.RandomState, [-0.5])
+ assert_raises(ValueError, random.RandomState, [-1])
+ assert_raises(ValueError, random.RandomState, [4294967296])
+ assert_raises(ValueError, random.RandomState, [1, 2, 4294967296])
+ assert_raises(ValueError, random.RandomState, [1, -2, 4294967296])
+
+ def test_invalid_array_shape(self):
+ # gh-9832
+ assert_raises(ValueError, random.RandomState, np.array([],
+ dtype=np.int64))
+ assert_raises(ValueError, random.RandomState, [[1, 2, 3]])
+ assert_raises(ValueError, random.RandomState, [[1, 2, 3],
+ [4, 5, 6]])
+
+ def test_seed_equivalency(self):
+ rs = random.RandomState(0)
+ rs2 = random.RandomState(MT19937(0))
+ assert_mt19937_state_equal(rs.get_state(legacy=False),
+ rs2.get_state(legacy=False))
+
+ def test_invalid_initialization(self):
+ assert_raises(ValueError, random.RandomState, MT19937)
+
+
+class TestBinomial(object):
+ def test_n_zero(self):
+ # Tests the corner case of n == 0 for the binomial distribution.
+ # binomial(0, p) should be zero for any p in [0, 1].
+ # This test addresses issue #3480.
+ zeros = np.zeros(2, dtype='int')
+ for p in [0, .5, 1]:
+ assert_(random.binomial(0, p) == 0)
+ assert_array_equal(random.binomial(zeros, p), zeros)
+
+ def test_p_is_nan(self):
+ # Issue #4571.
+ assert_raises(ValueError, random.binomial, 1, np.nan)
+
+
+class TestMultinomial(object):
+ def test_basic(self):
+ random.multinomial(100, [0.2, 0.8])
+
+ def test_zero_probability(self):
+ random.multinomial(100, [0.2, 0.8, 0.0, 0.0, 0.0])
+
+ def test_int_negative_interval(self):
+ assert_(-5 <= random.randint(-5, -1) < -1)
+ x = random.randint(-5, -1, 5)
+ assert_(np.all(-5 <= x))
+ assert_(np.all(x < -1))
+
+ def test_size(self):
+ # gh-3173
+ p = [0.5, 0.5]
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.multinomial(1, p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.multinomial(1, p, np.array((2, 2))).shape,
+ (2, 2, 2))
+
+ assert_raises(TypeError, random.multinomial, 1, p,
+ float(1))
+ assert_raises(ValueError, random.multinomial, 1, [1.1, .1])
+
+
+class TestSetState(object):
+ def setup(self):
+ self.seed = 1234567890
+ self.random_state = random.RandomState(self.seed)
+ self.state = self.random_state.get_state()
+
+ def test_basic(self):
+ old = self.random_state.tomaxint(16)
+ self.random_state.set_state(self.state)
+ new = self.random_state.tomaxint(16)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset(self):
+ # Make sure the cached every-other-Gaussian is reset.
+ old = self.random_state.standard_normal(size=3)
+ self.random_state.set_state(self.state)
+ new = self.random_state.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_gaussian_reset_in_media_res(self):
+ # When the state is saved with a cached Gaussian, make sure the
+ # cached Gaussian is restored.
+
+ self.random_state.standard_normal()
+ state = self.random_state.get_state()
+ old = self.random_state.standard_normal(size=3)
+ self.random_state.set_state(state)
+ new = self.random_state.standard_normal(size=3)
+ assert_(np.all(old == new))
+
+ def test_backwards_compatibility(self):
+ # Make sure we can accept old state tuples that do not have the
+ # cached Gaussian value.
+ old_state = self.state[:-2]
+ x1 = self.random_state.standard_normal(size=16)
+ self.random_state.set_state(old_state)
+ x2 = self.random_state.standard_normal(size=16)
+ self.random_state.set_state(self.state)
+ x3 = self.random_state.standard_normal(size=16)
+ assert_(np.all(x1 == x2))
+ assert_(np.all(x1 == x3))
+
+ def test_negative_binomial(self):
+ # Ensure that the negative binomial results take floating point
+ # arguments without truncation.
+ self.random_state.negative_binomial(0.5, 0.5)
+
+ def test_get_state_warning(self):
+ rs = random.RandomState(Xoshiro256StarStar())
+ with suppress_warnings() as sup:
+ w = sup.record(RuntimeWarning)
+ state = rs.get_state()
+ assert_(len(w) == 1)
+ assert isinstance(state, dict)
+ assert state['brng'] == 'Xoshiro256StarStar'
+
+ def test_invalid_legacy_state_setting(self):
+ state = self.random_state.get_state()
+ new_state = ('Unknown', ) + state[1:]
+ assert_raises(ValueError, self.random_state.set_state, new_state)
+ assert_raises(TypeError, self.random_state.set_state,
+ np.array(new_state, dtype=np.object))
+ state = self.random_state.get_state(legacy=False)
+ del state['brng']
+ assert_raises(ValueError, self.random_state.set_state, state)
+
+ def test_pickle(self):
+ self.random_state.seed(0)
+ self.random_state.random_sample(100)
+ self.random_state.standard_normal()
+ pickled = self.random_state.get_state(legacy=False)
+ assert_equal(pickled['has_gauss'], 1)
+ rs_unpick = pickle.loads(pickle.dumps(self.random_state))
+ unpickled = rs_unpick.get_state(legacy=False)
+ assert_mt19937_state_equal(pickled, unpickled)
+
+ def test_state_setting(self):
+ attr_state = self.random_state.__getstate__()
+ self.random_state.standard_normal()
+ self.random_state.__setstate__(attr_state)
+ state = self.random_state.get_state(legacy=False)
+ assert_mt19937_state_equal(attr_state, state)
+
+ def test_repr(self):
+ assert repr(self.random_state).startswith('RandomState(MT19937)')
+
+
+class TestRandint(object):
+
+ rfunc = random.randint
+
+ # valid integer/boolean types
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
+ np.int32, np.uint32, np.int64, np.uint64]
+
+ def test_unsupported_type(self):
+ assert_raises(TypeError, self.rfunc, 1, dtype=float)
+
+ def test_bounds_checking(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+ assert_raises(ValueError, self.rfunc, lbnd - 1, ubnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, lbnd, ubnd + 1, dtype=dt)
+ assert_raises(ValueError, self.rfunc, ubnd, lbnd, dtype=dt)
+ assert_raises(ValueError, self.rfunc, 1, 0, dtype=dt)
+
+ def test_rng_zero_and_extremes(self):
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ tgt = ubnd - 1
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = lbnd
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ tgt = (lbnd + ubnd)//2
+ assert_equal(self.rfunc(tgt, tgt + 1, size=1000, dtype=dt), tgt)
+
+ def test_full_range(self):
+ # Test for ticket #1690
+
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ try:
+ self.rfunc(lbnd, ubnd, dtype=dt)
+ except Exception as e:
+ raise AssertionError("No error should have been raised, "
+ "but one was with the following "
+ "message:\n\n%s" % str(e))
+
+ def test_in_bounds_fuzz(self):
+ # Don't use fixed seed
+ random.seed()
+
+ for dt in self.itype[1:]:
+ for ubnd in [4, 8, 16]:
+ vals = self.rfunc(2, ubnd, size=2**16, dtype=dt)
+ assert_(vals.max() < ubnd)
+ assert_(vals.min() >= 2)
+
+ vals = self.rfunc(0, 2, size=2**16, dtype=np.bool_)
+
+ assert_(vals.max() < 2)
+ assert_(vals.min() >= 0)
+
+ def test_repeatability(self):
+ import hashlib
+ # We use an md5 hash of generated sequences of 1000 samples
+ # in the range [0, 6) for all but bool, where the range
+ # is [0, 2). Hashes are for little-endian numbers.
+ tgt = {'bool': '7dd3170d7aa461d201a65f8bcf3944b0',
+ 'int16': '1b7741b80964bb190c50d541dca1cac1',
+ 'int32': '4dc9fcc2b395577ebb51793e58ed1a05',
+ 'int64': '17db902806f448331b5a758d7d2ee672',
+ 'int8': '27dd30c4e08a797063dffac2490b0be6',
+ 'uint16': '1b7741b80964bb190c50d541dca1cac1',
+ 'uint32': '4dc9fcc2b395577ebb51793e58ed1a05',
+ 'uint64': '17db902806f448331b5a758d7d2ee672',
+ 'uint8': '27dd30c4e08a797063dffac2490b0be6'}
+
+ for dt in self.itype[1:]:
+ random.seed(1234)
+
+ # view as little endian for hash
+ if sys.byteorder == 'little':
+ val = self.rfunc(0, 6, size=1000, dtype=dt)
+ else:
+ val = self.rfunc(0, 6, size=1000, dtype=dt).byteswap()
+
+ res = hashlib.md5(val.view(np.int8)).hexdigest()
+ assert_(tgt[np.dtype(dt).name] == res)
+
+ # bools do not depend on endianness
+ random.seed(1234)
+ val = self.rfunc(0, 2, size=1000, dtype=bool).view(np.int8)
+ res = hashlib.md5(val).hexdigest()
+ assert_(tgt[np.dtype(bool).name] == res)
+
+ def test_int64_uint64_corner_case(self):
+ # When stored in NumPy arrays, `lbnd` is cast to np.int64 and `ubnd` to
+ # np.uint64. Checking whether `lbnd` >= `ubnd` used to be done by direct
+ # comparison, which is incorrect: when NumPy compares the two it casts
+ # both to np.float64, because there is no integer superset of np.int64
+ # and np.uint64. At float64 precision the two bounds compare equal, so
+ # the check spuriously reported `lbnd` >= `ubnd` and raised a ValueError.
+
+ dt = np.int64
+ tgt = np.iinfo(np.int64).max
+ lbnd = np.int64(np.iinfo(np.int64).max)
+ ubnd = np.uint64(np.iinfo(np.int64).max + 1)
+
+ # None of these function calls should
+ # generate a ValueError now.
+ actual = random.randint(lbnd, ubnd, dtype=dt)
+ assert_equal(actual, tgt)
+
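# Editorial illustration, not part of the patch, of the float64 promotion the
# comment above describes: both bounds collapse to the same float64 value, so
# a plain `lbnd >= ubnd` comparison spuriously reported an empty range.
import numpy as np
lbnd = np.int64(np.iinfo(np.int64).max)          # 2**63 - 1
ubnd = np.uint64(np.iinfo(np.int64).max + 1)     # 2**63
assert float(lbnd) == float(ubnd) == 2.0 ** 63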
+ def test_respect_dtype_singleton(self):
+ # See gh-7203
+ for dt in self.itype:
+ lbnd = 0 if dt is np.bool_ else np.iinfo(dt).min
+ ubnd = 2 if dt is np.bool_ else np.iinfo(dt).max + 1
+
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_equal(sample.dtype, np.dtype(dt))
+
+ for dt in (bool, int, np.long):
+ lbnd = 0 if dt is bool else np.iinfo(dt).min
+ ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
+
+ # gh-7284: Ensure that we get Python data types
+ sample = self.rfunc(lbnd, ubnd, dtype=dt)
+ assert_(not hasattr(sample, 'dtype'))
+ assert_equal(type(sample), dt)
+
+
+class TestRandomDist(object):
+ # Make sure the random distribution returns the correct value for a
+ # given seed
+
+ def setup(self):
+ self.seed = 1234567890
+
+ def test_rand(self):
+ random.seed(self.seed)
+ actual = random.rand(3, 2)
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn(self):
+ random.seed(self.seed)
+ actual = random.randn(3, 2)
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randint(self):
+ random.seed(self.seed)
+ actual = random.randint(-99, 99, size=(3, 2))
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ def test_random_integers(self):
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(-99, 99, size=(3, 2))
+ assert_(len(w) == 1)
+ desired = np.array([[31, 3],
+ [-52, 41],
+ [-48, -66]])
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(198, size=(3, 2))
+ assert_(len(w) == 1)
+ assert_array_equal(actual, desired + 100)
+
+ def test_tomaxint(self):
+ random.seed(self.seed)
+ rs = random.RandomState(self.seed)
+ actual = rs.tomaxint(size=(3, 2))
+ if np.iinfo(np.int).max == 2147483647:
+ desired = np.array([[1328851649, 731237375],
+ [1270502067, 320041495],
+ [1908433478, 499156889]], dtype=np.int64)
+ else:
+ desired = np.array([[5707374374421908479, 5456764827585442327],
+ [8196659375100692377, 8224063923314595285],
+ [4220315081820346526, 7177518203184491332]],
+ dtype=np.int64)
+
+ assert_equal(actual, desired)
+
+ rs.seed(self.seed)
+ actual = rs.tomaxint()
+ assert_equal(actual, desired[0, 0])
+
+ def test_random_integers_max_int(self):
+ # Tests whether random_integers can generate the
+ # maximum allowed Python int that can be converted
+ # into a C long. Previous implementations of this
+ # method have thrown an OverflowError when attempting
+ # to generate this integer.
+ with suppress_warnings() as sup:
+ w = sup.record(DeprecationWarning)
+ actual = random.random_integers(np.iinfo('l').max,
+ np.iinfo('l').max)
+ assert_(len(w) == 1)
+
+ desired = np.iinfo('l').max
+ assert_equal(actual, desired)
+
+ def test_random_integers_deprecated(self):
+ with warnings.catch_warnings():
+ warnings.simplefilter("error", DeprecationWarning)
+
+ # DeprecationWarning raised with high == None
+ assert_raises(DeprecationWarning,
+ random.random_integers,
+ np.iinfo('l').max)
+
+ # DeprecationWarning raised with high != None
+ assert_raises(DeprecationWarning,
+ random.random_integers,
+ np.iinfo('l').max, np.iinfo('l').max)
+
+ def test_random_sample(self):
+ random.seed(self.seed)
+ actual = random.random_sample((3, 2))
+ desired = np.array([[0.61879477158567997, 0.59162362775974664],
+ [0.88868358904449662, 0.89165480011560816],
+ [0.4575674820298663, 0.7781880808593471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rand_singleton(self):
+ random.seed(self.seed)
+ actual = random.rand()
+ desired = np.array(0.61879477158567997)
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_choice_uniform_replace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 4)
+ desired = np.array([2, 3, 2, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_replace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 4, p=[0.4, 0.4, 0.1, 0.1])
+ desired = np.array([1, 1, 2, 2])
+ assert_array_equal(actual, desired)
+
+ def test_choice_uniform_noreplace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False)
+ desired = np.array([0, 1, 3])
+ assert_array_equal(actual, desired)
+
+ def test_choice_nonuniform_noreplace(self):
+ random.seed(self.seed)
+ actual = random.choice(4, 3, replace=False, p=[0.1, 0.3, 0.5, 0.1])
+ desired = np.array([2, 3, 1])
+ assert_array_equal(actual, desired)
+
+ def test_choice_noninteger(self):
+ random.seed(self.seed)
+ actual = random.choice(['a', 'b', 'c', 'd'], 4)
+ desired = np.array(['c', 'd', 'c', 'd'])
+ assert_array_equal(actual, desired)
+
+ def test_choice_exceptions(self):
+ sample = random.choice
+ assert_raises(ValueError, sample, -1, 3)
+ assert_raises(ValueError, sample, 3., 3)
+ assert_raises(ValueError, sample, [[1, 2], [3, 4]], 3)
+ assert_raises(ValueError, sample, [], 3)
+ assert_raises(ValueError, sample, [1, 2, 3, 4], 3,
+ p=[[0.25, 0.25], [0.25, 0.25]])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4, 0.2])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[1.1, -0.1])
+ assert_raises(ValueError, sample, [1, 2], 3, p=[0.4, 0.4])
+ assert_raises(ValueError, sample, [1, 2, 3], 4, replace=False)
+ # gh-13087
+ assert_raises(ValueError, sample, [1, 2, 3], -2, replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1,), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], (-1, 1), replace=False)
+ assert_raises(ValueError, sample, [1, 2, 3], 2,
+ replace=False, p=[1, 0, 0])
+
+ def test_choice_return_shape(self):
+ p = [0.1, 0.9]
+ # Check scalar
+ assert_(np.isscalar(random.choice(2, replace=True)))
+ assert_(np.isscalar(random.choice(2, replace=False)))
+ assert_(np.isscalar(random.choice(2, replace=True, p=p)))
+ assert_(np.isscalar(random.choice(2, replace=False, p=p)))
+ assert_(np.isscalar(random.choice([1, 2], replace=True)))
+ assert_(random.choice([None], replace=True) is None)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, replace=True) is a)
+
+ # Check 0-d array
+ s = tuple()
+ assert_(not np.isscalar(random.choice(2, s, replace=True)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False)))
+ assert_(not np.isscalar(random.choice(2, s, replace=True, p=p)))
+ assert_(not np.isscalar(random.choice(2, s, replace=False, p=p)))
+ assert_(not np.isscalar(random.choice([1, 2], s, replace=True)))
+ assert_(random.choice([None], s, replace=True).ndim == 0)
+ a = np.array([1, 2])
+ arr = np.empty(1, dtype=object)
+ arr[0] = a
+ assert_(random.choice(arr, s, replace=True).item() is a)
+
+ # Check multi dimensional array
+ s = (2, 3)
+ p = [0.1, 0.1, 0.1, 0.1, 0.4, 0.2]
+ assert_equal(random.choice(6, s, replace=True).shape, s)
+ assert_equal(random.choice(6, s, replace=False).shape, s)
+ assert_equal(random.choice(6, s, replace=True, p=p).shape, s)
+ assert_equal(random.choice(6, s, replace=False, p=p).shape, s)
+ assert_equal(random.choice(np.arange(6), s, replace=True).shape, s)
+
+ # Check zero-size
+ assert_equal(random.randint(0, 0, size=(3, 0, 4)).shape, (3, 0, 4))
+ assert_equal(random.randint(0, -10, size=0).shape, (0,))
+ assert_equal(random.randint(10, 10, size=0).shape, (0,))
+ assert_equal(random.choice(0, size=0).shape, (0,))
+ assert_equal(random.choice([], size=(0,)).shape, (0,))
+ assert_equal(random.choice(['a', 'b'], size=(3, 0, 4)).shape,
+ (3, 0, 4))
+ assert_raises(ValueError, random.choice, [], 10)
+
+ def test_choice_nan_probabilities(self):
+ a = np.array([42, 1, 2])
+ p = [None, None, None]
+ assert_raises(ValueError, random.choice, a, p=p)
+
+ def test_bytes(self):
+ random.seed(self.seed)
+ actual = random.bytes(10)
+ desired = b'\x82Ui\x9e\xff\x97+Wf\xa5'
+ assert_equal(actual, desired)
+
+ def test_shuffle(self):
+ # Test lists, arrays (of various dtypes), and multidimensional versions
+ # of both, c-contiguous or not:
+ for conv in [lambda x: np.array([]),
+ lambda x: x,
+ lambda x: np.asarray(x).astype(np.int8),
+ lambda x: np.asarray(x).astype(np.float32),
+ lambda x: np.asarray(x).astype(np.complex64),
+ lambda x: np.asarray(x).astype(object),
+ lambda x: [(i, i) for i in x],
+ lambda x: np.asarray([[i, i] for i in x]),
+ lambda x: np.vstack([x, x]).T,
+ # gh-11442
+ lambda x: (np.asarray([(i, i) for i in x],
+ [("a", int), ("b", int)])
+ .view(np.recarray)),
+ # gh-4270
+ lambda x: np.asarray([(i, i) for i in x],
+ [("a", object, 1),
+ ("b", np.int32, 1)])]:
+ random.seed(self.seed)
+ alist = conv([1, 2, 3, 4, 5, 6, 7, 8, 9, 0])
+ random.shuffle(alist)
+ actual = alist
+ desired = conv([0, 1, 9, 6, 2, 4, 5, 8, 7, 3])
+ assert_array_equal(actual, desired)
+
+ def test_permutation(self):
+ random.seed(self.seed)
+ alist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]
+ actual = random.permutation(alist)
+ desired = [0, 1, 9, 6, 2, 4, 5, 8, 7, 3]
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ arr_2d = np.atleast_2d([1, 2, 3, 4, 5, 6, 7, 8, 9, 0]).T
+ actual = random.permutation(arr_2d)
+ assert_array_equal(actual, np.atleast_2d(desired).T)
+
+ def test_shuffle_masked(self):
+ # gh-3263
+ a = np.ma.masked_values(np.reshape(range(20), (5, 4)) % 3 - 1, -1)
+ b = np.ma.masked_values(np.arange(20) % 3 - 1, -1)
+ a_orig = a.copy()
+ b_orig = b.copy()
+ for i in range(50):
+ random.shuffle(a)
+ assert_equal(
+ sorted(a.data[~a.mask]), sorted(a_orig.data[~a_orig.mask]))
+ random.shuffle(b)
+ assert_equal(
+ sorted(b.data[~b.mask]), sorted(b_orig.data[~b_orig.mask]))
+
+ def test_beta(self):
+ random.seed(self.seed)
+ actual = random.beta(.1, .9, size=(3, 2))
+ desired = np.array(
+ [[1.45341850513746058e-02, 5.31297615662868145e-04],
+ [1.85366619058432324e-06, 4.19214516800110563e-03],
+ [1.58405155108498093e-04, 1.26252891949397652e-04]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_binomial(self):
+ random.seed(self.seed)
+ actual = random.binomial(100.123, .456, size=(3, 2))
+ desired = np.array([[37, 43],
+ [42, 48],
+ [46, 45]])
+ assert_array_equal(actual, desired)
+
+ random.seed(self.seed)
+ actual = random.binomial(100.123, .456)
+ desired = 37
+ assert_array_equal(actual, desired)
+
+ def test_chisquare(self):
+ random.seed(self.seed)
+ actual = random.chisquare(50, size=(3, 2))
+ desired = np.array([[63.87858175501090585, 68.68407748911370447],
+ [65.77116116901505904, 47.09686762438974483],
+ [72.3828403199695174, 74.18408615260374006]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_dirichlet(self):
+ random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha, size=(3, 2))
+ desired = np.array([[[0.54539444573611562, 0.45460555426388438],
+ [0.62345816822039413, 0.37654183177960598]],
+ [[0.55206000085785778, 0.44793999914214233],
+ [0.58964023305154301, 0.41035976694845688]],
+ [[0.59266909280647828, 0.40733090719352177],
+ [0.56974431743975207, 0.43025568256024799]]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ random.seed(self.seed)
+ alpha = np.array([51.72840233779265162, 39.74494232180943953])
+ actual = random.dirichlet(alpha)
+ assert_array_almost_equal(actual, desired[0, 0], decimal=15)
+
+ def test_dirichlet_size(self):
+ # gh-3173
+ p = np.array([51.72840233779265162, 39.74494232180943953])
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, np.uint32(1)).shape, (1, 2))
+ assert_equal(random.dirichlet(p, [2, 2]).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, (2, 2)).shape, (2, 2, 2))
+ assert_equal(random.dirichlet(p, np.array((2, 2))).shape, (2, 2, 2))
+
+ assert_raises(TypeError, random.dirichlet, p, float(1))
+
+ def test_dirichlet_bad_alpha(self):
+ # gh-2089
+ alpha = np.array([5.4e-01, -1.0e-16])
+ assert_raises(ValueError, random.dirichlet, alpha)
+
+ def test_exponential(self):
+ random.seed(self.seed)
+ actual = random.exponential(1.1234, size=(3, 2))
+ desired = np.array([[1.08342649775011624, 1.00607889924557314],
+ [2.46628830085216721, 2.49668106809923884],
+ [0.68717433461363442, 1.69175666993575979]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_exponential_0(self):
+ assert_equal(random.exponential(scale=0), 0)
+ assert_raises(ValueError, random.exponential, scale=-0.)
+
+ def test_f(self):
+ random.seed(self.seed)
+ actual = random.f(12, 77, size=(3, 2))
+ desired = np.array([[1.21975394418575878, 1.75135759791559775],
+ [1.44803115017146489, 1.22108959480396262],
+ [1.02176975757740629, 1.34431827623300415]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gamma(self):
+ random.seed(self.seed)
+ actual = random.gamma(5, 3, size=(3, 2))
+ desired = np.array([[24.60509188649287182, 28.54993563207210627],
+ [26.13476110204064184, 12.56988482927716078],
+ [31.71863275789960568, 33.30143302795922011]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_gamma_0(self):
+ assert_equal(random.gamma(shape=0, scale=0), 0)
+ assert_raises(ValueError, random.gamma, shape=-0., scale=-0.)
+
+ def test_geometric(self):
+ random.seed(self.seed)
+ actual = random.geometric(.123456789, size=(3, 2))
+ desired = np.array([[8, 7],
+ [17, 17],
+ [5, 12]])
+ assert_array_equal(actual, desired)
+
+ def test_gumbel(self):
+ random.seed(self.seed)
+ actual = random.gumbel(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.19591898743416816, 0.34405539668096674],
+ [-1.4492522252274278, -1.47374816298446865],
+ [1.10651090478803416, -0.69535848626236174]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_gumbel_0(self):
+ assert_equal(random.gumbel(scale=0), 0)
+ assert_raises(ValueError, random.gumbel, scale=-0.)
+
+ def test_hypergeometric(self):
+ random.seed(self.seed)
+ actual = random.hypergeometric(10.1, 5.5, 14, size=(3, 2))
+ desired = np.array([[10, 10],
+ [10, 10],
+ [9, 9]])
+ assert_array_equal(actual, desired)
+
+ # Test nbad = 0
+ actual = random.hypergeometric(5, 0, 3, size=4)
+ desired = np.array([3, 3, 3, 3])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(15, 0, 12, size=4)
+ desired = np.array([12, 12, 12, 12])
+ assert_array_equal(actual, desired)
+
+ # Test ngood = 0
+ actual = random.hypergeometric(0, 5, 3, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ actual = random.hypergeometric(0, 15, 12, size=4)
+ desired = np.array([0, 0, 0, 0])
+ assert_array_equal(actual, desired)
+
+ def test_laplace(self):
+ random.seed(self.seed)
+ actual = random.laplace(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[0.66599721112760157, 0.52829452552221945],
+ [3.12791959514407125, 3.18202813572992005],
+ [-0.05391065675859356, 1.74901336242837324]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_laplace_0(self):
+ assert_equal(random.laplace(scale=0), 0)
+ assert_raises(ValueError, random.laplace, scale=-0.)
+
+ def test_logistic(self):
+ random.seed(self.seed)
+ actual = random.logistic(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[1.09232835305011444, 0.8648196662399954],
+ [4.27818590694950185, 4.33897006346929714],
+ [-0.21682183359214885, 2.63373365386060332]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_lognormal(self):
+ random.seed(self.seed)
+ actual = random.lognormal(mean=.123456789, sigma=2.0, size=(3, 2))
+ desired = np.array([[16.50698631688883822, 36.54846706092654784],
+ [22.67886599981281748, 0.71617561058995771],
+ [65.72798501792723869, 86.84341601437161273]])
+ assert_array_almost_equal(actual, desired, decimal=13)
+
+ def test_lognormal_0(self):
+ assert_equal(random.lognormal(sigma=0), 1)
+ assert_raises(ValueError, random.lognormal, sigma=-0.)
+
+ def test_logseries(self):
+ random.seed(self.seed)
+ actual = random.logseries(p=.923456789, size=(3, 2))
+ desired = np.array([[2, 2],
+ [6, 17],
+ [3, 6]])
+ assert_array_equal(actual, desired)
+
+ def test_multinomial(self):
+ random.seed(self.seed)
+ actual = random.multinomial(20, [1/6.]*6, size=(3, 2))
+ desired = np.array([[[4, 3, 5, 4, 2, 2],
+ [5, 2, 8, 2, 2, 1]],
+ [[3, 4, 3, 6, 0, 4],
+ [2, 1, 4, 3, 6, 4]],
+ [[4, 4, 2, 5, 2, 3],
+ [4, 3, 4, 2, 3, 4]]])
+ assert_array_equal(actual, desired)
+
+ def test_multivariate_normal(self):
+ random.seed(self.seed)
+ mean = (.123456789, 10)
+ cov = [[1, 0], [0, 1]]
+ size = (3, 2)
+ actual = random.multivariate_normal(mean, cov, size)
+ desired = np.array([[[1.463620246718631, 11.73759122771936],
+ [1.622445133300628, 9.771356667546383]],
+ [[2.154490787682787, 12.170324946056553],
+ [1.719909438201865, 9.230548443648306]],
+ [[0.689515026297799, 9.880729819607714],
+ [-0.023054015651998, 9.201096623542879]]])
+
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check for default size, was raising deprecation warning
+ actual = random.multivariate_normal(mean, cov)
+ desired = np.array([0.895289569463708, 9.17180864067987])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ # Check that a non-positive-semidefinite covariance warns with
+ # RuntimeWarning
+ mean = [0, 0]
+ cov = [[1, 2], [2, 1]]
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+
+ # and that it doesn't warn with RuntimeWarning when check_valid='ignore'
+ assert_no_warnings(random.multivariate_normal, mean, cov,
+ check_valid='ignore')
+
+ # and that it raises a ValueError when check_valid='raise'
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
+
+ cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
+ with suppress_warnings() as sup:
+ random.multivariate_normal(mean, cov)
+ w = sup.record(RuntimeWarning)
+ assert len(w) == 0
+
+ mu = np.zeros(2)
+ cov = np.eye(2)
+ assert_raises(ValueError, random.multivariate_normal, mean,
+ cov, check_valid='other')
+ assert_raises(ValueError, random.multivariate_normal,
+ np.zeros((2, 1, 1)), cov)
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.empty((3, 2)))
+ assert_raises(ValueError, random.multivariate_normal,
+ mu, np.eye(3))
+
+ def test_negative_binomial(self):
+ random.seed(self.seed)
+ actual = random.negative_binomial(n=100, p=.12345, size=(3, 2))
+ desired = np.array([[848, 841],
+ [892, 611],
+ [779, 647]])
+ assert_array_equal(actual, desired)
+
+ def test_noncentral_chisquare(self):
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=5, size=(3, 2))
+ desired = np.array([[23.91905354498517511, 13.35324692733826346],
+ [31.22452661329736401, 16.60047399466177254],
+ [5.03461598262724586, 17.94973089023519464]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ actual = random.noncentral_chisquare(df=.5, nonc=.2, size=(3, 2))
+ desired = np.array([[1.47145377828516666, 0.15052899268012659],
+ [0.00943803056963588, 1.02647251615666169],
+ [0.332334982684171, 0.15451287602753125]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ random.seed(self.seed)
+ actual = random.noncentral_chisquare(df=5, nonc=0, size=(3, 2))
+ desired = np.array([[9.597154162763948, 11.725484450296079],
+ [10.413711048138335, 3.694475922923986],
+ [13.484222138963087, 14.377255424602957]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_noncentral_f(self):
+ random.seed(self.seed)
+ actual = random.noncentral_f(dfnum=5, dfden=2, nonc=1,
+ size=(3, 2))
+ desired = np.array([[1.40598099674926669, 0.34207973179285761],
+ [3.57715069265772545, 7.92632662577829805],
+ [0.43741599463544162, 1.1774208752428319]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ random.seed(self.seed)
+ actual = random.normal(loc=.123456789, scale=2.0, size=(3, 2))
+ desired = np.array([[2.80378370443726244, 3.59863924443872163],
+ [3.121433477601256, -0.33382987590723379],
+ [4.18552478636557357, 4.46410668111310471]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_normal_0(self):
+ assert_equal(random.normal(scale=0), 0)
+ assert_raises(ValueError, random.normal, scale=-0.)
+
+ def test_pareto(self):
+ random.seed(self.seed)
+ actual = random.pareto(a=.123456789, size=(3, 2))
+ desired = np.array(
+ [[2.46852460439034849e+03, 1.41286880810518346e+03],
+ [5.28287797029485181e+07, 6.57720981047328785e+07],
+ [1.40840323350391515e+02, 1.98390255135251704e+05]])
+ # For some reason on 32-bit x86 Ubuntu 12.10 the [1, 0] entry in this
+ # matrix differs by 24 nulps. Discussion:
+ # https://mail.python.org/pipermail/numpy-discussion/2012-September/063801.html
+ # Consensus is that this is probably some gcc quirk that affects
+ # rounding but not in any important way, so we just use a looser
+ # tolerance on this test:
+ np.testing.assert_array_almost_equal_nulp(actual, desired, nulp=30)
+
+ def test_poisson(self):
+ random.seed(self.seed)
+ actual = random.poisson(lam=.123456789, size=(3, 2))
+ desired = np.array([[0, 0],
+ [1, 0],
+ [0, 0]])
+ assert_array_equal(actual, desired)
+
+ def test_poisson_exceptions(self):
+ lambig = np.iinfo('l').max
+ lamneg = -1
+ assert_raises(ValueError, random.poisson, lamneg)
+ assert_raises(ValueError, random.poisson, [lamneg]*10)
+ assert_raises(ValueError, random.poisson, lambig)
+ assert_raises(ValueError, random.poisson, [lambig]*10)
+
+ def test_power(self):
+ random.seed(self.seed)
+ actual = random.power(a=.123456789, size=(3, 2))
+ desired = np.array([[0.02048932883240791, 0.01424192241128213],
+ [0.38446073748535298, 0.39499689943484395],
+ [0.00177699707563439, 0.13115505880863756]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_rayleigh(self):
+ random.seed(self.seed)
+ actual = random.rayleigh(scale=10, size=(3, 2))
+ desired = np.array([[13.8882496494248393, 13.383318339044731],
+ [20.95413364294492098, 21.08285015800712614],
+ [11.06066537006854311, 17.35468505778271009]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_rayleigh_0(self):
+ assert_equal(random.rayleigh(scale=0), 0)
+ assert_raises(ValueError, random.rayleigh, scale=-0.)
+
+ def test_standard_cauchy(self):
+ random.seed(self.seed)
+ actual = random.standard_cauchy(size=(3, 2))
+ desired = np.array([[0.77127660196445336, -6.55601161955910605],
+ [0.93582023391158309, -2.07479293013759447],
+ [-4.74601644297011926, 0.18338989290760804]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_exponential(self):
+ random.seed(self.seed)
+ actual = random.standard_exponential(size=(3, 2))
+ desired = np.array([[0.96441739162374596, 0.89556604882105506],
+ [2.1953785836319808, 2.22243285392490542],
+ [0.6116915921431676, 1.50592546727413201]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_gamma(self):
+ random.seed(self.seed)
+ actual = random.standard_gamma(shape=3, size=(3, 2))
+ desired = np.array([[5.50841531318455058, 6.62953470301903103],
+ [5.93988484943779227, 2.31044849402133989],
+ [7.54838614231317084, 8.012756093271868]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_standard_gamma_0(self):
+ assert_equal(random.standard_gamma(shape=0), 0)
+ assert_raises(ValueError, random.standard_gamma, shape=-0.)
+
+ def test_standard_normal(self):
+ random.seed(self.seed)
+ actual = random.standard_normal(size=(3, 2))
+ desired = np.array([[1.34016345771863121, 1.73759122771936081],
+ [1.498988344300628, -0.2286433324536169],
+ [2.031033998682787, 2.17032494605655257]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_randn_singleton(self):
+ random.seed(self.seed)
+ actual = random.randn()
+ desired = np.array(1.34016345771863121)
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_standard_t(self):
+ random.seed(self.seed)
+ actual = random.standard_t(df=10, size=(3, 2))
+ desired = np.array([[0.97140611862659965, -0.08830486548450577],
+ [1.36311143689505321, -0.55317463909867071],
+ [-0.18473749069684214, 0.61181537341755321]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_triangular(self):
+ random.seed(self.seed)
+ actual = random.triangular(left=5.12, mode=10.23, right=20.34,
+ size=(3, 2))
+ desired = np.array([[12.68117178949215784, 12.4129206149193152],
+ [16.20131377335158263, 16.25692138747600524],
+ [11.20400690911820263, 14.4978144835829923]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_uniform(self):
+ random.seed(self.seed)
+ actual = random.uniform(low=1.23, high=10.54, size=(3, 2))
+ desired = np.array([[6.99097932346268003, 6.73801597444323974],
+ [9.50364421400426274, 9.53130618907631089],
+ [5.48995325769805476, 8.47493103280052118]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_uniform_range_bounds(self):
+ fmin = np.finfo('float').min
+ fmax = np.finfo('float').max
+
+ func = random.uniform
+ assert_raises(OverflowError, func, -np.inf, 0)
+ assert_raises(OverflowError, func, 0, np.inf)
+ assert_raises(OverflowError, func, fmin, fmax)
+ assert_raises(OverflowError, func, [-np.inf], [0])
+ assert_raises(OverflowError, func, [0], [np.inf])
+
+ # (fmax / 1e17) - fmin is within range, so this should not throw
+ # account for i386 extended precision DBL_MAX / 1e17 + DBL_MAX >
+ # DBL_MAX by increasing fmin a bit
+ random.uniform(low=np.nextafter(fmin, 1), high=fmax / 1e17)
+
+ def test_scalar_exception_propagation(self):
+ # Tests that exceptions are correctly propagated in distributions
+ # when called with objects that throw exceptions when converted to
+ # scalars.
+ #
+ # Regression test for gh: 8865
+
+ class ThrowingFloat(np.ndarray):
+ def __float__(self):
+ raise TypeError
+
+ throwing_float = np.array(1.0).view(ThrowingFloat)
+ assert_raises(TypeError, random.uniform, throwing_float,
+ throwing_float)
+
+ class ThrowingInteger(np.ndarray):
+ def __int__(self):
+ raise TypeError
+
+ throwing_int = np.array(1).view(ThrowingInteger)
+ assert_raises(TypeError, random.hypergeometric, throwing_int, 1, 1)
+
+ def test_vonmises(self):
+ random.seed(self.seed)
+ actual = random.vonmises(mu=1.23, kappa=1.54, size=(3, 2))
+ desired = np.array([[2.28567572673902042, 2.89163838442285037],
+ [0.38198375564286025, 2.57638023113890746],
+ [1.19153771588353052, 1.83509849681825354]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_vonmises_small(self):
+ # check infinite loop, gh-4720
+ random.seed(self.seed)
+ r = random.vonmises(mu=0., kappa=1.1e-8, size=10**6)
+ np.testing.assert_(np.isfinite(r).all())
+
+ def test_wald(self):
+ random.seed(self.seed)
+ actual = random.wald(mean=1.23, scale=1.54, size=(3, 2))
+ desired = np.array([[3.82935265715889983, 5.13125249184285526],
+ [0.35045403618358717, 1.50832396872003538],
+ [0.24124319895843183, 0.22031101461955038]])
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_weibull(self):
+ random.seed(self.seed)
+ actual = random.weibull(a=1.23, size=(3, 2))
+ desired = np.array([[0.97097342648766727, 0.91422896443565516],
+ [1.89517770034962929, 1.91414357960479564],
+ [0.67057783752390987, 1.39494046635066793]])
+ assert_array_almost_equal(actual, desired, decimal=15)
+
+ def test_weibull_0(self):
+ random.seed(self.seed)
+ assert_equal(random.weibull(a=0, size=12), np.zeros(12))
+ assert_raises(ValueError, random.weibull, a=-0.)
+
+ def test_zipf(self):
+ random.seed(self.seed)
+ actual = random.zipf(a=1.23, size=(3, 2))
+ desired = np.array([[66, 29],
+ [1, 1],
+ [3, 13]])
+ assert_array_equal(actual, desired)
+
+
+class TestBroadcast(object):
+ # tests that functions that broadcast behave
+ # correctly when presented with non-scalar arguments
+ def setup(self):
+ self.seed = 123456789
+
+ def setSeed(self):
+ random.seed(self.seed)
+
+ # TODO: Include test for randint once it can broadcast
+ # Can steal the test written in PR #6938
+
+ def test_uniform(self):
+ low = [0]
+ high = [1]
+ uniform = random.uniform
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.setSeed()
+ actual = uniform(low * 3, high)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ self.setSeed()
+ actual = uniform(low, high * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_normal(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ normal = random.normal
+ desired = np.array([2.2129019979039612,
+ 2.1283977976520019,
+ 1.8417114045748335])
+
+ self.setSeed()
+ actual = normal(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = normal(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, normal, loc, bad_scale * 3)
+
+ def test_beta(self):
+ a = [1]
+ b = [2]
+ bad_a = [-1]
+ bad_b = [-2]
+ beta = random.beta
+ desired = np.array([0.19843558305989056,
+ 0.075230336409423643,
+ 0.24976865978980844])
+
+ self.setSeed()
+ actual = beta(a * 3, b)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a * 3, b)
+ assert_raises(ValueError, beta, a * 3, bad_b)
+
+ self.setSeed()
+ actual = beta(a, b * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, beta, bad_a, b * 3)
+ assert_raises(ValueError, beta, a, bad_b * 3)
+
+ def test_exponential(self):
+ scale = [1]
+ bad_scale = [-1]
+ exponential = random.exponential
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.setSeed()
+ actual = exponential(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, exponential, bad_scale * 3)
+
+ def test_standard_gamma(self):
+ shape = [1]
+ bad_shape = [-1]
+ std_gamma = random.standard_gamma
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.setSeed()
+ actual = std_gamma(shape * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, std_gamma, bad_shape * 3)
+
+ def test_gamma(self):
+ shape = [1]
+ scale = [2]
+ bad_shape = [-1]
+ bad_scale = [-2]
+ gamma = random.gamma
+ desired = np.array([1.5221370731769048,
+ 1.5277256455738331,
+ 1.4248762625178359])
+
+ self.setSeed()
+ actual = gamma(shape * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape * 3, scale)
+ assert_raises(ValueError, gamma, shape * 3, bad_scale)
+
+ self.setSeed()
+ actual = gamma(shape, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gamma, bad_shape, scale * 3)
+ assert_raises(ValueError, gamma, shape, bad_scale * 3)
+
+ def test_f(self):
+ dfnum = [1]
+ dfden = [2]
+ bad_dfnum = [-1]
+ bad_dfden = [-2]
+ f = random.f
+ desired = np.array([0.80038951638264799,
+ 0.86768719635363512,
+ 2.7251095168386801])
+
+ self.setSeed()
+ actual = f(dfnum * 3, dfden)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum * 3, dfden)
+ assert_raises(ValueError, f, dfnum * 3, bad_dfden)
+
+ self.setSeed()
+ actual = f(dfnum, dfden * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, f, bad_dfnum, dfden * 3)
+ assert_raises(ValueError, f, dfnum, bad_dfden * 3)
+
+ def test_noncentral_f(self):
+ dfnum = [2]
+ dfden = [3]
+ nonc = [4]
+ bad_dfnum = [0]
+ bad_dfden = [-1]
+ bad_nonc = [-2]
+ nonc_f = random.noncentral_f
+ desired = np.array([9.1393943263705211,
+ 13.025456344595602,
+ 8.8018098359100545])
+
+ self.setSeed()
+ actual = nonc_f(dfnum * 3, dfden, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum * 3, dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, bad_dfden, nonc)
+ assert_raises(ValueError, nonc_f, dfnum * 3, dfden, bad_nonc)
+
+ self.setSeed()
+ actual = nonc_f(dfnum, dfden * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden * 3, nonc)
+ assert_raises(ValueError, nonc_f, dfnum, dfden * 3, bad_nonc)
+
+ self.setSeed()
+ actual = nonc_f(dfnum, dfden, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_f, bad_dfnum, dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, bad_dfden, nonc * 3)
+ assert_raises(ValueError, nonc_f, dfnum, dfden, bad_nonc * 3)
+
+ def test_noncentral_f_small_df(self):
+ self.setSeed()
+ desired = np.array([6.869638627492048, 0.785880199263955])
+ actual = random.noncentral_f(0.9, 0.9, 2, size=2)
+ assert_array_almost_equal(actual, desired, decimal=14)
+
+ def test_chisquare(self):
+ df = [1]
+ bad_df = [-1]
+ chisquare = random.chisquare
+ desired = np.array([0.57022801133088286,
+ 0.51947702108840776,
+ 0.1320969254923558])
+
+ self.setSeed()
+ actual = chisquare(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, chisquare, bad_df * 3)
+
+ def test_noncentral_chisquare(self):
+ df = [1]
+ nonc = [2]
+ bad_df = [-1]
+ bad_nonc = [-2]
+ nonc_chi = random.noncentral_chisquare
+ desired = np.array([9.0015599467913763,
+ 4.5804135049718742,
+ 6.0872302432834564])
+
+ self.setSeed()
+ actual = nonc_chi(df * 3, nonc)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df * 3, nonc)
+ assert_raises(ValueError, nonc_chi, df * 3, bad_nonc)
+
+ self.setSeed()
+ actual = nonc_chi(df, nonc * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, nonc_chi, bad_df, nonc * 3)
+ assert_raises(ValueError, nonc_chi, df, bad_nonc * 3)
+
+ def test_standard_t(self):
+ df = [1]
+ bad_df = [-1]
+ t = random.standard_t
+ desired = np.array([3.0702872575217643,
+ 5.8560725167361607,
+ 1.0274791436474273])
+
+ self.setSeed()
+ actual = t(df * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, t, bad_df * 3)
+
+ def test_vonmises(self):
+ mu = [2]
+ kappa = [1]
+ bad_kappa = [-1]
+ vonmises = random.vonmises
+ desired = np.array([2.9883443664201312,
+ -2.7064099483995943,
+ -1.8672476700665914])
+
+ self.setSeed()
+ actual = vonmises(mu * 3, kappa)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu * 3, bad_kappa)
+
+ self.setSeed()
+ actual = vonmises(mu, kappa * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, vonmises, mu, bad_kappa * 3)
+
+ def test_pareto(self):
+ a = [1]
+ bad_a = [-1]
+ pareto = random.pareto
+ desired = np.array([1.1405622680198362,
+ 1.1465519762044529,
+ 1.0389564467453547])
+
+ self.setSeed()
+ actual = pareto(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, pareto, bad_a * 3)
+
+ def test_weibull(self):
+ a = [1]
+ bad_a = [-1]
+ weibull = random.weibull
+ desired = np.array([0.76106853658845242,
+ 0.76386282278691653,
+ 0.71243813125891797])
+
+ self.setSeed()
+ actual = weibull(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, weibull, bad_a * 3)
+
+ def test_power(self):
+ a = [1]
+ bad_a = [-1]
+ power = random.power
+ desired = np.array([0.53283302478975902,
+ 0.53413660089041659,
+ 0.50955303552646702])
+
+ self.setSeed()
+ actual = power(a * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, power, bad_a * 3)
+
+ def test_laplace(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ laplace = random.laplace
+ desired = np.array([0.067921356028507157,
+ 0.070715642226971326,
+ 0.019290950698972624])
+
+ self.setSeed()
+ actual = laplace(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = laplace(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, laplace, loc, bad_scale * 3)
+
+ def test_gumbel(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ gumbel = random.gumbel
+ desired = np.array([0.2730318639556768,
+ 0.26936705726291116,
+ 0.33906220393037939])
+
+ self.setSeed()
+ actual = gumbel(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = gumbel(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, gumbel, loc, bad_scale * 3)
+
+ def test_logistic(self):
+ loc = [0]
+ scale = [1]
+ bad_scale = [-1]
+ logistic = random.logistic
+ desired = np.array([0.13152135837586171,
+ 0.13675915696285773,
+ 0.038216792802833396])
+
+ self.setSeed()
+ actual = logistic(loc * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc * 3, bad_scale)
+
+ self.setSeed()
+ actual = logistic(loc, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, logistic, loc, bad_scale * 3)
+
+ def test_lognormal(self):
+ mean = [0]
+ sigma = [1]
+ bad_sigma = [-1]
+ lognormal = random.lognormal
+ desired = np.array([9.1422086044848427,
+ 8.4013952870126261,
+ 6.3073234116578671])
+
+ self.setSeed()
+ actual = lognormal(mean * 3, sigma)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean * 3, bad_sigma)
+
+ self.setSeed()
+ actual = lognormal(mean, sigma * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, lognormal, mean, bad_sigma * 3)
+
+ def test_rayleigh(self):
+ scale = [1]
+ bad_scale = [-1]
+ rayleigh = random.rayleigh
+ desired = np.array([1.2337491937897689,
+ 1.2360119924878694,
+ 1.1936818095781789])
+
+ self.setSeed()
+ actual = rayleigh(scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, rayleigh, bad_scale * 3)
+
+ def test_wald(self):
+ mean = [0.5]
+ scale = [1]
+ bad_mean = [0]
+ bad_scale = [-2]
+ wald = random.wald
+ desired = np.array([0.11873681120271318,
+ 0.12450084820795027,
+ 0.9096122728408238])
+
+ self.setSeed()
+ actual = wald(mean * 3, scale)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean * 3, scale)
+ assert_raises(ValueError, wald, mean * 3, bad_scale)
+
+ self.setSeed()
+ actual = wald(mean, scale * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, wald, bad_mean, scale * 3)
+ assert_raises(ValueError, wald, mean, bad_scale * 3)
+ assert_raises(ValueError, wald, 0.0, 1)
+ assert_raises(ValueError, wald, 0.5, 0.0)
+
+ def test_triangular(self):
+ left = [1]
+ right = [3]
+ mode = [2]
+ bad_left_one = [3]
+ bad_mode_one = [4]
+ bad_left_two, bad_mode_two = right * 2
+ triangular = random.triangular
+ desired = np.array([2.03339048710429,
+ 2.0347400359389356,
+ 2.0095991069536208])
+
+ self.setSeed()
+ actual = triangular(left * 3, mode, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one * 3, mode, right)
+ assert_raises(ValueError, triangular, left * 3, bad_mode_one, right)
+ assert_raises(ValueError, triangular, bad_left_two * 3, bad_mode_two,
+ right)
+
+ self.setSeed()
+ actual = triangular(left, mode * 3, right)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode * 3, right)
+ assert_raises(ValueError, triangular, left, bad_mode_one * 3, right)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two * 3,
+ right)
+
+ self.setSeed()
+ actual = triangular(left, mode, right * 3)
+ assert_array_almost_equal(actual, desired, decimal=14)
+ assert_raises(ValueError, triangular, bad_left_one, mode, right * 3)
+ assert_raises(ValueError, triangular, left, bad_mode_one, right * 3)
+ assert_raises(ValueError, triangular, bad_left_two, bad_mode_two,
+ right * 3)
+
+ assert_raises(ValueError, triangular, 10., 0., 20.)
+ assert_raises(ValueError, triangular, 10., 25., 20.)
+ assert_raises(ValueError, triangular, 10., 10., 10.)
+
+ def test_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ binom = random.binomial
+ desired = np.array([1, 1, 1])
+
+ self.setSeed()
+ actual = binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n * 3, p)
+ assert_raises(ValueError, binom, n * 3, bad_p_one)
+ assert_raises(ValueError, binom, n * 3, bad_p_two)
+
+ self.setSeed()
+ actual = binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, binom, bad_n, p * 3)
+ assert_raises(ValueError, binom, n, bad_p_one * 3)
+ assert_raises(ValueError, binom, n, bad_p_two * 3)
+
+ def test_negative_binomial(self):
+ n = [1]
+ p = [0.5]
+ bad_n = [-1]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ neg_binom = random.negative_binomial
+ desired = np.array([1, 0, 1])
+
+ self.setSeed()
+ actual = neg_binom(n * 3, p)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n * 3, p)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_one)
+ assert_raises(ValueError, neg_binom, n * 3, bad_p_two)
+
+ self.setSeed()
+ actual = neg_binom(n, p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, neg_binom, bad_n, p * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_one * 3)
+ assert_raises(ValueError, neg_binom, n, bad_p_two * 3)
+
+ def test_poisson(self):
+ max_lam = random.RandomState().poisson_lam_max
+
+ lam = [1]
+ bad_lam_one = [-1]
+ bad_lam_two = [max_lam * 2]
+ poisson = random.poisson
+ desired = np.array([1, 1, 0])
+
+ self.setSeed()
+ actual = poisson(lam * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, poisson, bad_lam_one * 3)
+ assert_raises(ValueError, poisson, bad_lam_two * 3)
+
+ def test_zipf(self):
+ a = [2]
+ bad_a = [0]
+ zipf = random.zipf
+ desired = np.array([2, 2, 1])
+
+ self.setSeed()
+ actual = zipf(a * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, zipf, bad_a * 3)
+ with np.errstate(invalid='ignore'):
+ assert_raises(ValueError, zipf, np.nan)
+ assert_raises(ValueError, zipf, [0, 0, np.nan])
+
+ def test_geometric(self):
+ p = [0.5]
+ bad_p_one = [-1]
+ bad_p_two = [1.5]
+ geom = random.geometric
+ desired = np.array([2, 2, 2])
+
+ self.setSeed()
+ actual = geom(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, geom, bad_p_one * 3)
+ assert_raises(ValueError, geom, bad_p_two * 3)
+
+ def test_hypergeometric(self):
+ ngood = [1]
+ nbad = [2]
+ nsample = [2]
+ bad_ngood = [-1]
+ bad_nbad = [-2]
+ bad_nsample_one = [0]
+ bad_nsample_two = [4]
+ hypergeom = random.hypergeometric
+ desired = np.array([1, 1, 1])
+
+ self.setSeed()
+ actual = hypergeom(ngood * 3, nbad, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood * 3, nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, bad_nbad, nsample)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood * 3, nbad, bad_nsample_two)
+
+ self.setSeed()
+ actual = hypergeom(ngood, nbad * 3, nsample)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad * 3, nsample)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_one)
+ assert_raises(ValueError, hypergeom, ngood, nbad * 3, bad_nsample_two)
+
+ self.setSeed()
+ actual = hypergeom(ngood, nbad, nsample * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, hypergeom, bad_ngood, nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, bad_nbad, nsample * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_one * 3)
+ assert_raises(ValueError, hypergeom, ngood, nbad, bad_nsample_two * 3)
+
+ assert_raises(ValueError, hypergeom, 10, 10, 25)
+
+ def test_logseries(self):
+ p = [0.5]
+ bad_p_one = [2]
+ bad_p_two = [-1]
+ logseries = random.logseries
+ desired = np.array([1, 1, 1])
+
+ self.setSeed()
+ actual = logseries(p * 3)
+ assert_array_equal(actual, desired)
+ assert_raises(ValueError, logseries, bad_p_one * 3)
+ assert_raises(ValueError, logseries, bad_p_two * 3)
+
+
+class TestThread(object):
+ # make sure each state produces the same sequence even in threads
+ def setup(self):
+ self.seeds = range(4)
+
+ def check_function(self, function, sz):
+ from threading import Thread
+
+ out1 = np.empty((len(self.seeds),) + sz)
+ out2 = np.empty((len(self.seeds),) + sz)
+
+ # threaded generation
+ t = [Thread(target=function, args=(random.RandomState(s), o))
+ for s, o in zip(self.seeds, out1)]
+ [x.start() for x in t]
+ [x.join() for x in t]
+
+ # the same serial
+ for s, o in zip(self.seeds, out2):
+ function(random.RandomState(s), o)
+
+ # these platforms change x87 fpu precision mode in threads
+ if np.intp().dtype.itemsize == 4 and sys.platform == "win32":
+ assert_array_almost_equal(out1, out2)
+ else:
+ assert_array_equal(out1, out2)
+
+ def test_normal(self):
+ def gen_random(state, out):
+ out[...] = state.normal(size=10000)
+ self.check_function(gen_random, sz=(10000,))
+
+ def test_exp(self):
+ def gen_random(state, out):
+ out[...] = state.exponential(scale=np.ones((100, 1000)))
+ self.check_function(gen_random, sz=(100, 1000))
+
+ def test_multinomial(self):
+ def gen_random(state, out):
+ out[...] = state.multinomial(10, [1/6.]*6, size=10000)
+ self.check_function(gen_random, sz=(10000, 6))
+
+
+# See Issue #4263
+class TestSingleEltArrayInput(object):
+ def setup(self):
+ self.argOne = np.array([2])
+ self.argTwo = np.array([3])
+ self.argThree = np.array([4])
+ self.tgtShape = (1,)
+
+ def test_one_arg_funcs(self):
+ funcs = (random.exponential, random.standard_gamma,
+ random.chisquare, random.standard_t,
+ random.pareto, random.weibull,
+ random.power, random.rayleigh,
+ random.poisson, random.zipf,
+ random.geometric, random.logseries)
+
+ probfuncs = (random.geometric, random.logseries)
+
+ for func in funcs:
+ if func in probfuncs: # p < 1.0
+ out = func(np.array([0.5]))
+
+ else:
+ out = func(self.argOne)
+
+ assert_equal(out.shape, self.tgtShape)
+
+ def test_two_arg_funcs(self):
+ funcs = (random.uniform, random.normal,
+ random.beta, random.gamma,
+ random.f, random.noncentral_chisquare,
+ random.vonmises, random.laplace,
+ random.gumbel, random.logistic,
+ random.lognormal, random.wald,
+ random.binomial, random.negative_binomial)
+
+ probfuncs = (random.binomial, random.negative_binomial)
+
+ for func in funcs:
+ if func in probfuncs: # p <= 1
+ argTwo = np.array([0.5])
+
+ else:
+ argTwo = self.argTwo
+
+ out = func(self.argOne, argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], argTwo)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, argTwo[0])
+ assert_equal(out.shape, self.tgtShape)
+
+# TODO: Uncomment once randint can broadcast arguments
+# def test_randint(self):
+# itype = [bool, np.int8, np.uint8, np.int16, np.uint16,
+# np.int32, np.uint32, np.int64, np.uint64]
+# func = random.randint
+# high = np.array([1])
+# low = np.array([0])
+#
+# for dt in itype:
+# out = func(low, high, dtype=dt)
+# self.assert_equal(out.shape, self.tgtShape)
+#
+# out = func(low[0], high, dtype=dt)
+# self.assert_equal(out.shape, self.tgtShape)
+#
+# out = func(low, high[0], dtype=dt)
+# self.assert_equal(out.shape, self.tgtShape)
+
+ def test_three_arg_funcs(self):
+ funcs = [random.noncentral_f, random.triangular,
+ random.hypergeometric]
+
+ for func in funcs:
+ out = func(self.argOne, self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne[0], self.argTwo, self.argThree)
+ assert_equal(out.shape, self.tgtShape)
+
+ out = func(self.argOne, self.argTwo[0], self.argThree)
+ assert_equal(out.shape, self.tgtShape)
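The single-element-array tests above pin down one broadcasting rule: a length-1 array argument yields a length-1 output, even when mixed with scalars. A minimal sketch of that behaviour, using numpy.random.RandomState as a stand-in for the vendored mtrand module:

    import numpy as np

    rs = np.random.RandomState(123456789)
    out = rs.normal(np.array([2]), np.array([3]))   # both arguments are 1-element arrays
    assert out.shape == (1,)
    out = rs.normal(2, np.array([3]))               # scalar loc, 1-element scale
    assert out.shape == (1,)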
diff --git a/numpy/random/randomgen/tests/test_randomstate_regression.py b/numpy/random/randomgen/tests/test_randomstate_regression.py
new file mode 100644
index 000000000..cf21ee756
--- /dev/null
+++ b/numpy/random/randomgen/tests/test_randomstate_regression.py
@@ -0,0 +1,157 @@
+import sys
+from numpy.testing import (
+ assert_, assert_array_equal, assert_raises,
+ )
+from numpy.compat import long
+import numpy as np
+
+from ...randomgen import mtrand as random
+
+
+class TestRegression(object):
+
+ def test_VonMises_range(self):
+ # Make sure generated random variables are in [-pi, pi].
+ # Regression test for ticket #986.
+ for mu in np.linspace(-7., 7., 5):
+ r = random.vonmises(mu, 1, 50)
+ assert_(np.all(r > -np.pi) and np.all(r <= np.pi))
+
+ def test_hypergeometric_range(self):
+ # Test for ticket #921
+ assert_(np.all(random.hypergeometric(3, 18, 11, size=10) < 4))
+ assert_(np.all(random.hypergeometric(18, 3, 11, size=10) > 0))
+
+ # Test for ticket #5623
+ args = [
+ (2**20 - 2, 2**20 - 2, 2**20 - 2), # Check for 32-bit systems
+ ]
+ is_64bits = sys.maxsize > 2**32
+ if is_64bits and sys.platform != 'win32':
+ # Check for 64-bit systems
+ args.append((2**40 - 2, 2**40 - 2, 2**40 - 2))
+ for arg in args:
+ assert_(random.hypergeometric(*arg) > 0)
+
+ def test_logseries_convergence(self):
+ # Test for ticket #923
+ N = 1000
+ random.seed(0)
+ rvsn = random.logseries(0.8, size=N)
+ # these two frequency counts should be close to theoretical
+ # numbers with this large sample
+ # theoretical large N result is 0.49706795
+ freq = np.sum(rvsn == 1) / float(N)
+ msg = "Frequency was %f, should be > 0.45" % freq
+ assert_(freq > 0.45, msg)
+ # theoretical large N result is 0.19882718
+ freq = np.sum(rvsn == 2) / float(N)
+ msg = "Frequency was %f, should be < 0.23" % freq
+ assert_(freq < 0.23, msg)
+
+ def test_permutation_longs(self):
+ random.seed(1234)
+ a = random.permutation(12)
+ random.seed(1234)
+ b = random.permutation(long(12))
+ assert_array_equal(a, b)
+
+ def test_shuffle_mixed_dimension(self):
+ # Test for trac ticket #2074
+ for t in [[1, 2, 3, None],
+ [(1, 1), (2, 2), (3, 3), None],
+ [1, (2, 2), (3, 3), None],
+ [(1, 1), 2, 3, None]]:
+ random.seed(12345)
+ shuffled = list(t)
+ random.shuffle(shuffled)
+ assert_array_equal(shuffled, [t[0], t[3], t[1], t[2]])
+
+ def test_call_within_randomstate(self):
+ # Check that custom RandomState does not call into global state
+ m = random.RandomState()
+ res = np.array([0, 8, 7, 2, 1, 9, 4, 7, 0, 3])
+ for i in range(3):
+ random.seed(i)
+ m.seed(4321)
+ # If m.state is not honored, the result will change
+ assert_array_equal(m.choice(10, size=10, p=np.ones(10)/10.), res)
+
+ def test_multivariate_normal_size_types(self):
+ # Test for multivariate_normal issue with 'size' argument.
+ # Check that the multivariate_normal size argument can be a
+ # numpy integer.
+ random.multivariate_normal([0], [[0]], size=1)
+ random.multivariate_normal([0], [[0]], size=np.int_(1))
+ random.multivariate_normal([0], [[0]], size=np.int64(1))
+
+ def test_beta_small_parameters(self):
+ # Test that beta with small a and b parameters does not produce
+ # NaNs due to roundoff errors causing 0 / 0, gh-5851
+ random.seed(1234567890)
+ x = random.beta(0.0001, 0.0001, size=100)
+ assert_(not np.any(np.isnan(x)), 'Nans in random.beta')
+
+ def test_choice_sum_of_probs_tolerance(self):
+ # The sum of probs should be 1.0 with some tolerance.
+ # For low precision dtypes the tolerance was too tight.
+ # See numpy github issue 6123.
+ random.seed(1234)
+ a = [1, 2, 3]
+ counts = [4, 4, 2]
+ for dt in np.float16, np.float32, np.float64:
+ probs = np.array(counts, dtype=dt) / sum(counts)
+ c = random.choice(a, p=probs)
+ assert_(c in a)
+ assert_raises(ValueError, random.choice, a, p=probs*0.9)
+
+ def test_shuffle_of_array_of_different_length_strings(self):
+ # Test that permuting an array of different length strings
+ # will not cause a segfault on garbage collection
+ # Tests gh-7710
+ random.seed(1234)
+
+ a = np.array(['a', 'a' * 1000])
+
+ for _ in range(100):
+ random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_shuffle_of_array_of_objects(self):
+ # Test that permuting an array of objects will not cause
+ # a segfault on garbage collection.
+ # See gh-7719
+ random.seed(1234)
+ a = np.array([np.arange(1), np.arange(4)])
+
+ for _ in range(1000):
+ random.shuffle(a)
+
+ # Force Garbage Collection - should not segfault.
+ import gc
+ gc.collect()
+
+ def test_permutation_subclass(self):
+ class N(np.ndarray):
+ pass
+
+ random.seed(1)
+ orig = np.arange(3).view(N)
+ perm = random.permutation(orig)
+ assert_array_equal(perm, np.array([0, 2, 1]))
+ assert_array_equal(orig, np.arange(3).view(N))
+
+ class M(object):
+ a = np.arange(5)
+
+ def __array__(self):
+ return self.a
+
+ random.seed(1)
+ m = M()
+ perm = random.permutation(m)
+ assert_array_equal(perm, np.array([2, 1, 4, 0, 3]))
+ assert_array_equal(m.__array__(), np.arange(5))
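Several of these regression tests depend on an explicitly constructed RandomState keeping its own state no matter what happens to the module-level generator. A minimal sketch of that invariant, written against numpy.random as a stand-in for the vendored module:

    import numpy as np

    m = np.random.RandomState(4321)
    first = m.choice(10, size=10, p=np.ones(10) / 10.)
    np.random.seed(0)    # perturb the global state
    m.seed(4321)         # reset only the instance
    second = m.choice(10, size=10, p=np.ones(10) / 10.)
    assert (first == second).all()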
diff --git a/_randomgen/randomgen/tests/test_smoke.py b/numpy/random/randomgen/tests/test_smoke.py
index 5a9882a16..816ce6ddc 100644
--- a/_randomgen/randomgen/tests/test_smoke.py
+++ b/numpy/random/randomgen/tests/test_smoke.py
@@ -2,17 +2,15 @@ import os
import pickle
import sys
import time
+from functools import partial
import numpy as np
import pytest
-from numpy.testing import assert_almost_equal, assert_equal, assert_, \
- assert_array_equal
-
-from randomgen._testing import suppress_warnings
-from randomgen import RandomGenerator, MT19937, DSFMT, ThreeFry32, ThreeFry, \
- PCG32, PCG64, Philox, Xoroshiro128, Xorshift1024, Xoshiro256StarStar, \
- Xoshiro512StarStar
-from randomgen import entropy
+from numpy.testing import (assert_almost_equal, assert_equal, assert_,
+ assert_array_equal, suppress_warnings)
+from ...randomgen import (RandomGenerator, MT19937, DSFMT, ThreeFry32, ThreeFry,
+ PCG32, PCG64, Philox, Xoroshiro128, Xorshift1024, Xoshiro256StarStar,
+ Xoshiro512StarStar, entropy)
@pytest.fixture(scope='module',
@@ -155,13 +153,6 @@ class RNG(object):
brng_name = self.rg._basicrng.__class__.__name__
pytest.skip('Jump is not supported by {0}'.format(brng_name))
- def test_random_uintegers(self):
- assert_(len(self.rg.random_uintegers(10)) == 10)
-
- def test_random_raw(self):
- assert_(len(self.rg.random_raw(10)) == 10)
- assert_(self.rg.random_raw((10, 10)).shape == (10, 10))
-
def test_uniform(self):
r = self.rg.uniform(-1.0, 0.0, size=10)
assert_(len(r) == 10)
@@ -203,6 +194,20 @@ class RNG(object):
assert_(len(self.rg.standard_exponential(10)) == 10)
params_0(self.rg.standard_exponential)
+ def test_standard_exponential_float(self):
+ randoms = self.rg.standard_exponential(10, dtype='float32')
+ assert_(len(randoms) == 10)
+ assert randoms.dtype == np.float32
+ params_0(partial(self.rg.standard_exponential, dtype='float32'))
+
+ def test_standard_exponential_float_log(self):
+ randoms = self.rg.standard_exponential(10, dtype='float32',
+ method='inv')
+ assert_(len(randoms) == 10)
+ assert randoms.dtype == np.float32
+ params_0(partial(self.rg.standard_exponential, dtype='float32',
+ method='inv'))
+
def test_standard_cauchy(self):
assert_(len(self.rg.standard_cauchy(10)) == 10)
params_0(self.rg.standard_cauchy)
@@ -217,9 +222,9 @@ class RNG(object):
def test_reset_state(self):
state = self.rg.state
- int_1 = self.rg.random_raw(1)
+ int_1 = self.rg.randint(2**31)
self.rg.state = state
- int_2 = self.rg.random_raw(1)
+ int_2 = self.rg.randint(2**31)
assert_(int_1 == int_2)
def test_entropy_init(self):
@@ -232,11 +237,6 @@ class RNG(object):
rg2 = RandomGenerator(self.brng(*self.seed))
rg.random_sample()
rg2.random_sample()
- if not comp_state(rg.state, rg2.state):
- for key in rg.state:
- print(key)
- print(rg.state[key])
- print(rg2.state[key])
assert_(comp_state(rg.state, rg2.state))
def test_reset_state_gauss(self):
@@ -259,14 +259,14 @@ class RNG(object):
n2 = rg2.randint(0, 2 ** 24, 10, dtype=np.uint32)
assert_array_equal(n1, n2)
- def test_reset_state_uintegers(self):
+ def test_reset_state_float(self):
rg = RandomGenerator(self.brng(*self.seed))
- rg.random_uintegers(bits=32)
+ rg.random_sample(dtype='float32')
state = rg.state
- n1 = rg.random_uintegers(bits=32, size=10)
+ n1 = rg.random_sample(size=10, dtype='float32')
rg2 = RandomGenerator(self.brng())
rg2.state = state
- n2 = rg2.random_uintegers(bits=32, size=10)
+ n2 = rg2.random_sample(size=10, dtype='float32')
assert_((n1 == n2).all())
def test_shuffle(self):
@@ -315,70 +315,6 @@ class RNG(object):
assert_(len(vals) == 10)
params_1(self.rg.chisquare)
- def test_complex_normal(self):
- st = self.rg.state
- vals = self.rg.complex_normal(
- 2.0 + 7.0j, 10.0, 5.0 - 5.0j, size=10)
- assert_(len(vals) == 10)
-
- self.rg.state = st
- vals2 = [self.rg.complex_normal(
- 2.0 + 7.0j, 10.0, 5.0 - 5.0j) for _ in range(10)]
- np.testing.assert_allclose(vals, vals2)
-
- self.rg.state = st
- vals3 = self.rg.complex_normal(
- 2.0 + 7.0j * np.ones(10), 10.0 * np.ones(1), 5.0 - 5.0j)
- np.testing.assert_allclose(vals, vals3)
-
- self.rg.state = st
- norms = self.rg.standard_normal(size=20)
- norms = np.reshape(norms, (10, 2))
- cov = 0.5 * (-5.0)
- v_real = 7.5
- v_imag = 2.5
- rho = cov / np.sqrt(v_real * v_imag)
- imag = 7 + np.sqrt(v_imag) * (rho *
- norms[:, 0] + np.sqrt(1 - rho ** 2) *
- norms[:, 1])
- real = 2 + np.sqrt(v_real) * norms[:, 0]
- vals4 = [re + im * (0 + 1.0j) for re, im in zip(real, imag)]
-
- np.testing.assert_allclose(vals4, vals)
-
- def test_complex_normal_bm(self):
- st = self.rg.state
- vals = self.rg.complex_normal(
- 2.0 + 7.0j, 10.0, 5.0 - 5.0j, size=10)
- assert_(len(vals) == 10)
-
- self.rg.state = st
- vals2 = [self.rg.complex_normal(
- 2.0 + 7.0j, 10.0, 5.0 - 5.0j) for _ in range(10)]
- np.testing.assert_allclose(vals, vals2)
-
- self.rg.state = st
- vals3 = self.rg.complex_normal(
- 2.0 + 7.0j * np.ones(10), 10.0 * np.ones(1), 5.0 - 5.0j)
- np.testing.assert_allclose(vals, vals3)
-
- def test_complex_normal_zero_variance(self):
- st = self.rg.state
- c = self.rg.complex_normal(0, 1.0, 1.0)
- assert_almost_equal(c.imag, 0.0)
- self.rg.state = st
- n = self.rg.standard_normal()
- np.testing.assert_allclose(c, n, atol=1e-8)
-
- st = self.rg.state
- c = self.rg.complex_normal(0, 1.0, -1.0)
- assert_almost_equal(c.real, 0.0)
- self.rg.state = st
- self.rg.standard_normal()
- n = self.rg.standard_normal()
- assert_almost_equal(c.real, 0.0)
- np.testing.assert_allclose(c.imag, n, atol=1e-8)
-
def test_exponential(self):
vals = self.rg.exponential(2.0, 10)
assert_(len(vals) == 10)
@@ -682,6 +618,10 @@ class RNG(object):
direct = rg.standard_normal(size=size)
assert_equal(direct, existing)
+ sized = np.empty(size)
+ rg.state = state
+ rg.standard_normal(out=sized, size=sized.shape)
+
existing = np.empty(size, dtype=np.float32)
rg.state = state
rg.standard_normal(out=existing, dtype=np.float32)
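The smoke tests lean on one pattern throughout: snapshot the generator state, draw, restore the snapshot, draw again, and require identical output. A hedged sketch, assuming the RandomGenerator and Xoroshiro128 names introduced by this patch are importable from the vendored package:

    from numpy.random.randomgen import RandomGenerator, Xoroshiro128

    rg = RandomGenerator(Xoroshiro128(12345))
    state = rg.state        # snapshot of the basic RNG's state dict
    a = rg.randint(2 ** 31)
    rg.state = state        # rewind to the snapshot
    b = rg.randint(2 ** 31)
    assert a == b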
diff --git a/_randomgen/randomgen/threefry.pyx b/numpy/random/randomgen/threefry.pyx
index 96e65f625..8140c6a9b 100644
--- a/_randomgen/randomgen/threefry.pyx
+++ b/numpy/random/randomgen/threefry.pyx
@@ -1,15 +1,16 @@
-from __future__ import absolute_import
-
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
import numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy, seed_by_array
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy, seed_by_array
np.import_array()
@@ -25,10 +26,10 @@ cdef extern from 'src/threefry/threefry.h':
ctypedef r123array4x64 threefry4x64_ctr_t
struct s_threefry_state:
- threefry4x64_ctr_t *ctr;
- threefry4x64_key_t *key;
- int buffer_pos;
- uint64_t buffer[THREEFRY_BUFFER_SIZE];
+ threefry4x64_ctr_t *ctr
+ threefry4x64_key_t *key
+ int buffer_pos
+ uint64_t buffer[THREEFRY_BUFFER_SIZE]
int has_uint32
uint32_t uinteger
@@ -154,13 +155,13 @@ cdef class ThreeFry:
the International Conference for High Performance Computing,
Networking, Storage and Analysis (SC11), New York, NY: ACM, 2011.
"""
- cdef threefry_state *rng_state
+ cdef threefry_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef object _ctypes
cdef object _cffi
cdef object _generator
-
+ cdef public object lock
def __init__(self, seed=None, counter=None, key=None):
self.rng_state = <threefry_state *>malloc(sizeof(threefry_state))
@@ -168,6 +169,7 @@ cdef class ThreeFry:
self.rng_state.key = <threefry4x64_key_t *>malloc(sizeof(threefry4x64_key_t))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed, counter, key)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &threefry_uint64
@@ -190,7 +192,8 @@ cdef class ThreeFry:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -207,16 +210,39 @@ cdef class ThreeFry:
for i in range(THREEFRY_BUFFER_SIZE):
self.rng_state.buffer[i] = 0
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Used for performance
+ testing, where the generated values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(self._brng, self.lock, size, output)
+
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None, counter=None, key=None):
"""
@@ -294,7 +320,7 @@ cdef class ThreeFry:
key[i] = self.rng_state.key.v[i]
for i in range(THREEFRY_BUFFER_SIZE):
buffer[i] = self.rng_state.buffer[i]
- state = {'counter':ctr,'key':key}
+ state = {'counter': ctr, 'key': key}
return {'brng': self.__class__.__name__,
'state': state,
'buffer': buffer,
@@ -385,14 +411,15 @@ cdef class ThreeFry:
self._reset_state_variables()
return self
+ @property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -401,25 +428,10 @@ cdef class ThreeFry:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&threefry_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&threefry_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&threefry_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -440,19 +452,8 @@ cdef class ThreeFry:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -461,10 +462,10 @@ cdef class ThreeFry:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator that uses this instance as the core PRNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator \ No newline at end of file
+ return self._generator
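The random_raw method added here exposes the ThreeFry stream directly, without going through a distribution. A usage sketch, assuming the basic RNG is importable from the vendored package as laid out in this patch:

    from numpy.random.randomgen import ThreeFry

    brng = ThreeFry(seed=12345)
    single = brng.random_raw()          # one raw unsigned 64-bit value
    block = brng.random_raw(10)         # ndarray of 10 raw values
    brng.random_raw(10, output=False)   # draw for timing only; values are not returned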
diff --git a/_randomgen/randomgen/threefry32.pyx b/numpy/random/randomgen/threefry32.pyx
index 62c0e9cd8..1fa98eabd 100644
--- a/_randomgen/randomgen/threefry32.pyx
+++ b/numpy/random/randomgen/threefry32.pyx
@@ -1,14 +1,15 @@
-from __future__ import absolute_import
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
import numpy as np
from cpython.pycapsule cimport PyCapsule_New
from libc.stdlib cimport malloc, free
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy, seed_by_array
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy, seed_by_array
np.import_array()
@@ -24,10 +25,10 @@ cdef extern from 'src/threefry32/threefry32.h':
ctypedef r123array4x32 threefry4x32_ctr_t
struct s_threefry32_state:
- threefry4x32_ctr_t *ctr;
- threefry4x32_key_t *key;
- int buffer_pos;
- uint32_t buffer[THREEFRY_BUFFER_SIZE];
+ threefry4x32_ctr_t *ctr
+ threefry4x32_key_t *key
+ int buffer_pos
+ uint32_t buffer[THREEFRY_BUFFER_SIZE]
ctypedef s_threefry32_state threefry32_state
@@ -157,12 +158,13 @@ cdef class ThreeFry32:
the International Conference for High Performance Computing,
Networking, Storage and Analysis (SC11), New York, NY: ACM, 2011.
"""
- cdef threefry32_state *rng_state
+ cdef threefry32_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None, counter=None, key=None):
self.rng_state = <threefry32_state *> malloc(sizeof(threefry32_state))
@@ -170,6 +172,7 @@ cdef class ThreeFry32:
self.rng_state.key = <threefry4x32_key_t *> malloc(sizeof(threefry4x32_key_t))
self._brng = <brng_t *> malloc(sizeof(brng_t))
self.seed(seed, counter, key)
+ self.lock = Lock()
self._brng.state = <void *> self.rng_state
self._brng.next_uint64 = &threefry32_uint64
@@ -192,7 +195,8 @@ cdef class ThreeFry32:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -207,16 +211,39 @@ cdef class ThreeFry32:
for i in range(THREEFRY_BUFFER_SIZE):
self.rng_state.buffer[i] = 0
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Used for performance
+ testing, where the generated values are not returned.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(self._brng, self.lock, size, output)
+
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method == u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method == u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None, counter=None, key=None):
"""
@@ -380,14 +407,15 @@ cdef class ThreeFry32:
self._reset_state_variables()
return self
+ @property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -396,25 +424,10 @@ cdef class ThreeFry32:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<Py_ssize_t> self.rng_state,
- ctypes.c_void_p(<Py_ssize_t> self.rng_state),
- ctypes.cast(<Py_ssize_t> &threefry32_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<Py_ssize_t> &threefry32_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<Py_ssize_t> &threefry32_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<Py_ssize_t> self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -435,22 +448,8 @@ cdef class ThreeFry32:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<Py_ssize_t> self.rng_state,
- ffi.cast('void *', <Py_ssize_t> self.rng_state),
- ffi.cast('uint64_t (*)(void *)',
- <uint64_t> self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',
- <uint64_t> self._brng.next_uint32),
- ffi.cast('double (*)(void *)',
- <uint64_t> self._brng.next_double),
- ffi.cast('void *', <Py_ssize_t> self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -459,7 +458,7 @@ cdef class ThreeFry32:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the core PRNG
"""
if self._generator is None:
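The ThreeFry32 hunks above add a per-instance lock, route pickling through ._pickle.__brng_ctor, and expose the shared random_raw helper. A minimal usage sketch of the new method follows; the numpy.random.randomgen import path and the seed value are assumptions taken from this patch, not verified API:

# Hedged sketch: module path and seed are illustrative only.
from numpy.random.randomgen import ThreeFry32

brng = ThreeFry32(seed=12345)
single = brng.random_raw()                 # one raw draw, returned as an unsigned 64-bit value
block = brng.random_raw(size=10)           # ndarray of 10 raw draws
brng.random_raw(size=10**6, output=False)  # advance the state without returning values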
diff --git a/_randomgen/randomgen/xoroshiro128.pyx b/numpy/random/randomgen/xoroshiro128.pyx
index 57760c53a..7795500e8 100644
--- a/_randomgen/randomgen/xoroshiro128.pyx
+++ b/numpy/random/randomgen/xoroshiro128.pyx
@@ -1,4 +1,7 @@
-from __future__ import absolute_import
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
@@ -6,26 +9,24 @@ from cpython.pycapsule cimport PyCapsule_New
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy, seed_by_array
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy, seed_by_array
np.import_array()
cdef extern from "src/xoroshiro128/xoroshiro128.h":
struct s_xoroshiro128_state:
- uint64_t s[2]
- int has_uint32
- uint32_t uinteger
+ uint64_t s[2]
+ int has_uint32
+ uint32_t uinteger
ctypedef s_xoroshiro128_state xoroshiro128_state
uint64_t xoroshiro128_next64(xoroshiro128_state *state) nogil
uint32_t xoroshiro128_next32(xoroshiro128_state *state) nogil
- void xoroshiro128_jump(xoroshiro128_state *state)
+ void xoroshiro128_jump(xoroshiro128_state *state)
cdef uint64_t xoroshiro128_uint64(void* st) nogil:
return xoroshiro128_next64(<xoroshiro128_state *>st)
@@ -121,17 +122,19 @@ cdef class Xoroshiro128:
.. [1] "xoroshiro+ / xorshift* / xorshift+ generators and the PRNG shootout",
http://xorshift.di.unimi.it/
"""
- cdef xoroshiro128_state *rng_state
+ cdef xoroshiro128_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None):
self.rng_state = <xoroshiro128_state *>malloc(sizeof(xoroshiro128_state))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &xoroshiro128_uint64
@@ -154,7 +157,8 @@ cdef class Xoroshiro128:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -166,17 +170,39 @@ cdef class Xoroshiro128:
self.rng_state.has_uint32 = 0
self.rng_state.uinteger = 0
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Set to False for
+ performance testing, since the drawn values are then discarded.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(self._brng, self.lock, size, output)
+
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- """Private benchmark command"""
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None):
"""
@@ -198,7 +224,7 @@ cdef class Xoroshiro128:
ValueError
If seed values are out of range for the PRNG.
"""
- ub = 2 ** 64
+ ub = 2 ** 64
if seed is None:
try:
state = random_entropy(4)
@@ -273,12 +299,12 @@ cdef class Xoroshiro128:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -288,24 +314,10 @@ cdef class Xoroshiro128:
* brng - pointer to the Basic RNG struct
"""
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&xoroshiro128_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xoroshiro128_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xoroshiro128_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
+
+ return self._ctypes
@property
def cffi(self):
@@ -326,19 +338,8 @@ cdef class Xoroshiro128:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -347,10 +348,10 @@ cdef class Xoroshiro128:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the basic RNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator
\ No newline at end of file
+ return self._generator
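The ctypes and cffi properties now cache a namedtuple built by the shared prepare_ctypes/prepare_cffi helpers instead of constructing it inline. A hedged sketch of consuming the cached ctypes interface; the field names are taken from the docstring above, and the seed is illustrative:

# Illustrative only; bypasses the lock, as the raw function pointers do.
rng = Xoroshiro128(seed=0)
iface = rng.ctypes                    # built lazily on first access, then cached in _ctypes
assert iface is rng.ctypes            # later accesses return the cached namedtuple

raw = iface.next_uint64(iface.state)  # call the function pointer with the state pointer
print(hex(raw))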
diff --git a/_randomgen/randomgen/xorshift1024.pyx b/numpy/random/randomgen/xorshift1024.pyx
index e7ad546e0..3c7ffac52 100644
--- a/_randomgen/randomgen/xorshift1024.pyx
+++ b/numpy/random/randomgen/xorshift1024.pyx
@@ -1,4 +1,7 @@
-from __future__ import absolute_import
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
@@ -6,27 +9,25 @@ from cpython.pycapsule cimport PyCapsule_New
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy, seed_by_array
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy, seed_by_array
np.import_array()
cdef extern from "src/xorshift1024/xorshift1024.h":
struct s_xorshift1024_state:
- uint64_t s[16]
- int p
- int has_uint32
- uint32_t uinteger
+ uint64_t s[16]
+ int p
+ int has_uint32
+ uint32_t uinteger
ctypedef s_xorshift1024_state xorshift1024_state
uint64_t xorshift1024_next64(xorshift1024_state *state) nogil
uint32_t xorshift1024_next32(xorshift1024_state *state) nogil
- void xorshift1024_jump(xorshift1024_state *state)
+ void xorshift1024_jump(xorshift1024_state *state)
cdef uint64_t xorshift1024_uint64(void* st) nogil:
return xorshift1024_next64(<xorshift1024_state *>st)
@@ -127,17 +128,19 @@ cdef class Xorshift1024:
generators." CoRR, abs/1403.0930, 2014.
"""
- cdef xorshift1024_state *rng_state
+ cdef xorshift1024_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None):
self.rng_state = <xorshift1024_state *>malloc(sizeof(xorshift1024_state))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &xorshift1024_uint64
@@ -160,7 +163,8 @@ cdef class Xorshift1024:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -172,41 +176,39 @@ cdef class Xorshift1024:
self.rng_state.has_uint32 = 0
self.rng_state.uinteger = 0
- def __random_integer(self, bits=64):
+ def random_raw(self, size=None, output=True):
"""
- 64-bit Random Integers from the PRNG
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
Parameters
----------
- bits : {32, 64}
- Number of random bits to return
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Set to False for
+ performance testing, since the drawn values are then discarded.
Returns
-------
- rv : int
- Next random value
+ out : uint or ndarray
+ Drawn samples.
Notes
-----
- Testing only
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
"""
- if bits == 64:
- return self._brng.next_uint64(self._brng.state)
- elif bits == 32:
- return self._brng.next_uint32(self._brng.state)
- else:
- raise ValueError('bits must be 32 or 64')
+ return random_raw(self._brng, self.lock, size, output)
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None):
"""
@@ -229,7 +231,7 @@ cdef class Xorshift1024:
If seed values are out of range for the PRNG.
"""
- ub = 2 ** 64
+ ub = 2 ** 64
if seed is None:
try:
state = random_entropy(32)
@@ -285,7 +287,7 @@ cdef class Xorshift1024:
for i in range(16):
s[i] = self.rng_state.s[i]
return {'brng': self.__class__.__name__,
- 'state': {'s':s,'p':self.rng_state.p},
+ 'state': {'s': s, 'p': self.rng_state.p},
'has_uint32': self.rng_state.has_uint32,
'uinteger': self.rng_state.uinteger}
@@ -306,12 +308,12 @@ cdef class Xorshift1024:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -320,25 +322,10 @@ cdef class Xorshift1024:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&xorshift1024_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xorshift1024_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xorshift1024_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -359,19 +346,8 @@ cdef class Xorshift1024:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -380,10 +356,10 @@ cdef class Xorshift1024:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the core PRNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator
\ No newline at end of file
+ return self._generator
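__reduce__ now imports __brng_ctor lazily from ._pickle, so pickling no longer depends on the old randomgen.pickle module path. A rough round-trip sketch, with a hypothetical seed, relying only on the state dict shown in the hunks above:

import pickle

rng = Xorshift1024(seed=42)
rng.random_raw(size=3)                   # advance the state a little
clone = pickle.loads(pickle.dumps(rng))  # rebuilt via _pickle.__brng_ctor and the state dict

# Both objects should continue the raw stream from the same point.
assert clone.random_raw() == rng.random_raw()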
diff --git a/_randomgen/randomgen/xoshiro256starstar.pyx b/numpy/random/randomgen/xoshiro256starstar.pyx
index 33ecb50f1..c3856b6f7 100644
--- a/_randomgen/randomgen/xoshiro256starstar.pyx
+++ b/numpy/random/randomgen/xoshiro256starstar.pyx
@@ -1,31 +1,32 @@
-from __future__ import absolute_import
-
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy, seed_by_array
-import randomgen.pickle
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
+
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy, seed_by_array
np.import_array()
cdef extern from "src/xoshiro256starstar/xoshiro256starstar.h":
struct s_xoshiro256starstar_state:
- uint64_t s[4]
- int has_uint32
- uint32_t uinteger
+ uint64_t s[4]
+ int has_uint32
+ uint32_t uinteger
ctypedef s_xoshiro256starstar_state xoshiro256starstar_state
uint64_t xoshiro256starstar_next64(xoshiro256starstar_state *state) nogil
uint32_t xoshiro256starstar_next32(xoshiro256starstar_state *state) nogil
- void xoshiro256starstar_jump(xoshiro256starstar_state *state)
+ void xoshiro256starstar_jump(xoshiro256starstar_state *state)
cdef uint64_t xoshiro256starstar_uint64(void* st) nogil:
return xoshiro256starstar_next64(<xoshiro256starstar_state *>st)
@@ -121,17 +122,19 @@ cdef class Xoshiro256StarStar:
.. [1] "xoroshiro+ / xorshift* / xorshift+ generators and the PRNG shootout",
http://xorshift.di.unimi.it/
"""
- cdef xoshiro256starstar_state *rng_state
+ cdef xoshiro256starstar_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None):
self.rng_state = <xoshiro256starstar_state *>malloc(sizeof(xoshiro256starstar_state))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &xoshiro256starstar_uint64
@@ -154,7 +157,8 @@ cdef class Xoshiro256StarStar:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -166,17 +170,39 @@ cdef class Xoshiro256StarStar:
self.rng_state.has_uint32 = 0
self.rng_state.uinteger = 0
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Set to False for
+ performance testing, since the drawn values are then discarded.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(self._brng, self.lock, size, output)
+
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- """Private benchmark command"""
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None):
"""
@@ -198,7 +224,7 @@ cdef class Xoshiro256StarStar:
ValueError
If seed values are out of range for the PRNG.
"""
- ub = 2 ** 64
+ ub = 2 ** 64
if seed is None:
try:
state = random_entropy(8)
@@ -279,12 +305,12 @@ cdef class Xoshiro256StarStar:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -293,25 +319,10 @@ cdef class Xoshiro256StarStar:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&xoshiro256starstar_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xoshiro256starstar_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xoshiro256starstar_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -332,19 +343,8 @@ cdef class Xoshiro256StarStar:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -353,10 +353,10 @@ cdef class Xoshiro256StarStar:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the basic RNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator
\ No newline at end of file
+ return self._generator
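The generator property is unchanged apart from the documented return type: it lazily wraps the basic RNG in a RandomGenerator and caches it. A hedged sketch; the seed is illustrative and standard_normal is assumed to exist on the generator as in randomgen:

rng = Xoshiro256StarStar(seed=2018)
gen = rng.generator             # RandomGenerator using this instance as its basic RNG
draws = gen.standard_normal(5)  # distribution methods go through the shared generator
assert gen is rng.generator     # the wrapper is created once and cached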
diff --git a/_randomgen/randomgen/xoshiro512starstar.pyx b/numpy/random/randomgen/xoshiro512starstar.pyx
index 17fe3c420..761d1f1d0 100644
--- a/_randomgen/randomgen/xoshiro512starstar.pyx
+++ b/numpy/random/randomgen/xoshiro512starstar.pyx
@@ -1,4 +1,7 @@
-from __future__ import absolute_import
+try:
+ from threading import Lock
+except ImportError:
+ from dummy_threading import Lock
from libc.stdlib cimport malloc, free
from cpython.pycapsule cimport PyCapsule_New
@@ -6,26 +9,24 @@ from cpython.pycapsule cimport PyCapsule_New
import numpy as np
cimport numpy as np
-from randomgen.common import interface
-from randomgen.common cimport *
-from randomgen.distributions cimport brng_t
-from randomgen.entropy import random_entropy, seed_by_array
-import randomgen.pickle
+from .common cimport *
+from .distributions cimport brng_t
+from .entropy import random_entropy, seed_by_array
np.import_array()
cdef extern from "src/xoshiro512starstar/xoshiro512starstar.h":
struct s_xoshiro512starstar_state:
- uint64_t s[8]
- int has_uint32
- uint32_t uinteger
+ uint64_t s[8]
+ int has_uint32
+ uint32_t uinteger
ctypedef s_xoshiro512starstar_state xoshiro512starstar_state
uint64_t xoshiro512starstar_next64(xoshiro512starstar_state *state) nogil
uint32_t xoshiro512starstar_next32(xoshiro512starstar_state *state) nogil
- void xoshiro512starstar_jump(xoshiro512starstar_state *state)
+ void xoshiro512starstar_jump(xoshiro512starstar_state *state)
cdef uint64_t xoshiro512starstar_uint64(void* st) nogil:
return xoshiro512starstar_next64(<xoshiro512starstar_state *>st)
@@ -121,17 +122,19 @@ cdef class Xoshiro512StarStar:
.. [1] "xoroshiro+ / xorshift* / xorshift+ generators and the PRNG shootout",
http://xorshift.di.unimi.it/
"""
- cdef xoshiro512starstar_state *rng_state
+ cdef xoshiro512starstar_state *rng_state
cdef brng_t *_brng
cdef public object capsule
cdef object _ctypes
cdef object _cffi
cdef object _generator
+ cdef public object lock
def __init__(self, seed=None):
self.rng_state = <xoshiro512starstar_state *>malloc(sizeof(xoshiro512starstar_state))
self._brng = <brng_t *>malloc(sizeof(brng_t))
self.seed(seed)
+ self.lock = Lock()
self._brng.state = <void *>self.rng_state
self._brng.next_uint64 = &xoshiro512starstar_uint64
@@ -154,7 +157,8 @@ cdef class Xoshiro512StarStar:
self.state = state
def __reduce__(self):
- return (randomgen.pickle.__brng_ctor,
+ from ._pickle import __brng_ctor
+ return (__brng_ctor,
(self.state['brng'],),
self.state)
@@ -166,17 +170,39 @@ cdef class Xoshiro512StarStar:
self.rng_state.has_uint32 = 0
self.rng_state.uinteger = 0
+ def random_raw(self, size=None, output=True):
+ """
+ random_raw(self, size=None, output=True)
+
+ Return randoms as generated by the underlying BasicRNG
+
+ Parameters
+ ----------
+ size : int or tuple of ints, optional
+ Output shape. If the given shape is, e.g., ``(m, n, k)``, then
+ ``m * n * k`` samples are drawn. Default is None, in which case a
+ single value is returned.
+ output : bool, optional
+ Whether to return the generated values. Set to False for
+ performance testing, since the drawn values are then discarded.
+
+ Returns
+ -------
+ out : uint or ndarray
+ Drawn samples.
+
+ Notes
+ -----
+ This method directly exposes the raw underlying pseudo-random
+ number generator. All values are returned as unsigned 64-bit
+ values irrespective of the number of bits produced by the PRNG.
+
+ See the class docstring for the number of bits returned.
+ """
+ return random_raw(self._brng, self.lock, size, output)
+
def _benchmark(self, Py_ssize_t cnt, method=u'uint64'):
- """Private benchmark command"""
- cdef Py_ssize_t i
- if method==u'uint64':
- for i in range(cnt):
- self._brng.next_uint64(self._brng.state)
- elif method==u'double':
- for i in range(cnt):
- self._brng.next_double(self._brng.state)
- else:
- raise ValueError('Unknown method')
+ return benchmark(self._brng, self.lock, cnt, method)
def seed(self, seed=None):
"""
@@ -198,7 +224,7 @@ cdef class Xoshiro512StarStar:
ValueError
If seed values are out of range for the PRNG.
"""
- ub = 2 ** 64
+ ub = 2 ** 64
if seed is None:
try:
state = random_entropy(16)
@@ -273,12 +299,12 @@ cdef class Xoshiro512StarStar:
@property
def ctypes(self):
"""
- Ctypes interface
+ ctypes interface
Returns
-------
interface : namedtuple
- Named tuple containing CFFI wrapper
+ Named tuple containing ctypes wrapper
* state_address - Memory address of the state struct
* state - pointer to the state struct
@@ -287,25 +313,10 @@ cdef class Xoshiro512StarStar:
* next_double - function pointer to produce doubles
* brng - pointer to the Basic RNG struct
"""
+ if self._ctypes is None:
+ self._ctypes = prepare_ctypes(self._brng)
- if self._ctypes is not None:
- return self._ctypes
-
- import ctypes
-
- self._ctypes = interface(<uintptr_t>self.rng_state,
- ctypes.c_void_p(<uintptr_t>self.rng_state),
- ctypes.cast(<uintptr_t>&xoshiro512starstar_uint64,
- ctypes.CFUNCTYPE(ctypes.c_uint64,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xoshiro512starstar_uint32,
- ctypes.CFUNCTYPE(ctypes.c_uint32,
- ctypes.c_void_p)),
- ctypes.cast(<uintptr_t>&xoshiro512starstar_double,
- ctypes.CFUNCTYPE(ctypes.c_double,
- ctypes.c_void_p)),
- ctypes.c_void_p(<uintptr_t>self._brng))
- return self.ctypes
+ return self._ctypes
@property
def cffi(self):
@@ -326,19 +337,8 @@ cdef class Xoshiro512StarStar:
"""
if self._cffi is not None:
return self._cffi
- try:
- import cffi
- except ImportError:
- raise ImportError('cffi is cannot be imported.')
-
- ffi = cffi.FFI()
- self._cffi = interface(<uintptr_t>self.rng_state,
- ffi.cast('void *',<uintptr_t>self.rng_state),
- ffi.cast('uint64_t (*)(void *)',<uintptr_t>self._brng.next_uint64),
- ffi.cast('uint32_t (*)(void *)',<uintptr_t>self._brng.next_uint32),
- ffi.cast('double (*)(void *)',<uintptr_t>self._brng.next_double),
- ffi.cast('void *',<uintptr_t>self._brng))
- return self.cffi
+ self._cffi = prepare_cffi(self._brng)
+ return self._cffi
@property
def generator(self):
@@ -347,10 +347,10 @@ cdef class Xoshiro512StarStar:
Returns
-------
- gen : randomgen.generator.RandomGenerator
+ gen : numpy.random.randomgen.generator.RandomGenerator
Random generator using this instance as the basic RNG
"""
if self._generator is None:
from .generator import RandomGenerator
self._generator = RandomGenerator(self)
- return self._generator
\ No newline at end of file
+ return self._generator
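Each basic RNG now stores a public lock (a threading.Lock, or the dummy_threading fallback) which random_raw and _benchmark hand to the shared helpers. A sketch of the intent, with hypothetical thread and draw counts:

import threading

rng = Xoshiro512StarStar(seed=7)

def worker(n):
    # random_raw passes rng.lock to the shared helper, so concurrent
    # callers do not advance the underlying C state simultaneously.
    rng.random_raw(size=n, output=False)

threads = [threading.Thread(target=worker, args=(10000,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()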
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index 394a70ead..481c4e380 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -2,6 +2,9 @@ from __future__ import division, print_function
from os.path import join
import sys
+import os
+import platform
+import struct
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
@@ -16,6 +19,7 @@ def needs_mingw_ftime_workaround():
return False
+
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
@@ -40,6 +44,7 @@ def configuration(parent_package='',top_path=None):
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
libs = []
+ defs.append(('NPY_NO_DEPRECATED_API', 0))
# Configure mtrand
config.add_extension('mtrand',
sources=[join('mtrand', x) for x in
@@ -55,9 +60,9 @@ def configuration(parent_package='',top_path=None):
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
+ config.add_subpackage('randomgen')
return config
-
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
diff --git a/setup.py b/setup.py
index e88723820..d7fa3f544 100755
--- a/setup.py
+++ b/setup.py
@@ -225,12 +225,13 @@ class sdist_checked(sdist):
def generate_cython():
cwd = os.path.abspath(os.path.dirname(__file__))
print("Cythonizing sources")
- p = subprocess.call([sys.executable,
- os.path.join(cwd, 'tools', 'cythonize.py'),
- 'numpy/random'],
- cwd=cwd)
- if p != 0:
- raise RuntimeError("Running cythonize failed!")
+ for d in ('mtrand', 'randomgen', 'randomgen/legacy'):
+ p = subprocess.call([sys.executable,
+ os.path.join(cwd, 'tools', 'cythonize.py'),
+ 'numpy/random/{0}'.format(d)],
+ cwd=cwd)
+ if p != 0:
+ raise RuntimeError("Running cythonize failed!")
def parse_setuppy_commands():
diff --git a/tools/cythonize.py b/tools/cythonize.py
index 9e2af840d..c81b72d25 100755
--- a/tools/cythonize.py
+++ b/tools/cythonize.py
@@ -52,7 +52,7 @@ except NameError:
# Rules
#
def process_pyx(fromfile, tofile):
- flags = ['--fast-fail']
+ flags = ['-3', '--fast-fail']
if tofile.endswith('.cxx'):
flags += ['--cplus']
@@ -99,6 +99,17 @@ def process_tempita_pyx(fromfile, tofile):
process_pyx(pyxfile, tofile)
+def process_tempita_pyd(fromfile, tofile):
+ import npy_tempita as tempita
+
+ assert fromfile.endswith('.pyd.in')
+ assert tofile.endswith('.pyd')
+ with open(fromfile, "r") as f:
+ tmpl = f.read()
+ pyxcontent = tempita.sub(tmpl)
+ with open(tofile, "w") as f:
+ f.write(pyxcontent)
+
def process_tempita_pxi(fromfile, tofile):
import npy_tempita as tempita
@@ -110,10 +121,24 @@ def process_tempita_pxi(fromfile, tofile):
with open(tofile, "w") as f:
f.write(pyxcontent)
+def process_tempita_pxd(fromfile, tofile):
+ import npy_tempita as tempita
+
+ assert fromfile.endswith('.pxd.in')
+ assert tofile.endswith('.pxd')
+ with open(fromfile, "r") as f:
+ tmpl = f.read()
+ pyxcontent = tempita.sub(tmpl)
+ with open(tofile, "w") as f:
+ f.write(pyxcontent)
+
rules = {
- # fromext : function
- '.pyx' : process_pyx,
- '.pyx.in' : process_tempita_pyx
+ # fromext : function, toext
+ '.pyx' : (process_pyx, '.c'),
+ '.pyx.in' : (process_tempita_pyx, '.c'),
+ '.pxi.in' : (process_tempita_pxi, '.pxi'),
+ '.pxd.in' : (process_tempita_pxd, '.pxd'),
+ '.pyd.in' : (process_tempita_pyd, '.pyd'),
}
#
# Hash db
@@ -179,38 +204,32 @@ def process(path, fromfile, tofile, processor_function, hash_db):
def find_process_files(root_dir):
hash_db = load_hashes(HASH_FILE)
- for cur_dir, dirs, files in os.walk(root_dir):
- # .pxi or .pxi.in files are most likely dependencies for
- # .pyx files, so we need to process them first
- files.sort(key=lambda name: (name.endswith('.pxi') or
- name.endswith('.pxi.in')),
- reverse=True)
-
- for filename in files:
- in_file = os.path.join(cur_dir, filename + ".in")
- if filename.endswith('.pyx') and os.path.isfile(in_file):
- continue
- elif filename.endswith('.pxi.in'):
- toext = '.pxi'
- fromext = '.pxi.in'
+ files = [x for x in os.listdir(root_dir) if not os.path.isdir(os.path.join(root_dir, x))]
+ # .pxi or .pxi.in files are most likely dependencies for
+ # .pyx files, so we need to process them first
+ files.sort(key=lambda name: (name.endswith('.pxi') or
+ name.endswith('.pxi.in') or
+ name.endswith('.pxd.in')),
+ reverse=True)
+
+ for filename in files:
+ in_file = os.path.join(root_dir, filename + ".in")
+ for fromext, value in rules.items():
+ if filename.endswith(fromext):
+ if not value:
+ break
+ function, toext = value
+ if toext == '.c':
+ with open(os.path.join(root_dir, filename), 'rb') as f:
+ data = f.read()
+ m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
+ if m:
+ toext = ".cxx"
fromfile = filename
- function = process_tempita_pxi
tofile = filename[:-len(fromext)] + toext
- process(cur_dir, fromfile, tofile, function, hash_db)
+ process(root_dir, fromfile, tofile, function, hash_db)
save_hashes(hash_db, HASH_FILE)
- else:
- for fromext, function in rules.items():
- if filename.endswith(fromext):
- toext = ".c"
- with open(os.path.join(cur_dir, filename), 'rb') as f:
- data = f.read()
- m = re.search(br"^\s*#\s*distutils:\s*language\s*=\s*c\+\+\s*$", data, re.I|re.M)
- if m:
- toext = ".cxx"
- fromfile = filename
- tofile = filename[:-len(fromext)] + toext
- process(cur_dir, fromfile, tofile, function, hash_db)
- save_hashes(hash_db, HASH_FILE)
+ break
def main():
try:
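find_process_files now processes a single directory and looks the handler up in the extended rules table, where each source extension maps to a (processor, output extension) pair. A self-contained sketch of that lookup logic; the processor names are placeholders standing in for the functions defined above, and the example filenames are illustrative:

# Standalone illustration of the extension -> (processor, output) mapping.
RULES = {
    '.pyx':    ('process_pyx', '.c'),
    '.pyx.in': ('process_tempita_pyx', '.c'),
    '.pxi.in': ('process_tempita_pxi', '.pxi'),
    '.pxd.in': ('process_tempita_pxd', '.pxd'),
    '.pyd.in': ('process_tempita_pyd', '.pyd'),
}

def resolve(filename):
    # Mirror of the endswith() matching in find_process_files.
    for fromext, (processor, toext) in RULES.items():
        if filename.endswith(fromext):
            return processor, filename[:-len(fromext)] + toext
    return None

print(resolve('bounded_integers.pxd.in'))  # ('process_tempita_pxd', 'bounded_integers.pxd')
print(resolve('generator.pyx'))            # ('process_pyx', 'generator.c')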