Diffstat (limited to 'numpy')
numpy/__init__.py | 10
numpy/compat/py3k.py | 12
numpy/conftest.py | 18
numpy/core/_add_newdocs.py | 33
numpy/core/code_generators/cversions.txt | 2
numpy/core/code_generators/genapi.py | 8
numpy/core/code_generators/generate_umath.py | 27
numpy/core/fromnumeric.py | 42
numpy/core/function_base.py | 5
numpy/core/include/numpy/numpyconfig.h | 3
numpy/core/include/numpy/random/bitgen.h (renamed from numpy/random/include/bitgen.h) | 0
numpy/core/include/numpy/random/distributions.h (renamed from numpy/random/include/distributions.h) | 4
numpy/core/numerictypes.py | 2
numpy/core/setup.py | 17
numpy/core/setup_common.py | 3
numpy/core/shape_base.py | 2
numpy/core/src/common/cblasfuncs.c | 86
numpy/core/src/common/npy_cblas.h | 556
numpy/core/src/common/npy_cblas64_.h | 31
numpy/core/src/common/npy_cblas_base.h | 557
numpy/core/src/common/python_xerbla.c | 9
numpy/core/src/multiarray/arrayobject.c | 2
numpy/core/src/multiarray/arraytypes.c.src | 110
numpy/core/src/multiarray/buffer.c | 2
numpy/core/src/multiarray/common.c | 2
numpy/core/src/multiarray/conversion_utils.c | 2
numpy/core/src/multiarray/convert_datatype.c | 8
numpy/core/src/multiarray/ctors.c | 186
numpy/core/src/multiarray/descriptor.c | 2
numpy/core/src/multiarray/getset.c | 2
numpy/core/src/multiarray/multiarraymodule.c | 8
numpy/core/src/multiarray/npy_buffer.h (renamed from numpy/core/src/multiarray/buffer.h) | 0
numpy/core/src/multiarray/scalartypes.c.src | 9
numpy/core/src/npysort/npysort_common.h | 8
numpy/core/src/umath/loops.c.src | 312
numpy/core/src/umath/loops.h.src | 12
numpy/core/tests/test_api.py | 2
numpy/core/tests/test_datetime.py | 78
numpy/core/tests/test_deprecations.py | 2
numpy/core/tests/test_dtype.py | 4
numpy/core/tests/test_function_base.py | 20
numpy/core/tests/test_longdouble.py | 114
numpy/core/tests/test_multiarray.py | 49
numpy/core/tests/test_nditer.py | 2
numpy/core/tests/test_numeric.py | 7
numpy/core/tests/test_regression.py | 26
numpy/core/tests/test_scalarinherit.py | 5
numpy/core/tests/test_scalarmath.py | 12
numpy/core/tests/test_ufunc.py | 3
numpy/core/tests/test_umath.py | 6
numpy/core/tests/test_umath_accuracy.py | 2
numpy/distutils/misc_util.py | 7
numpy/distutils/system_info.py | 131
numpy/doc/basics.py | 4
numpy/fft/__init__.py | 8
numpy/lib/format.py | 14
numpy/lib/function_base.py | 2
numpy/lib/nanfunctions.py | 2
numpy/lib/npyio.py | 8
numpy/lib/shape_base.py | 67
numpy/lib/tests/test_format.py | 30
numpy/lib/tests/test_function_base.py | 4
numpy/lib/tests/test_io.py | 33
numpy/lib/tests/test_shape_base.py | 24
numpy/lib/tests/test_ufunclike.py | 4
numpy/lib/type_check.py | 10
numpy/linalg/lapack_lite/python_xerbla.c | 9
numpy/linalg/lapack_litemodule.c | 217
numpy/linalg/linalg.py | 88
numpy/linalg/setup.py | 7
numpy/linalg/tests/test_linalg.py | 16
numpy/linalg/umath_linalg.c.src | 541
numpy/ma/core.py | 41
numpy/ma/extras.py | 16
numpy/ma/tests/test_extras.py | 13
numpy/random/.gitignore | 3
numpy/random/__init__.pxd | 14
numpy/random/_bit_generator.pxd | 2
numpy/random/_bounded_integers.pxd | 29
numpy/random/_bounded_integers.pxd.in | 2
numpy/random/_bounded_integers.pyx | 1564
numpy/random/_bounded_integers.pyx.in | 18
numpy/random/_common.pxd | 2
numpy/random/_examples/cffi/extending.py | 72
numpy/random/_examples/cython/extending.pyx (renamed from numpy/random/examples/cython/extending.pyx) | 4
numpy/random/_examples/cython/extending_distributions.pyx (renamed from numpy/random/examples/cython/extending_distributions.pyx) | 49
numpy/random/_examples/cython/setup.py | 31
numpy/random/_examples/numba/extending.py | 84
numpy/random/_examples/numba/extending_distributions.py (renamed from numpy/random/examples/numba/extending_distributions.py) | 22
numpy/random/_generator.pyx | 89
numpy/random/_mt19937.pyx | 2
numpy/random/_pcg64.pyx | 2
numpy/random/_philox.pyx | 2
numpy/random/_sfc64.pyx | 2
numpy/random/examples/cython/setup.py | 27
numpy/random/examples/numba/extending.py | 77
numpy/random/include/legacy-distributions.h | 2
numpy/random/mtrand.pyx | 309
numpy/random/setup.py | 13
numpy/random/src/distributions/distributions.c | 2
numpy/random/src/distributions/random_hypergeometric.c | 2
numpy/random/src/distributions/random_mvhg_count.c | 2
numpy/random/src/distributions/random_mvhg_marginals.c | 2
numpy/random/tests/test_direct.py | 7
numpy/random/tests/test_extending.py | 51
numpy/random/tests/test_generator_mt19937.py | 37
numpy/random/tests/test_random.py | 2
numpy/random/tests/test_randomstate.py | 6
numpy/random/tests/test_smoke.py | 8
numpy/testing/_private/decorators.py | 4
numpy/testing/_private/utils.py | 143
numpy/tests/test_public_api.py | 2
112 files changed, 3019 insertions, 3390 deletions
diff --git a/numpy/__init__.py b/numpy/__init__.py
index fef8245de..349914b2f 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -158,6 +158,7 @@ else:
# Make these accessible from numpy name-space
# but not imported in from numpy import *
+ # TODO[gh-6103]: Deprecate these
if sys.version_info[0] >= 3:
from builtins import bool, int, float, complex, object, str
unicode = str
@@ -168,14 +169,17 @@ else:
# now that numpy modules are imported, can initialize limits
core.getlimits._register_known_types()
- __all__.extend(['bool', 'int', 'float', 'complex', 'object', 'unicode',
- 'str'])
__all__.extend(['__version__', 'show_config'])
__all__.extend(core.__all__)
__all__.extend(_mat.__all__)
__all__.extend(lib.__all__)
__all__.extend(['linalg', 'fft', 'random', 'ctypeslib', 'ma'])
+ # These are added by `from .core import *` and `core.__all__`, but we
+ # overwrite them above with builtins we do _not_ want to export.
+ __all__.remove('long')
+ __all__.remove('unicode')
+
# Remove things that are in the numpy.lib but not in the numpy namespace
# Note that there is a test (numpy/tests/test_public_api.py:test_numpy_namespace)
# that prevents adding more things to the main namespace by accident.
@@ -216,7 +220,7 @@ else:
"{!r}".format(__name__, attr))
def __dir__():
- return __all__ + ['Tester', 'testing']
+ return list(globals().keys()) + ['Tester', 'testing']
else:
# We don't actually use this ourselves anymore, but I'm not 100% sure that
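A small hedged check of the user-visible effect of the export-list cleanup above: the names remain available as attributes in this NumPy version, only the star-import/export list changes.

>>> import numpy as np
>>> 'long' in np.__all__, 'unicode' in np.__all__
(False, False)
>>> np.unicode is str   # the attribute itself is still present here
True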
diff --git a/numpy/compat/py3k.py b/numpy/compat/py3k.py
index c9ed9d52c..90e17d6d6 100644
--- a/numpy/compat/py3k.py
+++ b/numpy/compat/py3k.py
@@ -1,9 +1,15 @@
"""
-Python 3 compatibility tools.
+Python 3.X compatibility tools.
-"""
-from __future__ import division, absolute_import, print_function
+While this file was originally intended for the Python 2 -> 3 transition,
+it is now used to create a compatibility layer between different
+minor versions of Python 3.
+While the active version of numpy may not support a given version of python, we
+allow downstream libraries to continue to use these shims for forward
+compatibility with numpy while they transition their code to newer versions of
+Python.
+"""
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1', 'long', 'basestring', 'sixu',
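A brief, hedged illustration of the kind of shim this module keeps exporting for downstream projects, using two helpers from the ``__all__`` list above:

>>> from numpy.compat.py3k import asbytes, asunicode
>>> asbytes('abc')
b'abc'
>>> asunicode(b'abc')
'abc'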
diff --git a/numpy/conftest.py b/numpy/conftest.py
index 18d5d1ce9..1baf4adda 100644
--- a/numpy/conftest.py
+++ b/numpy/conftest.py
@@ -3,6 +3,8 @@ Pytest configuration and fixtures for the Numpy test suite.
"""
from __future__ import division, absolute_import, print_function
+import os
+
import pytest
import numpy
@@ -22,6 +24,22 @@ def pytest_configure(config):
"slow: Tests that are very slow.")
+def pytest_addoption(parser):
+ parser.addoption("--available-memory", action="store", default=None,
+ help=("Set amount of memory available for running the "
+ "test suite. This can result to tests requiring "
+ "especially large amounts of memory to be skipped. "
+ "Equivalent to setting environment variable "
+ "NPY_AVAILABLE_MEM. Default: determined"
+ "automatically."))
+
+
+def pytest_sessionstart(session):
+ available_mem = session.config.getoption('available_memory')
+ if available_mem is not None:
+ os.environ['NPY_AVAILABLE_MEM'] = available_mem
+
+
#FIXME when yield tests are gone.
@pytest.hookimpl()
def pytest_itemcollected(item):
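As a hedged sketch of how a memory-hungry test might consume the forwarded value: the hooks above only copy ``--available-memory`` into ``NPY_AVAILABLE_MEM``, so a consumer like the one below (an illustrative helper, not part of this change, and assuming the variable holds a plain byte count) can decide whether to skip.

import os
import pytest

def skip_if_low_memory(required_bytes):
    # NPY_AVAILABLE_MEM is set either directly or via --available-memory above.
    available = os.environ.get('NPY_AVAILABLE_MEM')
    if available is not None and int(available) < required_bytes:
        pytest.skip("not enough memory advertised via NPY_AVAILABLE_MEM")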
diff --git a/numpy/core/_add_newdocs.py b/numpy/core/_add_newdocs.py
index bd309f4a5..2f1273904 100644
--- a/numpy/core/_add_newdocs.py
+++ b/numpy/core/_add_newdocs.py
@@ -1036,7 +1036,12 @@ add_newdoc('numpy.core.multiarray', 'fromstring',
A string containing the data.
dtype : data-type, optional
The data type of the array; default: float. For binary input data,
- the data must be in exactly this format.
+ the data must be in exactly this format. Most builtin numeric types are
+ supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
count : int, optional
Read this number of `dtype` elements from the data. If this is
negative (the default), the count will be determined from the
@@ -1172,6 +1177,11 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Data type of the returned array.
For binary files, it is used to determine the size and byte-order
of the items in the file.
+ Most builtin numeric types are supported and extension types may be supported.
+
+ .. versionadded:: 1.18.0
+ Complex dtypes.
+
count : int
Number of items to read. ``-1`` means all items (i.e., the complete
file).
@@ -1196,7 +1206,7 @@ add_newdoc('numpy.core.multiarray', 'fromfile',
Notes
-----
Do not rely on the combination of `tofile` and `fromfile` for
- data storage, as the binary files generated are are not platform
+ data storage, as the binary files generated are not platform
independent. In particular, no byte-order or data-type information is
saved. Data can be stored in the platform independent ``.npy`` format
using `save` and `load` instead.
@@ -3943,15 +3953,22 @@ add_newdoc('numpy.core.multiarray', 'ndarray', ('tolist',
Examples
--------
- For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``:
+ For a 1D array, ``a.tolist()`` is almost the same as ``list(a)``,
+ except that ``tolist`` changes numpy scalars to Python scalars:
- >>> a = np.array([1, 2])
- >>> list(a)
+ >>> a = np.uint32([1, 2])
+ >>> a_list = list(a)
+ >>> a_list
[1, 2]
- >>> a.tolist()
+ >>> type(a_list[0])
+ <class 'numpy.uint32'>
+ >>> a_tolist = a.tolist()
+ >>> a_tolist
[1, 2]
+ >>> type(a_tolist[0])
+ <class 'int'>
- However, for a 2D array, ``tolist`` applies recursively:
+ Additionally, for a 2D array, ``tolist`` applies recursively:
>>> a = np.array([[1, 2], [3, 4]])
>>> list(a)
@@ -4236,7 +4253,7 @@ add_newdoc('numpy.core.umath', 'frompyfunc',
See Also
--------
- vectorize : evaluates pyfunc over input arrays using broadcasting rules of numpy
+ vectorize : Evaluates pyfunc over input arrays using broadcasting rules of numpy.
Notes
-----
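A short, hedged example of the newly documented complex-dtype support in text mode for `fromstring`/`fromfile`; the output is shown as expected, not re-verified here.

>>> np.fromstring('1+2j,3-4j', dtype=complex, sep=',')
array([1.+2.j, 3.-4.j])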
diff --git a/numpy/core/code_generators/cversions.txt b/numpy/core/code_generators/cversions.txt
index 00f10df57..72d2af8b9 100644
--- a/numpy/core/code_generators/cversions.txt
+++ b/numpy/core/code_generators/cversions.txt
@@ -47,4 +47,6 @@
# Deprecate PyArray_SetNumericOps and PyArray_GetNumericOps,
# Add fields core_dim_flags and core_dim_sizes to PyUFuncObject.
# Add PyUFunc_FromFuncAndDataAndSignatureAndIdentity to ufunc_funcs_api.
+# Version 13 (NumPy 1.17) No change.
+# Version 13 (NumPy 1.18) No change.
0x0000000d = 5b0e8bbded00b166125974fc71e80a33
diff --git a/numpy/core/code_generators/genapi.py b/numpy/core/code_generators/genapi.py
index 7336e5e13..22afa0320 100644
--- a/numpy/core/code_generators/genapi.py
+++ b/numpy/core/code_generators/genapi.py
@@ -8,8 +8,11 @@ specified.
"""
from __future__ import division, absolute_import, print_function
+from numpy.distutils.conv_template import process_file as process_c_file
+
import sys, os, re
import hashlib
+import io
import textwrap
@@ -215,7 +218,10 @@ def find_functions(filename, tag='API'):
This function does foo...
*/
"""
- fo = open(filename, 'r')
+ if filename.endswith(('.c.src', '.h.src')):
+ fo = io.StringIO(process_c_file(filename))
+ else:
+ fo = open(filename, 'r')
functions = []
return_type = None
function_name = None
diff --git a/numpy/core/code_generators/generate_umath.py b/numpy/core/code_generators/generate_umath.py
index e0b6a654c..6d76f7ca2 100644
--- a/numpy/core/code_generators/generate_umath.py
+++ b/numpy/core/code_generators/generate_umath.py
@@ -226,7 +226,9 @@ chartoname = {
'P': 'OBJECT',
}
-all = '?bBhHiIlLqQefdgFDGOMm'
+noobj = '?bBhHiIlLqQefdgFDGmM'
+all = '?bBhHiIlLqQefdgFDGOmM'
+
O = 'O'
P = 'P'
ints = 'bBhHiIlLqQ'
@@ -246,10 +248,8 @@ inexactvec = 'fd'
noint = inexact+O
nointP = inexact+P
allP = bints+times+flts+cmplxP
-nobool = all[1:]
-noobj = all[:-3]+all[-2:]
-nobool_or_obj = all[1:-3]+all[-2:]
-nobool_or_datetime = all[1:-2]+all[-1:]
+nobool_or_obj = noobj[1:]
+nobool_or_datetime = noobj[1:-1] + O # includes m - timedelta64
intflt = ints+flts
intfltcmplx = ints+flts+cmplx
nocmplx = bints+times+flts
@@ -433,6 +433,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'greater_equal':
Ufunc(2, 1, None,
@@ -440,6 +441,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'less':
Ufunc(2, 1, None,
@@ -447,6 +449,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'less_equal':
Ufunc(2, 1, None,
@@ -454,6 +457,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'equal':
Ufunc(2, 1, None,
@@ -461,6 +465,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'not_equal':
Ufunc(2, 1, None,
@@ -468,6 +473,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(all, out='?', simd=[('avx2', ints)]),
[TypeDescription('O', FullTypeDescr, 'OO', 'O')],
+ TD('O', out='?'),
),
'logical_and':
Ufunc(2, 1, True_,
@@ -475,6 +481,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalAnd'),
+ TD(O, f='npy_ObjectLogicalAnd', out='?'),
),
'logical_not':
Ufunc(1, 1, None,
@@ -482,6 +489,7 @@ defdict = {
None,
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalNot'),
+ TD(O, f='npy_ObjectLogicalNot', out='?'),
),
'logical_or':
Ufunc(2, 1, False_,
@@ -489,6 +497,7 @@ defdict = {
'PyUFunc_SimpleBinaryComparisonTypeResolver',
TD(nodatetime_or_obj, out='?', simd=[('avx2', ints)]),
TD(O, f='npy_ObjectLogicalOr'),
+ TD(O, f='npy_ObjectLogicalOr', out='?'),
),
'logical_xor':
Ufunc(2, 1, False_,
@@ -849,8 +858,8 @@ defdict = {
'isnan':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isnan'),
- None,
- TD(nodatetime_or_obj, out='?'),
+ 'PyUFunc_IsFiniteTypeResolver',
+ TD(noobj, out='?'),
),
'isnat':
Ufunc(1, 1, None,
@@ -861,8 +870,8 @@ defdict = {
'isinf':
Ufunc(1, 1, None,
docstrings.get('numpy.core.umath.isinf'),
- None,
- TD(nodatetime_or_obj, out='?'),
+ 'PyUFunc_IsFiniteTypeResolver',
+ TD(noobj, out='?'),
),
'isfinite':
Ufunc(1, 1, None,
diff --git a/numpy/core/fromnumeric.py b/numpy/core/fromnumeric.py
index 5f7716455..d454480a8 100644
--- a/numpy/core/fromnumeric.py
+++ b/numpy/core/fromnumeric.py
@@ -796,7 +796,9 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
--------
partition : Describes partition algorithms used.
ndarray.partition : Inplace partition.
- argsort : Full indirect sort
+ argsort : Full indirect sort.
+ take_along_axis : Apply ``index_array`` from argpartition
+ to an array as if by calling partition.
Notes
-----
@@ -816,6 +818,14 @@ def argpartition(a, kth, axis=-1, kind='introselect', order=None):
>>> np.array(x)[np.argpartition(x, 3)]
array([2, 1, 3, 4])
+ Multi-dimensional array:
+
+ >>> x = np.array([[3, 4, 2], [1, 3, 1]])
+ >>> index_array = np.argpartition(x, kth=1, axis=-1)
+ >>> np.take_along_axis(x, index_array, axis=-1) # same as np.partition(x, kth=1)
+ array([[2, 3, 4],
+ [1, 1, 3]])
+
"""
return _wrapfunc(a, 'argpartition', kth, axis=axis, kind=kind, order=order)
@@ -934,6 +944,10 @@ def sort(a, axis=-1, kind=None, order=None):
'mergesort' and 'stable' are mapped to radix sort for integer data types. Radix sort is an
O(n) sort instead of O(n log n).
+ .. versionchanged:: 1.17.0
+
+ NaT now sorts to the end of arrays for consistency with NaN.
+
Examples
--------
>>> a = np.array([[1,4],[3,1]])
@@ -1025,6 +1039,8 @@ def argsort(a, axis=-1, kind=None, order=None):
lexsort : Indirect stable sort with multiple keys.
ndarray.sort : Inplace sort.
argpartition : Indirect partial sort.
+ take_along_axis : Apply ``index_array`` from argsort
+ to an array as if by calling sort.
Notes
-----
@@ -1120,6 +1136,8 @@ def argmax(a, axis=None, out=None):
ndarray.argmax, argmin
amax : The maximum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmax to an array as if by calling max.
Notes
-----
@@ -1154,6 +1172,16 @@ def argmax(a, axis=None, out=None):
>>> np.argmax(b) # Only the first occurrence is returned.
1
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmax(x, axis=-1)
+ >>> # Same as np.max(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[4],
+ [3]])
+ >>> # Same as np.max(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([4, 3])
+
"""
return _wrapfunc(a, 'argmax', axis=axis, out=out)
@@ -1189,6 +1217,8 @@ def argmin(a, axis=None, out=None):
ndarray.argmin, argmax
amin : The minimum value along a given axis.
unravel_index : Convert a flat index into an index tuple.
+ take_along_axis : Apply ``np.expand_dims(index_array, axis)``
+ from argmin to an array as if by calling min.
Notes
-----
@@ -1223,6 +1253,16 @@ def argmin(a, axis=None, out=None):
>>> np.argmin(b) # Only the first occurrence is returned.
0
+ >>> x = np.array([[4,2,3], [1,0,3]])
+ >>> index_array = np.argmin(x, axis=-1)
+ >>> # Same as np.min(x, axis=-1, keepdims=True)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1)
+ array([[2],
+ [0]])
+ >>> # Same as np.max(x, axis=-1)
+ >>> np.take_along_axis(x, np.expand_dims(index_array, axis=-1), axis=-1).squeeze(axis=-1)
+ array([2, 0])
+
"""
return _wrapfunc(a, 'argmin', axis=axis, out=out)
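For completeness, a hedged example of the ``argsort``/``take_along_axis`` pairing referenced in the new "See Also" entries, mirroring the ``argpartition`` example added above:

>>> x = np.array([[3, 4, 2], [1, 3, 1]])
>>> index_array = np.argsort(x, axis=-1)
>>> np.take_along_axis(x, index_array, axis=-1)  # same as np.sort(x, axis=-1)
array([[2, 3, 4],
       [1, 1, 3]])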
diff --git a/numpy/core/function_base.py b/numpy/core/function_base.py
index 42604ec3f..538ac8b84 100644
--- a/numpy/core/function_base.py
+++ b/numpy/core/function_base.py
@@ -139,7 +139,7 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
# from overriding what class is produced, and thus prevents, e.g. use of Quantities,
# see gh-7142. Hence, we multiply in place only for standard scalar types.
_mult_inplace = _nx.isscalar(delta)
- if num > 1:
+ if div > 0:
step = delta / div
if _nx.any(step == 0):
# Special handling for denormal numbers, gh-5437
@@ -154,7 +154,8 @@ def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None,
else:
y = y * step
else:
- # 0 and 1 item long sequences have an undefined step
+ # sequences with 0 items or 1 item with endpoint=True (i.e. div <= 0)
+ # have an undefined step
step = NaN
# Multiply with delta to allow possible override of output class.
y = y * delta
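A hedged illustration of the corner case the ``div > 0`` condition above now covers: a single sample with ``endpoint=False`` gives ``div == 1``, so the step is defined rather than NaN (expected values shown, not re-verified).

>>> y, step = np.linspace(0, 10, num=1, endpoint=False, retstep=True)
>>> y, step
(array([0.]), 10.0)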
diff --git a/numpy/core/include/numpy/numpyconfig.h b/numpy/core/include/numpy/numpyconfig.h
index ab198f36b..4bca82f9f 100644
--- a/numpy/core/include/numpy/numpyconfig.h
+++ b/numpy/core/include/numpy/numpyconfig.h
@@ -37,5 +37,8 @@
#define NPY_1_13_API_VERSION 0x00000008
#define NPY_1_14_API_VERSION 0x00000008
#define NPY_1_15_API_VERSION 0x00000008
+#define NPY_1_16_API_VERSION 0x00000008
+#define NPY_1_17_API_VERSION 0x00000008
+#define NPY_1_18_API_VERSION 0x00000008
#endif
diff --git a/numpy/random/include/bitgen.h b/numpy/core/include/numpy/random/bitgen.h
index 83c2858dd..83c2858dd 100644
--- a/numpy/random/include/bitgen.h
+++ b/numpy/core/include/numpy/random/bitgen.h
diff --git a/numpy/random/include/distributions.h b/numpy/core/include/numpy/random/distributions.h
index c02ea605e..1a8e44e40 100644
--- a/numpy/random/include/distributions.h
+++ b/numpy/core/include/numpy/random/distributions.h
@@ -8,7 +8,7 @@
#include <stdint.h>
#include "numpy/npy_math.h"
-#include "include/bitgen.h"
+#include "numpy/random/bitgen.h"
/*
* RAND_INT_TYPE is used to share integer generators with RandomState which
@@ -24,7 +24,7 @@
#define RAND_INT_MAX INT64_MAX
#endif
-#ifdef DLL_EXPORT
+#ifdef _MSC_VER
#define DECLDIR __declspec(dllexport)
#else
#define DECLDIR extern
diff --git a/numpy/core/numerictypes.py b/numpy/core/numerictypes.py
index ab1ff65a4..761c7087c 100644
--- a/numpy/core/numerictypes.py
+++ b/numpy/core/numerictypes.py
@@ -485,7 +485,7 @@ def sctype2char(sctype):
Examples
--------
- >>> for sctype in [np.int32, np.double, np.complex, np.string_, np.ndarray]:
+ >>> for sctype in [np.int32, np.double, np.complex_, np.string_, np.ndarray]:
... print(np.sctype2char(sctype))
l # may vary
d
diff --git a/numpy/core/setup.py b/numpy/core/setup.py
index a33318472..5bbb1c622 100644
--- a/numpy/core/setup.py
+++ b/numpy/core/setup.py
@@ -91,6 +91,9 @@ def is_npy_no_smp():
# block.
return 'NPY_NOSMP' in os.environ
+def is_npy_use_blas64_():
+ return (os.environ.get('NPY_USE_BLAS64_', "0") != "0")
+
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
@@ -394,7 +397,7 @@ def visibility_define(config):
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
- from numpy.distutils.system_info import get_info
+ from numpy.distutils.system_info import get_info, dict_append
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
@@ -753,8 +756,14 @@ def configuration(parent_package='',top_path=None):
join('src', 'common', 'numpyos.c'),
]
- blas_info = get_info('blas_opt', 0)
- if blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', []):
+ if is_npy_use_blas64_():
+ blas_info = get_info('blas64__opt', 2)
+ have_blas = blas_info and ('HAVE_CBLAS64_', None) in blas_info.get('define_macros', [])
+ else:
+ blas_info = get_info('blas_opt', 0)
+ have_blas = blas_info and ('HAVE_CBLAS', None) in blas_info.get('define_macros', [])
+
+ if have_blas:
extra_info = blas_info
# These files are also in MANIFEST.in so that they are always in
# the source distribution independently of HAVE_CBLAS.
@@ -774,7 +783,7 @@ def configuration(parent_package='',top_path=None):
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'arrayfunction_override.h'),
- join('src', 'multiarray', 'buffer.h'),
+ join('src', 'multiarray', 'npy_buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
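A hedged sketch of opting in to the suffixed 64-bit CBLAS path at build time; only the ``NPY_USE_BLAS64_`` variable name comes from the change above, the build invocation itself is illustrative.

import os
import subprocess

# Build NumPy against a BLAS that exports the '64_'-suffixed symbols.
env = dict(os.environ, NPY_USE_BLAS64_="1")
subprocess.run(["python", "setup.py", "build"], env=env, check=True)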
diff --git a/numpy/core/setup_common.py b/numpy/core/setup_common.py
index 84b78b585..6356f08ba 100644
--- a/numpy/core/setup_common.py
+++ b/numpy/core/setup_common.py
@@ -266,8 +266,9 @@ def check_long_double_representation(cmd):
except ValueError:
# try linking to support CC="gcc -flto" or icc -ipo
# struct needs to be volatile so it isn't optimized away
+ # additionally "clang -flto" requires the foo struct to be used
body = body.replace('struct', 'volatile struct')
- body += "int main(void) { return 0; }\n"
+ body += "int main(void) { return foo.before[0]; }\n"
src, obj = cmd._compile(body, None, None, 'c')
cmd.temp_files.append("_configtest")
cmd.compiler.link_executable([obj], "_configtest")
diff --git a/numpy/core/shape_base.py b/numpy/core/shape_base.py
index 369d956fb..31b1c20b9 100644
--- a/numpy/core/shape_base.py
+++ b/numpy/core/shape_base.py
@@ -575,7 +575,7 @@ def _concatenate_shapes(shapes, axis):
that was computed deeper in the recursion.
These are returned as tuples to ensure that they can quickly be added
- to existing slice tuple without creating a new tuple everytime.
+ to existing slice tuple without creating a new tuple every time.
"""
# Cache a result that will be reused.
diff --git a/numpy/core/src/common/cblasfuncs.c b/numpy/core/src/common/cblasfuncs.c
index 39572fed4..14d13a6c7 100644
--- a/numpy/core/src/common/cblasfuncs.c
+++ b/numpy/core/src/common/cblasfuncs.c
@@ -10,10 +10,20 @@
#include <assert.h>
#include <numpy/arrayobject.h>
#include "npy_cblas.h"
+#include "npy_cblas64_.h"
#include "arraytypes.h"
#include "common.h"
+/*
+ * If 64-bit CBLAS with symbol suffix '64_' is available, use it.
+ */
+#ifdef HAVE_CBLAS64_
+#define CBLAS_FUNC(name) name ## 64_
+#else
+#define CBLAS_FUNC(name) name
+#endif
+
static const double oneD[2] = {1.0, 0.0}, zeroD[2] = {0.0, 0.0};
static const float oneF[2] = {1.0, 0.0}, zeroF[2] = {0.0, 0.0};
@@ -24,28 +34,28 @@ static const float oneF[2] = {1.0, 0.0}, zeroF[2] = {0.0, 0.0};
static void
gemm(int typenum, enum CBLAS_ORDER order,
enum CBLAS_TRANSPOSE transA, enum CBLAS_TRANSPOSE transB,
- int m, int n, int k,
- PyArrayObject *A, int lda, PyArrayObject *B, int ldb, PyArrayObject *R)
+ npy_intp m, npy_intp n, npy_intp k,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *B, npy_intp ldb, PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A), *Bdata = PyArray_DATA(B);
void *Rdata = PyArray_DATA(R);
- int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
+ npy_intp ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
switch (typenum) {
case NPY_DOUBLE:
- cblas_dgemm(order, transA, transB, m, n, k, 1.,
+ CBLAS_FUNC(cblas_dgemm)(order, transA, transB, m, n, k, 1.,
Adata, lda, Bdata, ldb, 0., Rdata, ldc);
break;
case NPY_FLOAT:
- cblas_sgemm(order, transA, transB, m, n, k, 1.f,
+ CBLAS_FUNC(cblas_sgemm)(order, transA, transB, m, n, k, 1.f,
Adata, lda, Bdata, ldb, 0.f, Rdata, ldc);
break;
case NPY_CDOUBLE:
- cblas_zgemm(order, transA, transB, m, n, k, oneD,
+ CBLAS_FUNC(cblas_zgemm)(order, transA, transB, m, n, k, oneD,
Adata, lda, Bdata, ldb, zeroD, Rdata, ldc);
break;
case NPY_CFLOAT:
- cblas_cgemm(order, transA, transB, m, n, k, oneF,
+ CBLAS_FUNC(cblas_cgemm)(order, transA, transB, m, n, k, oneF,
Adata, lda, Bdata, ldb, zeroF, Rdata, ldc);
break;
}
@@ -57,29 +67,29 @@ gemm(int typenum, enum CBLAS_ORDER order,
*/
static void
gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
- PyArrayObject *A, int lda, PyArrayObject *X, int incX,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *X, npy_intp incX,
PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A), *Xdata = PyArray_DATA(X);
void *Rdata = PyArray_DATA(R);
- int m = PyArray_DIM(A, 0), n = PyArray_DIM(A, 1);
+ npy_intp m = PyArray_DIM(A, 0), n = PyArray_DIM(A, 1);
switch (typenum) {
case NPY_DOUBLE:
- cblas_dgemv(order, trans, m, n, 1., Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_dgemv)(order, trans, m, n, 1., Adata, lda, Xdata, incX,
0., Rdata, 1);
break;
case NPY_FLOAT:
- cblas_sgemv(order, trans, m, n, 1.f, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_sgemv)(order, trans, m, n, 1.f, Adata, lda, Xdata, incX,
0.f, Rdata, 1);
break;
case NPY_CDOUBLE:
- cblas_zgemv(order, trans, m, n, oneD, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_zgemv)(order, trans, m, n, oneD, Adata, lda, Xdata, incX,
zeroD, Rdata, 1);
break;
case NPY_CFLOAT:
- cblas_cgemv(order, trans, m, n, oneF, Adata, lda, Xdata, incX,
+ CBLAS_FUNC(cblas_cgemv)(order, trans, m, n, oneF, Adata, lda, Xdata, incX,
zeroF, Rdata, 1);
break;
}
@@ -91,19 +101,19 @@ gemv(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
*/
static void
syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
- int n, int k,
- PyArrayObject *A, int lda, PyArrayObject *R)
+ npy_intp n, npy_intp k,
+ PyArrayObject *A, npy_intp lda, PyArrayObject *R)
{
const void *Adata = PyArray_DATA(A);
void *Rdata = PyArray_DATA(R);
- int ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
+ npy_intp ldc = PyArray_DIM(R, 1) > 1 ? PyArray_DIM(R, 1) : 1;
npy_intp i;
npy_intp j;
switch (typenum) {
case NPY_DOUBLE:
- cblas_dsyrk(order, CblasUpper, trans, n, k, 1.,
+ CBLAS_FUNC(cblas_dsyrk)(order, CblasUpper, trans, n, k, 1.,
Adata, lda, 0., Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -114,7 +124,7 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
}
break;
case NPY_FLOAT:
- cblas_ssyrk(order, CblasUpper, trans, n, k, 1.f,
+ CBLAS_FUNC(cblas_ssyrk)(order, CblasUpper, trans, n, k, 1.f,
Adata, lda, 0.f, Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -125,7 +135,7 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
}
break;
case NPY_CDOUBLE:
- cblas_zsyrk(order, CblasUpper, trans, n, k, oneD,
+ CBLAS_FUNC(cblas_zsyrk)(order, CblasUpper, trans, n, k, oneD,
Adata, lda, zeroD, Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -136,7 +146,7 @@ syrk(int typenum, enum CBLAS_ORDER order, enum CBLAS_TRANSPOSE trans,
}
break;
case NPY_CFLOAT:
- cblas_csyrk(order, CblasUpper, trans, n, k, oneF,
+ CBLAS_FUNC(cblas_csyrk)(order, CblasUpper, trans, n, k, oneF,
Adata, lda, zeroF, Rdata, ldc);
for (i = 0; i < n; i++) {
@@ -222,7 +232,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
PyArrayObject *out)
{
PyArrayObject *result = NULL, *out_buf = NULL;
- int j, lda, ldb;
+ npy_intp j, lda, ldb;
npy_intp l;
int nd;
npy_intp ap1stride = 0;
@@ -385,14 +395,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
*((double *)PyArray_DATA(ap1));
}
else if (ap1shape != _matrix) {
- cblas_daxpy(l,
+ CBLAS_FUNC(cblas_daxpy)(l,
*((double *)PyArray_DATA(ap2)),
(double *)PyArray_DATA(ap1),
ap1stride/sizeof(double),
(double *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
double val;
@@ -405,7 +416,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(double);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(double);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_daxpy(l, val, (double *)ptr, a1s,
+ CBLAS_FUNC(cblas_daxpy)(l, val, (double *)ptr, a1s,
(double *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -423,14 +434,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real;
}
else if (ap1shape != _matrix) {
- cblas_zaxpy(l,
+ CBLAS_FUNC(cblas_zaxpy)(l,
(double *)PyArray_DATA(ap2),
(double *)PyArray_DATA(ap1),
ap1stride/sizeof(npy_cdouble),
(double *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
double *pval;
@@ -443,7 +455,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(npy_cdouble);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(npy_cdouble);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_zaxpy(l, pval, (double *)ptr, a1s,
+ CBLAS_FUNC(cblas_zaxpy)(l, pval, (double *)ptr, a1s,
(double *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -456,14 +468,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
*((float *)PyArray_DATA(ap1));
}
else if (ap1shape != _matrix) {
- cblas_saxpy(l,
+ CBLAS_FUNC(cblas_saxpy)(l,
*((float *)PyArray_DATA(ap2)),
(float *)PyArray_DATA(ap1),
ap1stride/sizeof(float),
(float *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
float val;
@@ -476,7 +489,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(float);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(float);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_saxpy(l, val, (float *)ptr, a1s,
+ CBLAS_FUNC(cblas_saxpy)(l, val, (float *)ptr, a1s,
(float *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -494,14 +507,15 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
res->imag = ptr1->real * ptr2->imag + ptr1->imag * ptr2->real;
}
else if (ap1shape != _matrix) {
- cblas_caxpy(l,
+ CBLAS_FUNC(cblas_caxpy)(l,
(float *)PyArray_DATA(ap2),
(float *)PyArray_DATA(ap1),
ap1stride/sizeof(npy_cfloat),
(float *)PyArray_DATA(out_buf), 1);
}
else {
- int maxind, oind, i, a1s, outs;
+ int maxind, oind;
+ npy_intp i, a1s, outs;
char *ptr, *optr;
float *pval;
@@ -514,7 +528,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
a1s = PyArray_STRIDE(ap1, maxind) / sizeof(npy_cfloat);
outs = PyArray_STRIDE(out_buf, maxind) / sizeof(npy_cfloat);
for (i = 0; i < PyArray_DIM(ap1, oind); i++) {
- cblas_caxpy(l, pval, (float *)ptr, a1s,
+ CBLAS_FUNC(cblas_caxpy)(l, pval, (float *)ptr, a1s,
(float *)optr, outs);
ptr += PyArray_STRIDE(ap1, oind);
optr += PyArray_STRIDE(out_buf, oind);
@@ -537,7 +551,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
/* Matrix vector multiplication -- Level 2 BLAS */
/* lda must be MAX(M,1) */
enum CBLAS_ORDER Order;
- int ap2s;
+ npy_intp ap2s;
if (!PyArray_ISONESEGMENT(ap1)) {
PyObject *new;
@@ -564,7 +578,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
else if (ap1shape != _matrix && ap2shape == _matrix) {
/* Vector matrix multiplication -- Level 2 BLAS */
enum CBLAS_ORDER Order;
- int ap1s;
+ npy_intp ap1s;
if (!PyArray_ISONESEGMENT(ap2)) {
PyObject *new;
@@ -601,7 +615,7 @@ cblas_matrixproduct(int typenum, PyArrayObject *ap1, PyArrayObject *ap2,
*/
enum CBLAS_ORDER Order;
enum CBLAS_TRANSPOSE Trans1, Trans2;
- int M, N, L;
+ npy_intp M, N, L;
/* Optimization possible: */
/*
diff --git a/numpy/core/src/common/npy_cblas.h b/numpy/core/src/common/npy_cblas.h
index a083f3bcc..12db55bde 100644
--- a/numpy/core/src/common/npy_cblas.h
+++ b/numpy/core/src/common/npy_cblas.h
@@ -17,565 +17,21 @@ extern "C"
/*
* Enumerated and derived types
*/
-#define CBLAS_INDEX size_t /* this may vary between platforms */
-
enum CBLAS_ORDER {CblasRowMajor=101, CblasColMajor=102};
enum CBLAS_TRANSPOSE {CblasNoTrans=111, CblasTrans=112, CblasConjTrans=113};
enum CBLAS_UPLO {CblasUpper=121, CblasLower=122};
enum CBLAS_DIAG {CblasNonUnit=131, CblasUnit=132};
enum CBLAS_SIDE {CblasLeft=141, CblasRight=142};
-/*
- * ===========================================================================
- * Prototypes for level 1 BLAS functions (complex are recast as routines)
- * ===========================================================================
- */
-float cblas_sdsdot(const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY);
-double cblas_dsdot(const int N, const float *X, const int incX, const float *Y,
- const int incY);
-float cblas_sdot(const int N, const float *X, const int incX,
- const float *Y, const int incY);
-double cblas_ddot(const int N, const double *X, const int incX,
- const double *Y, const int incY);
-
-/*
- * Functions having prefixes Z and C only
- */
-void cblas_cdotu_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotu);
-void cblas_cdotc_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotc);
-
-void cblas_zdotu_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotu);
-void cblas_zdotc_sub(const int N, const void *X, const int incX,
- const void *Y, const int incY, void *dotc);
-
-
-/*
- * Functions having prefixes S D SC DZ
- */
-float cblas_snrm2(const int N, const float *X, const int incX);
-float cblas_sasum(const int N, const float *X, const int incX);
-
-double cblas_dnrm2(const int N, const double *X, const int incX);
-double cblas_dasum(const int N, const double *X, const int incX);
-
-float cblas_scnrm2(const int N, const void *X, const int incX);
-float cblas_scasum(const int N, const void *X, const int incX);
-
-double cblas_dznrm2(const int N, const void *X, const int incX);
-double cblas_dzasum(const int N, const void *X, const int incX);
-
-
-/*
- * Functions having standard 4 prefixes (S D C Z)
- */
-CBLAS_INDEX cblas_isamax(const int N, const float *X, const int incX);
-CBLAS_INDEX cblas_idamax(const int N, const double *X, const int incX);
-CBLAS_INDEX cblas_icamax(const int N, const void *X, const int incX);
-CBLAS_INDEX cblas_izamax(const int N, const void *X, const int incX);
-
-/*
- * ===========================================================================
- * Prototypes for level 1 BLAS routines
- * ===========================================================================
- */
-
-/*
- * Routines with standard 4 prefixes (s, d, c, z)
- */
-void cblas_sswap(const int N, float *X, const int incX,
- float *Y, const int incY);
-void cblas_scopy(const int N, const float *X, const int incX,
- float *Y, const int incY);
-void cblas_saxpy(const int N, const float alpha, const float *X,
- const int incX, float *Y, const int incY);
-
-void cblas_dswap(const int N, double *X, const int incX,
- double *Y, const int incY);
-void cblas_dcopy(const int N, const double *X, const int incX,
- double *Y, const int incY);
-void cblas_daxpy(const int N, const double alpha, const double *X,
- const int incX, double *Y, const int incY);
-
-void cblas_cswap(const int N, void *X, const int incX,
- void *Y, const int incY);
-void cblas_ccopy(const int N, const void *X, const int incX,
- void *Y, const int incY);
-void cblas_caxpy(const int N, const void *alpha, const void *X,
- const int incX, void *Y, const int incY);
-
-void cblas_zswap(const int N, void *X, const int incX,
- void *Y, const int incY);
-void cblas_zcopy(const int N, const void *X, const int incX,
- void *Y, const int incY);
-void cblas_zaxpy(const int N, const void *alpha, const void *X,
- const int incX, void *Y, const int incY);
-
-
-/*
- * Routines with S and D prefix only
- */
-void cblas_srotg(float *a, float *b, float *c, float *s);
-void cblas_srotmg(float *d1, float *d2, float *b1, const float b2, float *P);
-void cblas_srot(const int N, float *X, const int incX,
- float *Y, const int incY, const float c, const float s);
-void cblas_srotm(const int N, float *X, const int incX,
- float *Y, const int incY, const float *P);
-
-void cblas_drotg(double *a, double *b, double *c, double *s);
-void cblas_drotmg(double *d1, double *d2, double *b1, const double b2, double *P);
-void cblas_drot(const int N, double *X, const int incX,
- double *Y, const int incY, const double c, const double s);
-void cblas_drotm(const int N, double *X, const int incX,
- double *Y, const int incY, const double *P);
-
-
-/*
- * Routines with S D C Z CS and ZD prefixes
- */
-void cblas_sscal(const int N, const float alpha, float *X, const int incX);
-void cblas_dscal(const int N, const double alpha, double *X, const int incX);
-void cblas_cscal(const int N, const void *alpha, void *X, const int incX);
-void cblas_zscal(const int N, const void *alpha, void *X, const int incX);
-void cblas_csscal(const int N, const float alpha, void *X, const int incX);
-void cblas_zdscal(const int N, const double alpha, void *X, const int incX);
-
-/*
- * ===========================================================================
- * Prototypes for level 2 BLAS
- * ===========================================================================
- */
-
-/*
- * Routines with standard 4 prefixes (S, D, C, Z)
- */
-void cblas_sgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const float alpha, const float *A, const int lda,
- const float *X, const int incX, const float beta,
- float *Y, const int incY);
-void cblas_sgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const float alpha,
- const float *A, const int lda, const float *X,
- const int incX, const float beta, float *Y, const int incY);
-void cblas_strmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *Ap, float *X, const int incX);
-void cblas_strsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *A, const int lda, float *X,
- const int incX);
-void cblas_stbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const float *A, const int lda,
- float *X, const int incX);
-void cblas_stpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const float *Ap, float *X, const int incX);
-
-void cblas_dgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const double alpha, const double *A, const int lda,
- const double *X, const int incX, const double beta,
- double *Y, const int incY);
-void cblas_dgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const double alpha,
- const double *A, const int lda, const double *X,
- const int incX, const double beta, double *Y, const int incY);
-void cblas_dtrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *Ap, double *X, const int incX);
-void cblas_dtrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *A, const int lda, double *X,
- const int incX);
-void cblas_dtbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const double *A, const int lda,
- double *X, const int incX);
-void cblas_dtpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const double *Ap, double *X, const int incX);
-
-void cblas_cgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *X, const int incX, const void *beta,
- void *Y, const int incY);
-void cblas_cgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const void *alpha,
- const void *A, const int lda, const void *X,
- const int incX, const void *beta, void *Y, const int incY);
-void cblas_ctrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-void cblas_ctrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda, void *X,
- const int incX);
-void cblas_ctbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ctpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-
-void cblas_zgemv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *X, const int incX, const void *beta,
- void *Y, const int incY);
-void cblas_zgbmv(const enum CBLAS_ORDER order,
- const enum CBLAS_TRANSPOSE TransA, const int M, const int N,
- const int KL, const int KU, const void *alpha,
- const void *A, const int lda, const void *X,
- const int incX, const void *beta, void *Y, const int incY);
-void cblas_ztrmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-void cblas_ztrsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *A, const int lda, void *X,
- const int incX);
-void cblas_ztbsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const int K, const void *A, const int lda,
- void *X, const int incX);
-void cblas_ztpsv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
- const int N, const void *Ap, void *X, const int incX);
-
-
-/*
- * Routines with S and D prefixes only
- */
-void cblas_ssymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *A,
- const int lda, const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_ssbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const float alpha, const float *A,
- const int lda, const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_sspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *Ap,
- const float *X, const int incX,
- const float beta, float *Y, const int incY);
-void cblas_sger(const enum CBLAS_ORDER order, const int M, const int N,
- const float alpha, const float *X, const int incX,
- const float *Y, const int incY, float *A, const int lda);
-void cblas_ssyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, float *A, const int lda);
-void cblas_sspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, float *Ap);
-void cblas_ssyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY, float *A,
- const int lda);
-void cblas_sspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const float *X,
- const int incX, const float *Y, const int incY, float *A);
-
-void cblas_dsymv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *A,
- const int lda, const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dsbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const double alpha, const double *A,
- const int lda, const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dspmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *Ap,
- const double *X, const int incX,
- const double beta, double *Y, const int incY);
-void cblas_dger(const enum CBLAS_ORDER order, const int M, const int N,
- const double alpha, const double *X, const int incX,
- const double *Y, const int incY, double *A, const int lda);
-void cblas_dsyr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, double *A, const int lda);
-void cblas_dspr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, double *Ap);
-void cblas_dsyr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, const double *Y, const int incY, double *A,
- const int lda);
-void cblas_dspr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const double *X,
- const int incX, const double *Y, const int incY, double *A);
-
-
-/*
- * Routines with C and Z prefixes only
- */
-void cblas_chemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_chbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_chpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *Ap,
- const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_cgeru(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_cgerc(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_cher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const void *X, const int incX,
- void *A, const int lda);
-void cblas_chpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const float alpha, const void *X,
- const int incX, void *A);
-void cblas_cher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_chpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *Ap);
-
-void cblas_zhemv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zhbmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const int K, const void *alpha, const void *A,
- const int lda, const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zhpmv(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const void *alpha, const void *Ap,
- const void *X, const int incX,
- const void *beta, void *Y, const int incY);
-void cblas_zgeru(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zgerc(const enum CBLAS_ORDER order, const int M, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zher(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const void *X, const int incX,
- void *A, const int lda);
-void cblas_zhpr(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
- const int N, const double alpha, const void *X,
- const int incX, void *A);
-void cblas_zher2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *A, const int lda);
-void cblas_zhpr2(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const int N,
- const void *alpha, const void *X, const int incX,
- const void *Y, const int incY, void *Ap);
-
-/*
- * ===========================================================================
- * Prototypes for level 3 BLAS
- * ===========================================================================
- */
-
-/*
- * Routines with standard 4 prefixes (S, D, C, Z)
- */
-void cblas_sgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const float alpha, const float *A,
- const int lda, const float *B, const int ldb,
- const float beta, float *C, const int ldc);
-void cblas_ssymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const float alpha, const float *A, const int lda,
- const float *B, const int ldb, const float beta,
- float *C, const int ldc);
-void cblas_ssyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const float *A, const int lda,
- const float beta, float *C, const int ldc);
-void cblas_ssyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const float alpha, const float *A, const int lda,
- const float *B, const int ldb, const float beta,
- float *C, const int ldc);
-void cblas_strmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const float alpha, const float *A, const int lda,
- float *B, const int ldb);
-void cblas_strsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const float alpha, const float *A, const int lda,
- float *B, const int ldb);
-
-void cblas_dgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const double alpha, const double *A,
- const int lda, const double *B, const int ldb,
- const double beta, double *C, const int ldc);
-void cblas_dsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const double alpha, const double *A, const int lda,
- const double *B, const int ldb, const double beta,
- double *C, const int ldc);
-void cblas_dsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const double *A, const int lda,
- const double beta, double *C, const int ldc);
-void cblas_dsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const double alpha, const double *A, const int lda,
- const double *B, const int ldb, const double beta,
- double *C, const int ldc);
-void cblas_dtrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const double alpha, const double *A, const int lda,
- double *B, const int ldb);
-void cblas_dtrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const double alpha, const double *A, const int lda,
- double *B, const int ldb);
-
-void cblas_cgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const void *alpha, const void *A,
- const int lda, const void *B, const int ldb,
- const void *beta, void *C, const int ldc);
-void cblas_csymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_csyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *beta, void *C, const int ldc);
-void cblas_csyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_ctrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-void cblas_ctrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-
-void cblas_zgemm(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_TRANSPOSE TransB, const int M, const int N,
- const int K, const void *alpha, const void *A,
- const int lda, const void *B, const int ldb,
- const void *beta, void *C, const int ldc);
-void cblas_zsymm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_zsyrk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *beta, void *C, const int ldc);
-void cblas_zsyr2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
- const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
- const void *alpha, const void *A, const int lda,
- const void *B, const int ldb, const void *beta,
- void *C, const int ldc);
-void cblas_ztrmm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-void cblas_ztrsm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
- const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
- const enum CBLAS_DIAG Diag, const int M, const int N,
- const void *alpha, const void *A, const int lda,
- void *B, const int ldb);
-
-/*
- * Routines with prefixes C and Z only
- */
-void cblas_chemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
-                 const enum CBLAS_UPLO Uplo, const int M, const int N,
-                 const void *alpha, const void *A, const int lda,
-                 const void *B, const int ldb, const void *beta,
-                 void *C, const int ldc);
-void cblas_cherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
-                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
-                 const float alpha, const void *A, const int lda,
-                 const float beta, void *C, const int ldc);
-void cblas_cher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
-                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
-                  const void *alpha, const void *A, const int lda,
-                  const void *B, const int ldb, const float beta,
-                  void *C, const int ldc);
-void cblas_zhemm(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
-                 const enum CBLAS_UPLO Uplo, const int M, const int N,
-                 const void *alpha, const void *A, const int lda,
-                 const void *B, const int ldb, const void *beta,
-                 void *C, const int ldc);
-void cblas_zherk(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
-                 const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
-                 const double alpha, const void *A, const int lda,
-                 const double beta, void *C, const int ldc);
-void cblas_zher2k(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
-                  const enum CBLAS_TRANSPOSE Trans, const int N, const int K,
-                  const void *alpha, const void *A, const int lda,
-                  const void *B, const int ldb, const double beta,
-                  void *C, const int ldc);
-void cblas_xerbla(int p, const char *rout, const char *form, ...);
+#define CBLAS_INDEX size_t  /* this may vary between platforms */
+#define BLASINT int
+#define BLASNAME(name) name
+#include "npy_cblas_base.h"
+#undef BLASINT
+#undef BLASNAME
#ifdef __cplusplus
}
diff --git a/numpy/core/src/common/npy_cblas64_.h b/numpy/core/src/common/npy_cblas64_.h
new file mode 100644
index 000000000..bbc4b3559
--- /dev/null
+++ b/numpy/core/src/common/npy_cblas64_.h
@@ -0,0 +1,31 @@
+/*
+ * This header provides numpy a consistent interface to CBLAS code. It is needed
+ * because not all providers of cblas provide cblas.h. For instance, MKL provides
+ * mkl_cblas.h and also typedefs the CBLAS_XXX enums.
+ */
+#ifndef _NPY_CBLAS64__H_
+#define _NPY_CBLAS64__H_
+
+#include <stddef.h>
+
+#include "npy_cblas.h"
+
+/* Allow the use in C++ code. */
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#define BLASINT npy_int64
+#define BLASNAME(name) name##64_
+
+#include "npy_cblas_base.h"
+
+#undef BLASINT
+#undef BLASNAME
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
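
The BLASNAME/BLASINT pair above is the whole trick: npy_cblas_base.h is included twice, once producing the plain cblas_* prototypes taking int, and once producing cblas_*64_ prototypes taking npy_int64. A minimal sketch of what the suffixed symbols look like from a caller's point of view, assuming an ILP64 BLAS built with the 64_ symbol suffix (for example an openblas64_ build) is available; the library name below is an assumption, not something this patch ships:

import ctypes
import numpy as np

# Assumed ILP64 BLAS exposing "64_"-suffixed symbols; adjust the name for the local system.
lib = ctypes.CDLL("libopenblas64_.so")
lib.cblas_ddot64_.restype = ctypes.c_double

x = np.arange(4, dtype=np.float64)
ptr = x.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
n, inc = ctypes.c_int64(x.size), ctypes.c_int64(1)   # BLASINT is npy_int64 in the 64_ variant
print(lib.cblas_ddot64_(n, ptr, inc, ptr, inc))      # expected: 14.0 for [0, 1, 2, 3]
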
diff --git a/numpy/core/src/common/npy_cblas_base.h b/numpy/core/src/common/npy_cblas_base.h
new file mode 100644
index 000000000..792b6f09e
--- /dev/null
+++ b/numpy/core/src/common/npy_cblas_base.h
@@ -0,0 +1,557 @@
+/*
+ * This header provides numpy a consistent interface to CBLAS code. It is needed
+ * because not all providers of cblas provide cblas.h. For instance, MKL provides
+ * mkl_cblas.h and also typedefs the CBLAS_XXX enums.
+ */
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS functions (complex are recast as routines)
+ * ===========================================================================
+ */
+float BLASNAME(cblas_sdsdot)(const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY);
+double BLASNAME(cblas_dsdot)(const BLASINT N, const float *X, const BLASINT incX, const float *Y,
+ const BLASINT incY);
+float BLASNAME(cblas_sdot)(const BLASINT N, const float *X, const BLASINT incX,
+ const float *Y, const BLASINT incY);
+double BLASNAME(cblas_ddot)(const BLASINT N, const double *X, const BLASINT incX,
+ const double *Y, const BLASINT incY);
+
+/*
+ * Functions having prefixes Z and C only
+ */
+void BLASNAME(cblas_cdotu_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotu);
+void BLASNAME(cblas_cdotc_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotc);
+
+void BLASNAME(cblas_zdotu_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotu);
+void BLASNAME(cblas_zdotc_sub)(const BLASINT N, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *dotc);
+
+
+/*
+ * Functions having prefixes S D SC DZ
+ */
+float BLASNAME(cblas_snrm2)(const BLASINT N, const float *X, const BLASINT incX);
+float BLASNAME(cblas_sasum)(const BLASINT N, const float *X, const BLASINT incX);
+
+double BLASNAME(cblas_dnrm2)(const BLASINT N, const double *X, const BLASINT incX);
+double BLASNAME(cblas_dasum)(const BLASINT N, const double *X, const BLASINT incX);
+
+float BLASNAME(cblas_scnrm2)(const BLASINT N, const void *X, const BLASINT incX);
+float BLASNAME(cblas_scasum)(const BLASINT N, const void *X, const BLASINT incX);
+
+double BLASNAME(cblas_dznrm2)(const BLASINT N, const void *X, const BLASINT incX);
+double BLASNAME(cblas_dzasum)(const BLASINT N, const void *X, const BLASINT incX);
+
+
+/*
+ * Functions having standard 4 prefixes (S D C Z)
+ */
+CBLAS_INDEX BLASNAME(cblas_isamax)(const BLASINT N, const float *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_idamax)(const BLASINT N, const double *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_icamax)(const BLASINT N, const void *X, const BLASINT incX);
+CBLAS_INDEX BLASNAME(cblas_izamax)(const BLASINT N, const void *X, const BLASINT incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 1 BLAS routines
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (s, d, c, z)
+ */
+void BLASNAME(cblas_sswap)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_scopy)(const BLASINT N, const float *X, const BLASINT incX,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_saxpy)(const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *Y, const BLASINT incY);
+
+void BLASNAME(cblas_dswap)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_dcopy)(const BLASINT N, const double *X, const BLASINT incX,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_daxpy)(const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *Y, const BLASINT incY);
+
+void BLASNAME(cblas_cswap)(const BLASINT N, void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_ccopy)(const BLASINT N, const void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_caxpy)(const BLASINT N, const void *alpha, const void *X,
+ const BLASINT incX, void *Y, const BLASINT incY);
+
+void BLASNAME(cblas_zswap)(const BLASINT N, void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zcopy)(const BLASINT N, const void *X, const BLASINT incX,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zaxpy)(const BLASINT N, const void *alpha, const void *X,
+ const BLASINT incX, void *Y, const BLASINT incY);
+
+
+/*
+ * Routines with S and D prefix only
+ */
+void BLASNAME(cblas_srotg)(float *a, float *b, float *c, float *s);
+void BLASNAME(cblas_srotmg)(float *d1, float *d2, float *b1, const float b2, float *P);
+void BLASNAME(cblas_srot)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY, const float c, const float s);
+void BLASNAME(cblas_srotm)(const BLASINT N, float *X, const BLASINT incX,
+ float *Y, const BLASINT incY, const float *P);
+
+void BLASNAME(cblas_drotg)(double *a, double *b, double *c, double *s);
+void BLASNAME(cblas_drotmg)(double *d1, double *d2, double *b1, const double b2, double *P);
+void BLASNAME(cblas_drot)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY, const double c, const double s);
+void BLASNAME(cblas_drotm)(const BLASINT N, double *X, const BLASINT incX,
+ double *Y, const BLASINT incY, const double *P);
+
+
+/*
+ * Routines with S D C Z CS and ZD prefixes
+ */
+void BLASNAME(cblas_sscal)(const BLASINT N, const float alpha, float *X, const BLASINT incX);
+void BLASNAME(cblas_dscal)(const BLASINT N, const double alpha, double *X, const BLASINT incX);
+void BLASNAME(cblas_cscal)(const BLASINT N, const void *alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_zscal)(const BLASINT N, const void *alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_csscal)(const BLASINT N, const float alpha, void *X, const BLASINT incX);
+void BLASNAME(cblas_zdscal)(const BLASINT N, const double alpha, void *X, const BLASINT incX);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 2 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void BLASNAME(cblas_sgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *X, const BLASINT incX, const float beta,
+ float *Y, const BLASINT incY);
+void BLASNAME(cblas_sgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const float alpha,
+ const float *A, const BLASINT lda, const float *X,
+ const BLASINT incX, const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_strmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *Ap, float *X, const BLASINT incX);
+void BLASNAME(cblas_strsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *A, const BLASINT lda, float *X,
+ const BLASINT incX);
+void BLASNAME(cblas_stbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const float *A, const BLASINT lda,
+ float *X, const BLASINT incX);
+void BLASNAME(cblas_stpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const float *Ap, float *X, const BLASINT incX);
+
+void BLASNAME(cblas_dgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *X, const BLASINT incX, const double beta,
+ double *Y, const BLASINT incY);
+void BLASNAME(cblas_dgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const double alpha,
+ const double *A, const BLASINT lda, const double *X,
+ const BLASINT incX, const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dtrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *Ap, double *X, const BLASINT incX);
+void BLASNAME(cblas_dtrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *A, const BLASINT lda, double *X,
+ const BLASINT incX);
+void BLASNAME(cblas_dtbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const double *A, const BLASINT lda,
+ double *X, const BLASINT incX);
+void BLASNAME(cblas_dtpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const double *Ap, double *X, const BLASINT incX);
+
+void BLASNAME(cblas_cgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *X, const BLASINT incX, const void *beta,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_cgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const void *alpha,
+ const void *A, const BLASINT lda, const void *X,
+ const BLASINT incX, const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_ctrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+void BLASNAME(cblas_ctrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda, void *X,
+ const BLASINT incX);
+void BLASNAME(cblas_ctbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ctpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+
+void BLASNAME(cblas_zgemv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *X, const BLASINT incX, const void *beta,
+ void *Y, const BLASINT incY);
+void BLASNAME(cblas_zgbmv)(const enum CBLAS_ORDER order,
+ const enum CBLAS_TRANSPOSE TransA, const BLASINT M, const BLASINT N,
+ const BLASINT KL, const BLASINT KU, const void *alpha,
+ const void *A, const BLASINT lda, const void *X,
+ const BLASINT incX, const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_ztrmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+void BLASNAME(cblas_ztrsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *A, const BLASINT lda, void *X,
+ const BLASINT incX);
+void BLASNAME(cblas_ztbsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const BLASINT K, const void *A, const BLASINT lda,
+ void *X, const BLASINT incX);
+void BLASNAME(cblas_ztpsv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE TransA, const enum CBLAS_DIAG Diag,
+ const BLASINT N, const void *Ap, void *X, const BLASINT incX);
+
+
+/*
+ * Routines with S and D prefixes only
+ */
+void BLASNAME(cblas_ssymv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *A,
+ const BLASINT lda, const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_ssbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const float alpha, const float *A,
+ const BLASINT lda, const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_sspmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *Ap,
+ const float *X, const BLASINT incX,
+ const float beta, float *Y, const BLASINT incY);
+void BLASNAME(cblas_sger)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const float alpha, const float *X, const BLASINT incX,
+ const float *Y, const BLASINT incY, float *A, const BLASINT lda);
+void BLASNAME(cblas_ssyr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *A, const BLASINT lda);
+void BLASNAME(cblas_sspr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, float *Ap);
+void BLASNAME(cblas_ssyr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY, float *A,
+ const BLASINT lda);
+void BLASNAME(cblas_sspr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const float *X,
+ const BLASINT incX, const float *Y, const BLASINT incY, float *A);
+
+void BLASNAME(cblas_dsymv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *A,
+ const BLASINT lda, const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dsbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const double alpha, const double *A,
+ const BLASINT lda, const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dspmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *Ap,
+ const double *X, const BLASINT incX,
+ const double beta, double *Y, const BLASINT incY);
+void BLASNAME(cblas_dger)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const double alpha, const double *X, const BLASINT incX,
+ const double *Y, const BLASINT incY, double *A, const BLASINT lda);
+void BLASNAME(cblas_dsyr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *A, const BLASINT lda);
+void BLASNAME(cblas_dspr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, double *Ap);
+void BLASNAME(cblas_dsyr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, const double *Y, const BLASINT incY, double *A,
+ const BLASINT lda);
+void BLASNAME(cblas_dspr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const double *X,
+ const BLASINT incX, const double *Y, const BLASINT incY, double *A);
+
+
+/*
+ * Routines with C and Z prefixes only
+ */
+void BLASNAME(cblas_chemv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_chbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_chpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *Ap,
+ const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_cgeru)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_cgerc)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_cher)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const void *X, const BLASINT incX,
+ void *A, const BLASINT lda);
+void BLASNAME(cblas_chpr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const float alpha, const void *X,
+ const BLASINT incX, void *A);
+void BLASNAME(cblas_cher2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_chpr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *Ap);
+
+void BLASNAME(cblas_zhemv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zhbmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zhpmv)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const void *alpha, const void *Ap,
+ const void *X, const BLASINT incX,
+ const void *beta, void *Y, const BLASINT incY);
+void BLASNAME(cblas_zgeru)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zgerc)(const enum CBLAS_ORDER order, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zher)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const void *X, const BLASINT incX,
+ void *A, const BLASINT lda);
+void BLASNAME(cblas_zhpr)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo,
+ const BLASINT N, const double alpha, const void *X,
+ const BLASINT incX, void *A);
+void BLASNAME(cblas_zher2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *A, const BLASINT lda);
+void BLASNAME(cblas_zhpr2)(const enum CBLAS_ORDER order, const enum CBLAS_UPLO Uplo, const BLASINT N,
+ const void *alpha, const void *X, const BLASINT incX,
+ const void *Y, const BLASINT incY, void *Ap);
+
+/*
+ * ===========================================================================
+ * Prototypes for level 3 BLAS
+ * ===========================================================================
+ */
+
+/*
+ * Routines with standard 4 prefixes (S, D, C, Z)
+ */
+void BLASNAME(cblas_sgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const float alpha, const float *A,
+ const BLASINT lda, const float *B, const BLASINT ldb,
+ const float beta, float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *B, const BLASINT ldb, const float beta,
+ float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const float *A, const BLASINT lda,
+ const float beta, float *C, const BLASINT ldc);
+void BLASNAME(cblas_ssyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const float *A, const BLASINT lda,
+ const float *B, const BLASINT ldb, const float beta,
+ float *C, const BLASINT ldc);
+void BLASNAME(cblas_strmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ float *B, const BLASINT ldb);
+void BLASNAME(cblas_strsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const float alpha, const float *A, const BLASINT lda,
+ float *B, const BLASINT ldb);
+
+void BLASNAME(cblas_dgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const double alpha, const double *A,
+ const BLASINT lda, const double *B, const BLASINT ldb,
+ const double beta, double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *B, const BLASINT ldb, const double beta,
+ double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const double *A, const BLASINT lda,
+ const double beta, double *C, const BLASINT ldc);
+void BLASNAME(cblas_dsyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const double *A, const BLASINT lda,
+ const double *B, const BLASINT ldb, const double beta,
+ double *C, const BLASINT ldc);
+void BLASNAME(cblas_dtrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ double *B, const BLASINT ldb);
+void BLASNAME(cblas_dtrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const double alpha, const double *A, const BLASINT lda,
+ double *B, const BLASINT ldb);
+
+void BLASNAME(cblas_cgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *B, const BLASINT ldb,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_csymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_csyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_csyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_ctrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+void BLASNAME(cblas_ctrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+
+void BLASNAME(cblas_zgemm)(const enum CBLAS_ORDER Order, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_TRANSPOSE TransB, const BLASINT M, const BLASINT N,
+ const BLASINT K, const void *alpha, const void *A,
+ const BLASINT lda, const void *B, const BLASINT ldb,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsymm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsyrk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zsyr2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_ztrmm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+void BLASNAME(cblas_ztrsm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const enum CBLAS_TRANSPOSE TransA,
+ const enum CBLAS_DIAG Diag, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ void *B, const BLASINT ldb);
+
+
+/*
+ * Routines with prefixes C and Z only
+ */
+void BLASNAME(cblas_chemm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_cherk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const float alpha, const void *A, const BLASINT lda,
+ const float beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_cher2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const float beta,
+ void *C, const BLASINT ldc);
+
+void BLASNAME(cblas_zhemm)(const enum CBLAS_ORDER Order, const enum CBLAS_SIDE Side,
+ const enum CBLAS_UPLO Uplo, const BLASINT M, const BLASINT N,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const void *beta,
+ void *C, const BLASINT ldc);
+void BLASNAME(cblas_zherk)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const double alpha, const void *A, const BLASINT lda,
+ const double beta, void *C, const BLASINT ldc);
+void BLASNAME(cblas_zher2k)(const enum CBLAS_ORDER Order, const enum CBLAS_UPLO Uplo,
+ const enum CBLAS_TRANSPOSE Trans, const BLASINT N, const BLASINT K,
+ const void *alpha, const void *A, const BLASINT lda,
+ const void *B, const BLASINT ldb, const double beta,
+ void *C, const BLASINT ldc);
+
+void BLASNAME(cblas_xerbla)(BLASINT p, const char *rout, const char *form, ...);
diff --git a/numpy/core/src/common/python_xerbla.c b/numpy/core/src/common/python_xerbla.c
index bdf0b9058..d88562b7a 100644
--- a/numpy/core/src/common/python_xerbla.c
+++ b/numpy/core/src/common/python_xerbla.c
@@ -1,4 +1,5 @@
#include "Python.h"
+#include "numpy/npy_common.h"
/*
* From f2c.h, this should be safe unless fortran is set to use 64
@@ -49,3 +50,11 @@ int xerbla_(char *srname, integer *info)
return 0;
}
+
+
+/* Same for LAPACK64_ */
+npy_int64 xerbla_64_(char *srname, npy_int64 *info)
+{
+ integer info_int = (integer)*info;
+ return xerbla_(srname, &info_int);
+}
diff --git a/numpy/core/src/multiarray/arrayobject.c b/numpy/core/src/multiarray/arrayobject.c
index 5ed5b7635..a5cebfbd8 100644
--- a/numpy/core/src/multiarray/arrayobject.c
+++ b/numpy/core/src/multiarray/arrayobject.c
@@ -48,7 +48,7 @@ maintainer email: oliphant.travis@ieee.org
#include "mapping.h"
#include "getset.h"
#include "sequence.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "array_assign.h"
#include "alloc.h"
#include "mem_overlap.h"
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index a82ad7935..e36b95c00 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -36,7 +36,7 @@
#include "cblasfuncs.h"
#include "npy_cblas.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/* check for sequences, but ignore the types numpy considers scalars */
static NPY_INLINE npy_bool
@@ -1775,7 +1775,58 @@ BOOL_scan(FILE *fp, npy_bool *ip, void *NPY_UNUSED(ignore),
}
/**begin repeat
- * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = CFLOAT, CDOUBLE#
+ * #type = npy_cfloat, npy_cdouble#
+ */
+static int
+@fname@_scan(FILE *fp, @type@ *ip, void *NPY_UNUSED(ignore),
+ PyArray_Descr *NPY_UNUSED(ignored))
+{
+ double result;
+ int ret_real, ret_imag;
+
+ ret_real = NumPyOS_ascii_ftolf(fp, &result);
+ @type@ output;
+ // Peek next character
+ char next = getc(fp);
+ if ((next == '+') || (next == '-')) {
+ // Imaginary component specified
+ output.real = result;
+ // Revert peek and read imaginary component
+ ungetc(next, fp);
+ ret_imag = NumPyOS_ascii_ftolf(fp, &result);
+ // Peak next character
+        // Peek next character
+ if ((ret_imag == 1) && (next == 'j')) {
+ // If read is successful and the immediate following char is j
+ output.imag = result;
+ }
+ else {
+ output.imag = 0;
+            // Push back an invalid char to trigger the "not everything was read" error
+ ungetc('a', fp);
+ }
+ }
+ else if (next == 'j') {
+ // Real component not specified
+ output.real = 0;
+ output.imag = result;
+ }
+ else {
+ // Imaginary component not specified
+ output.real = result;
+ output.imag = 0.;
+ // Next character is not + / - / j. Revert peek.
+ ungetc(next, fp);
+ }
+ *(@type@ *)ip = output;
+ return ret_real;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ * #fname = CLONGDOUBLE,
* OBJECT, STRING, UNICODE, VOID,
* DATETIME, TIMEDELTA#
*/
@@ -1867,7 +1918,60 @@ BOOL_fromstr(char *str, void *ip, char **endptr,
}
/**begin repeat
- * #fname = CFLOAT, CDOUBLE, CLONGDOUBLE,
+ * #fname = CFLOAT, CDOUBLE#
+ * #type = npy_cfloat, npy_cdouble#
+ */
+static int
+@fname@_fromstr(char *str, void *ip, char **endptr,
+ PyArray_Descr *NPY_UNUSED(ignore))
+{
+ double result;
+
+ result = NumPyOS_ascii_strtod(str, endptr);
+ @type@ output;
+
+ if (endptr && ((*endptr[0] == '+') || (*endptr[0] == '-'))) {
+ // Imaginary component specified
+ output.real = result;
+ // Reading imaginary component
+ char **prev = endptr;
+ str = *endptr;
+ result = NumPyOS_ascii_strtod(str, endptr);
+ if (endptr && *endptr[0] == 'j') {
+ // Read is successful if the immediate following char is j
+ output.imag = result;
+ // Skip j
+ ++*endptr;
+ }
+ else {
+            /*
+             * Point endptr back at the previous position to trigger the
+             * "not everything was read" error
+             */
+ endptr = prev;
+ output.imag = 0;
+ }
+ }
+ else if (endptr && *endptr[0] == 'j') {
+ // Real component not specified
+ output.real = 0;
+ output.imag = result;
+ // Skip j
+ ++*endptr;
+ }
+ else {
+ // Imaginary component not specified
+ output.real = result;
+ output.imag = 0.;
+ }
+ *(@type@ *)ip = output;
+ return 0;
+}
+/**end repeat**/
+
+
+/**begin repeat
+ * #fname = CLONGDOUBLE,
* OBJECT, STRING, UNICODE, VOID#
*/
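
The CFLOAT/CDOUBLE scan and fromstr loops added above teach the text readers to parse complex literals such as "1+2j", "3j", or a bare real part. A small sketch of the expected behaviour, assuming text-mode fromstring/fromfile dispatch to these converters:

import numpy as np

a = np.fromstring("1+2j 3j -4 5-6j", dtype=np.complex128, sep=" ")
print(a)   # expected: [ 1.+2.j  0.+3.j -4.+0.j  5.-6.j]
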
diff --git a/numpy/core/src/multiarray/buffer.c b/numpy/core/src/multiarray/buffer.c
index b729027ad..0edadee98 100644
--- a/numpy/core/src/multiarray/buffer.c
+++ b/numpy/core/src/multiarray/buffer.c
@@ -11,7 +11,7 @@
#include "npy_pycompat.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "common.h"
#include "numpyos.h"
#include "arrayobject.h"
diff --git a/numpy/core/src/multiarray/common.c b/numpy/core/src/multiarray/common.c
index a71b0818c..c991f7428 100644
--- a/numpy/core/src/multiarray/common.c
+++ b/numpy/core/src/multiarray/common.c
@@ -12,7 +12,7 @@
#include "usertypes.h"
#include "common.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "get_attr_string.h"
#include "mem_overlap.h"
diff --git a/numpy/core/src/multiarray/conversion_utils.c b/numpy/core/src/multiarray/conversion_utils.c
index 5f0ad5817..ca126b4b1 100644
--- a/numpy/core/src/multiarray/conversion_utils.c
+++ b/numpy/core/src/multiarray/conversion_utils.c
@@ -16,7 +16,7 @@
#include "conversion_utils.h"
#include "alloc.h"
-#include "buffer.h"
+#include "npy_buffer.h"
static int
PyArray_PyIntAsInt_ErrMsg(PyObject *o, const char * msg) NPY_GCC_NONNULL(2);
diff --git a/numpy/core/src/multiarray/convert_datatype.c b/numpy/core/src/multiarray/convert_datatype.c
index 025c66013..4326448dc 100644
--- a/numpy/core/src/multiarray/convert_datatype.c
+++ b/numpy/core/src/multiarray/convert_datatype.c
@@ -877,7 +877,13 @@ PyArray_CanCastTypeTo(PyArray_Descr *from, PyArray_Descr *to,
from_order = dtype_kind_to_ordering(from->kind);
to_order = dtype_kind_to_ordering(to->kind);
- return from_order != -1 && from_order <= to_order;
+ if (to->kind == 'm') {
+ /* both types being timedelta is already handled before. */
+ int integer_order = dtype_kind_to_ordering('i');
+ return (from_order != -1) && (from_order <= integer_order);
+ }
+
+ return (from_order != -1) && (from_order <= to_order);
}
else {
return 0;
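
The hunk above narrows the ordering-based fallback so that only kinds ordered at or below integers remain safely castable to timedelta. Expected behaviour at the Python level (a sketch, not asserted by the patch itself):

import numpy as np

print(np.can_cast(np.int64, np.timedelta64))    # expected: True
print(np.can_cast(np.float64, np.timedelta64))  # expected: False with this change
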
diff --git a/numpy/core/src/multiarray/ctors.c b/numpy/core/src/multiarray/ctors.c
index 88ce1e221..c69782cb6 100644
--- a/numpy/core/src/multiarray/ctors.c
+++ b/numpy/core/src/multiarray/ctors.c
@@ -19,7 +19,7 @@
#include "ctors.h"
#include "convert_datatype.h"
#include "shape.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include "lowlevel_strided_loops.h"
#include "methods.h"
#include "_datetime.h"
@@ -453,10 +453,6 @@ copy_and_swap(void *dst, void *src, int itemsize, npy_intp numitems,
}
}
-NPY_NO_EXPORT PyObject *
-_array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
- npy_bool writeable, PyObject *context);
-
/*
* adapted from Numarray,
* a: destination array
@@ -477,6 +473,11 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
if (dst == NULL)
dst = a;
+ /*
+ * This code is to ensure that the sequence access below will
+ * return a lower-dimensional sequence.
+ */
+
/* INCREF on entry DECREF on exit */
Py_INCREF(s);
@@ -502,11 +503,6 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
return 0;
}
- /*
- * This code is to ensure that the sequence access below will
- * return a lower-dimensional sequence.
- */
-
if (dim > PyArray_NDIM(a)) {
PyErr_Format(PyExc_ValueError,
"setArrayFromSequence: sequence/array dimensions mismatch.");
@@ -517,27 +513,6 @@ setArrayFromSequence(PyArrayObject *a, PyObject *s,
if (slen < 0) {
goto fail;
}
- if (slen > 0) {
- /* gh-13659: try __array__ before using s as a sequence */
- PyObject *tmp = _array_from_array_like(s, /*dtype*/NULL, /*writeable*/0,
- /*context*/NULL);
- if (tmp == NULL) {
- goto fail;
- }
- else if (tmp == Py_NotImplemented) {
- Py_DECREF(tmp);
- }
- else {
- int r = PyArray_CopyInto(dst, (PyArrayObject *)tmp);
- Py_DECREF(tmp);
- if (r < 0) {
- goto fail;
- }
- Py_DECREF(s);
- return 0;
- }
- }
-
/*
* Either the dimensions match, or the sequence has length 1 and can
* be broadcast to the destination.
@@ -1609,90 +1584,6 @@ fail:
}
-
-/*
- * Attempts to extract an array from an array-like object.
- *
- * array-like is defined as either
- *
- * * an object implementing the PEP 3118 buffer interface;
- * * an object with __array_struct__ or __array_interface__ attributes;
- * * an object with an __array__ function.
- *
- * Returns Py_NotImplemented if a given object is not array-like;
- * PyArrayObject* in case of success and NULL in case of failure.
- */
-NPY_NO_EXPORT PyObject *
-_array_from_array_like(PyObject *op, PyArray_Descr *requested_dtype,
- npy_bool writeable, PyObject *context) {
- PyObject* tmp;
-
- /* If op supports the PEP 3118 buffer interface */
- if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
- PyObject *memoryview = PyMemoryView_FromObject(op);
- if (memoryview == NULL) {
- PyErr_Clear();
- }
- else {
- tmp = _array_from_buffer_3118(memoryview);
- Py_DECREF(memoryview);
- if (tmp == NULL) {
- return NULL;
- }
-
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *) tmp, "PEP 3118 buffer") < 0) {
- Py_DECREF(tmp);
- return NULL;
- }
-
- return tmp;
- }
- }
-
- /* If op supports the __array_struct__ or __array_interface__ interface */
- tmp = PyArray_FromStructInterface(op);
- if (tmp == NULL) {
- return NULL;
- }
- if (tmp == Py_NotImplemented) {
- tmp = PyArray_FromInterface(op);
- if (tmp == NULL) {
- return NULL;
- }
- }
-
- /*
- * If op supplies the __array__ function.
- * The documentation says this should produce a copy, so
- * we skip this method if writeable is true, because the intent
- * of writeable is to modify the operand.
- * XXX: If the implementation is wrong, and/or if actual
- * usage requires this behave differently,
- * this should be changed!
- */
- if (!writeable && tmp == Py_NotImplemented) {
- tmp = PyArray_FromArrayAttr(op, requested_dtype, context);
- if (tmp == NULL) {
- return NULL;
- }
- }
-
- if (tmp != Py_NotImplemented) {
- if (writeable
- && PyArray_FailUnlessWriteable((PyArrayObject *) tmp,
- "array interface object") < 0) {
- Py_DECREF(tmp);
- return NULL;
- }
- return tmp;
- }
-
- Py_INCREF(Py_NotImplemented);
- return Py_NotImplemented;
-}
-
-
/*NUMPY_API
* Retrieves the array parameters for viewing/converting an arbitrary
* PyObject* to a NumPy array. This allows the "innate type and shape"
@@ -1800,20 +1691,69 @@ PyArray_GetArrayParamsFromObject(PyObject *op,
return 0;
}
- /* If op is an array-like */
- tmp = _array_from_array_like(op, requested_dtype, writeable, context);
+ /* If op supports the PEP 3118 buffer interface */
+ if (!PyBytes_Check(op) && !PyUnicode_Check(op)) {
+
+ PyObject *memoryview = PyMemoryView_FromObject(op);
+ if (memoryview == NULL) {
+ PyErr_Clear();
+ }
+ else {
+ PyObject *arr = _array_from_buffer_3118(memoryview);
+ Py_DECREF(memoryview);
+ if (arr == NULL) {
+ return -1;
+ }
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)arr, "PEP 3118 buffer") < 0) {
+ Py_DECREF(arr);
+ return -1;
+ }
+ *out_arr = (PyArrayObject *)arr;
+ return 0;
+ }
+ }
+
+ /* If op supports the __array_struct__ or __array_interface__ interface */
+ tmp = PyArray_FromStructInterface(op);
if (tmp == NULL) {
return -1;
}
- else if (tmp != Py_NotImplemented) {
- *out_arr = (PyArrayObject*) tmp;
- return 0;
+ if (tmp == Py_NotImplemented) {
+ tmp = PyArray_FromInterface(op);
+ if (tmp == NULL) {
+ return -1;
+ }
}
- else {
- Py_DECREF(Py_NotImplemented);
+ if (tmp != Py_NotImplemented) {
+ if (writeable
+ && PyArray_FailUnlessWriteable((PyArrayObject *)tmp,
+ "array interface object") < 0) {
+ Py_DECREF(tmp);
+ return -1;
+ }
+ *out_arr = (PyArrayObject *)tmp;
+ return (*out_arr) == NULL ? -1 : 0;
+ }
+
+ /*
+ * If op supplies the __array__ function.
+ * The documentation says this should produce a copy, so
+ * we skip this method if writeable is true, because the intent
+ * of writeable is to modify the operand.
+ * XXX: If the implementation is wrong, and/or if actual
+ * usage requires this behave differently,
+ * this should be changed!
+ */
+ if (!writeable) {
+ tmp = PyArray_FromArrayAttr(op, requested_dtype, context);
+ if (tmp != Py_NotImplemented) {
+ *out_arr = (PyArrayObject *)tmp;
+ return (*out_arr) == NULL ? -1 : 0;
+ }
}
- /* Try to treat op as a list of lists or array-like objects. */
+ /* Try to treat op as a list of lists */
if (!writeable && PySequence_Check(op)) {
int check_it, stop_at_string, stop_at_tuple;
int type_num, type;
diff --git a/numpy/core/src/multiarray/descriptor.c b/numpy/core/src/multiarray/descriptor.c
index 522b69307..d4e18e457 100644
--- a/numpy/core/src/multiarray/descriptor.c
+++ b/numpy/core/src/multiarray/descriptor.c
@@ -19,7 +19,7 @@
#include "descriptor.h"
#include "alloc.h"
#include "assert.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/*
* offset: A starting offset.
diff --git a/numpy/core/src/multiarray/getset.c b/numpy/core/src/multiarray/getset.c
index 116e37ce5..6e5d480d0 100644
--- a/numpy/core/src/multiarray/getset.c
+++ b/numpy/core/src/multiarray/getset.c
@@ -20,7 +20,7 @@
#include "arrayobject.h"
#include "mem_overlap.h"
#include "alloc.h"
-#include "buffer.h"
+#include "npy_buffer.h"
/******************* array attribute get and set routines ******************/
diff --git a/numpy/core/src/multiarray/multiarraymodule.c b/numpy/core/src/multiarray/multiarraymodule.c
index ab70367c5..9169814c2 100644
--- a/numpy/core/src/multiarray/multiarraymodule.c
+++ b/numpy/core/src/multiarray/multiarraymodule.c
@@ -1115,6 +1115,14 @@ _pyarray_correlate(PyArrayObject *ap1, PyArrayObject *ap2, int typenum,
n1 = PyArray_DIMS(ap1)[0];
n2 = PyArray_DIMS(ap2)[0];
+ if (n1 == 0) {
+ PyErr_SetString(PyExc_ValueError, "first array argument cannot be empty");
+ return NULL;
+ }
+ if (n2 == 0) {
+ PyErr_SetString(PyExc_ValueError, "second array argument cannot be empty");
+ return NULL;
+ }
if (n1 < n2) {
ret = ap1;
ap1 = ap2;
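
With the checks above, an empty first or second argument to the correlation loop is rejected up front. Expected behaviour from Python, as a sketch:

import numpy as np

try:
    np.correlate(np.array([], dtype=float), np.ones(3))
except ValueError as exc:
    print("ValueError:", exc)   # expected: "first array argument cannot be empty"
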
diff --git a/numpy/core/src/multiarray/buffer.h b/numpy/core/src/multiarray/npy_buffer.h
index fae413c85..fae413c85 100644
--- a/numpy/core/src/multiarray/buffer.h
+++ b/numpy/core/src/multiarray/npy_buffer.h
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index 9adca6773..5da7f7738 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -28,7 +28,7 @@
#include "npy_import.h"
#include "dragon4.h"
#include "npy_longdouble.h"
-#include "buffer.h"
+#include "npy_buffer.h"
#include <stdlib.h>
@@ -4060,8 +4060,11 @@ initialize_casting_tables(void)
_npy_can_cast_safely_table[_FROM_NUM][NPY_STRING] = 1;
_npy_can_cast_safely_table[_FROM_NUM][NPY_UNICODE] = 1;
- /* Allow casts from any integer to the TIMEDELTA type */
-#if @from_isint@ || @from_isuint@
+#if @from_isint@ && NPY_SIZEOF_TIMEDELTA >= _FROM_BSIZE
+ /* Allow casts from smaller or equal signed integers to the TIMEDELTA type */
+ _npy_can_cast_safely_table[_FROM_NUM][NPY_TIMEDELTA] = 1;
+#elif @from_isuint@ && NPY_SIZEOF_TIMEDELTA > _FROM_BSIZE
+ /* Allow casts from smaller unsigned integers to the TIMEDELTA type */
_npy_can_cast_safely_table[_FROM_NUM][NPY_TIMEDELTA] = 1;
#endif
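
The table entries above keep signed integers no wider than timedelta safely castable and drop same-width unsigned integers. A hedged check of the expected results:

import numpy as np

print(np.can_cast(np.int64, np.timedelta64))   # expected: True  (signed, same width)
print(np.can_cast(np.uint64, np.timedelta64))  # expected: False (unsigned, same width)
print(np.can_cast(np.uint32, np.timedelta64))  # expected: True  (unsigned, narrower)
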
diff --git a/numpy/core/src/npysort/npysort_common.h b/numpy/core/src/npysort/npysort_common.h
index 5fd03b96f..30c0d47f3 100644
--- a/numpy/core/src/npysort/npysort_common.h
+++ b/numpy/core/src/npysort/npysort_common.h
@@ -329,6 +329,14 @@ UNICODE_LT(const npy_ucs4 *s1, const npy_ucs4 *s2, size_t len)
NPY_INLINE static int
DATETIME_LT(npy_datetime a, npy_datetime b)
{
+ if (a == NPY_DATETIME_NAT) {
+ return 0;
+ }
+
+ if (b == NPY_DATETIME_NAT) {
+ return 1;
+ }
+
return a < b;
}
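
DATETIME_LT now treats NaT as greater than every other value, so sorting is expected to place NaT at the end, mirroring how NaN is handled for floats. A sketch of the expected result:

import numpy as np

a = np.array(['2001-01-02', 'NaT', '1999-12-31'], dtype='datetime64[D]')
print(np.sort(a))   # expected: ['1999-12-31' '2001-01-02' 'NaT']
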
diff --git a/numpy/core/src/umath/loops.c.src b/numpy/core/src/umath/loops.c.src
index e6d8eca0d..32aac3ff7 100644
--- a/numpy/core/src/umath/loops.c.src
+++ b/numpy/core/src/umath/loops.c.src
@@ -54,210 +54,123 @@
** GENERIC FLOAT LOOPS **
*****************************************************************************/
+/* direct loops using a suitable callback */
-typedef float halfUnaryFunc(npy_half x);
-typedef float floatUnaryFunc(float x);
-typedef double doubleUnaryFunc(double x);
-typedef npy_longdouble longdoubleUnaryFunc(npy_longdouble x);
-typedef npy_half halfBinaryFunc(npy_half x, npy_half y);
-typedef float floatBinaryFunc(float x, float y);
-typedef double doubleBinaryFunc(double x, double y);
-typedef npy_longdouble longdoubleBinaryFunc(npy_longdouble x, npy_longdouble y);
-
+/**begin repeat
+ * #c = e, f, d, g#
+ * #type = npy_half, npy_float, npy_double, npy_longdouble#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_e_e(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- halfUnaryFunc *f = (halfUnaryFunc *)func;
+ typedef @type@ func_type(@type@);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- const npy_half in1 = *(npy_half *)ip1;
- *(npy_half *)op1 = f(in1);
+ const @type@ in1 = *(@type@ *)ip1;
+ *(@type@ *)op1 = f(in1);
}
}
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_e_e_As_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- floatUnaryFunc *f = (floatUnaryFunc *)func;
- UNARY_LOOP {
- const float in1 = npy_half_to_float(*(npy_half *)ip1);
- *(npy_half *)op1 = npy_float_to_half(f(in1));
+ typedef @type@ func_type(@type@, @type@);
+ func_type *f = (func_type *)func;
+ BINARY_LOOP {
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ in2 = *(@type@ *)ip2;
+ *(@type@ *)op1 = f(in1, in2);
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_e_e_As_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
- UNARY_LOOP {
- const double in1 = npy_half_to_double(*(npy_half *)ip1);
- *(npy_half *)op1 = npy_double_to_half(f(in1));
- }
-}
+/**end repeat**/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_f_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatUnaryFunc *f = (floatUnaryFunc *)func;
- UNARY_LOOP {
- const float in1 = *(float *)ip1;
- *(float *)op1 = f(in1);
- }
-}
+/* indirect loops with casting */
+/**begin repeat
+ * #c1 = e, e, f#
+ * #type1 = npy_half, npy_half, npy_float#
+ * #c2 = f, d, d#
+ * #type2 = npy_float, npy_double, npy_double#
+ *
+ * #conv12 = npy_half_to_float, npy_half_to_double, (double)#
+ * #conv21 = npy_float_to_half, npy_double_to_half, (float)#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_f_f_As_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c1@_@c1@_As_@c2@_@c2@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
+ typedef @type2@ func_type(@type2@);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- const float in1 = *(float *)ip1;
- *(float *)op1 = (float)f((double)in1);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ee_e(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- halfBinaryFunc *f = (halfBinaryFunc *)func;
- BINARY_LOOP {
- npy_half in1 = *(npy_half *)ip1;
- npy_half in2 = *(npy_half *)ip2;
- *(npy_half *)op1 = f(in1, in2);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ee_e_As_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- floatBinaryFunc *f = (floatBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = npy_half_to_float(*(npy_half *)ip1);
- float in2 = npy_half_to_float(*(npy_half *)ip2);
- *(npy_half *)op1 = npy_float_to_half(f(in1, in2));
+ const @type2@ in1 = @conv12@(*(@type1@ *)ip1);
+ *(@type1@ *)op1 = @conv21@(f(in1));
}
}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ee_e_As_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
- BINARY_LOOP {
- double in1 = npy_half_to_double(*(npy_half *)ip1);
- double in2 = npy_half_to_double(*(npy_half *)ip2);
- *(npy_half *)op1 = npy_double_to_half(f(in1, in2));
- }
-}
-
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_ff_f(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c1@@c1@_@c1@_As_@c2@@c2@_@c2@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- floatBinaryFunc *f = (floatBinaryFunc *)func;
+ typedef @type2@ func_type(@type2@, @type2@);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
- float in1 = *(float *)ip1;
- float in2 = *(float *)ip2;
- *(float *)op1 = f(in1, in2);
+ const @type2@ in1 = @conv12@(*(@type1@ *)ip1);
+ const @type2@ in2 = @conv12@(*(@type1@ *)ip2);
+ *(@type1@ *)op1 = @conv21@(f(in1, in2));
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_ff_f_As_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
- BINARY_LOOP {
- float in1 = *(float *)ip1;
- float in2 = *(float *)ip2;
- *(float *)op1 = (double)f((double)in1, (double)in2);
- }
-}
+/**end repeat**/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_d_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleUnaryFunc *f = (doubleUnaryFunc *)func;
- UNARY_LOOP {
- double in1 = *(double *)ip1;
- *(double *)op1 = f(in1);
- }
-}
+/******************************************************************************
+ ** GENERIC COMPLEX LOOPS **
+ *****************************************************************************/
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_dd_d(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- doubleBinaryFunc *f = (doubleBinaryFunc *)func;
- BINARY_LOOP {
- double in1 = *(double *)ip1;
- double in2 = *(double *)ip2;
- *(double *)op1 = f(in1, in2);
- }
-}
+/* direct loops using a suitable callback */
+/**begin repeat
+ * #c = F, D, G#
+ * #type = npy_cfloat, npy_cdouble, npy_clongdouble#
+ **/
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_g_g(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- longdoubleUnaryFunc *f = (longdoubleUnaryFunc *)func;
+ typedef void func_type(@type@ *, @type@ *);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
- npy_longdouble in1 = *(npy_longdouble *)ip1;
- *(npy_longdouble *)op1 = f(in1);
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ *out = (@type@ *)op1;
+ f(&in1, out);
}
}
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_gg_g(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
+PyUFunc_@c@@c@_@c@(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- longdoubleBinaryFunc *f = (longdoubleBinaryFunc *)func;
+ typedef void func_type(@type@ *, @type@ *, @type@ *);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
- npy_longdouble in1 = *(npy_longdouble *)ip1;
- npy_longdouble in2 = *(npy_longdouble *)ip2;
- *(npy_longdouble *)op1 = f(in1, in2);
+ @type@ in1 = *(@type@ *)ip1;
+ @type@ in2 = *(@type@ *)ip2;
+ @type@ *out = (@type@ *)op1;
+ f(&in1, &in2, out);
}
}
+/**end repeat**/
-
-/******************************************************************************
- ** GENERIC COMPLEX LOOPS **
- *****************************************************************************/
-
-
-typedef void cdoubleUnaryFunc(npy_cdouble *x, npy_cdouble *r);
-typedef void cfloatUnaryFunc(npy_cfloat *x, npy_cfloat *r);
-typedef void clongdoubleUnaryFunc(npy_clongdouble *x, npy_clongdouble *r);
-typedef void cdoubleBinaryFunc(npy_cdouble *x, npy_cdouble *y, npy_cdouble *r);
-typedef void cfloatBinaryFunc(npy_cfloat *x, npy_cfloat *y, npy_cfloat *r);
-typedef void clongdoubleBinaryFunc(npy_clongdouble *x, npy_clongdouble *y,
- npy_clongdouble *r);
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_F_F(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cfloatUnaryFunc *f = (cfloatUnaryFunc *)func;
- UNARY_LOOP {
- npy_cfloat in1 = *(npy_cfloat *)ip1;
- npy_cfloat *out = (npy_cfloat *)op1;
- f(&in1, out);
- }
-}
-
+/* indirect loops with casting */
/*UFUNC_API*/
NPY_NO_EXPORT void
PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func;
+ typedef void func_type(npy_cdouble *, npy_cdouble *);
+ func_type *f = (func_type *)func;
UNARY_LOOP {
npy_cdouble tmp, out;
tmp.real = (double)((float *)ip1)[0];
@@ -270,22 +183,10 @@ PyUFunc_F_F_As_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *fun
/*UFUNC_API*/
NPY_NO_EXPORT void
-PyUFunc_FF_F(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cfloatBinaryFunc *f = (cfloatBinaryFunc *)func;
- BINARY_LOOP {
- npy_cfloat in1 = *(npy_cfloat *)ip1;
- npy_cfloat in2 = *(npy_cfloat *)ip2;
- npy_cfloat *out = (npy_cfloat *)op1;
- f(&in1, &in2, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
{
- cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func;
+ typedef void func_type(npy_cdouble *, npy_cdouble *, npy_cdouble *);
+ func_type *f = (func_type *)func;
BINARY_LOOP {
npy_cdouble tmp1, tmp2, out;
tmp1.real = (double)((float *)ip1)[0];
@@ -298,56 +199,6 @@ PyUFunc_FF_F_As_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *f
}
}
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_D_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cdoubleUnaryFunc *f = (cdoubleUnaryFunc *)func;
- UNARY_LOOP {
- npy_cdouble in1 = *(npy_cdouble *)ip1;
- npy_cdouble *out = (npy_cdouble *)op1;
- f(&in1, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_DD_D(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- cdoubleBinaryFunc *f = (cdoubleBinaryFunc *)func;
- BINARY_LOOP {
- npy_cdouble in1 = *(npy_cdouble *)ip1;
- npy_cdouble in2 = *(npy_cdouble *)ip2;
- npy_cdouble *out = (npy_cdouble *)op1;
- f(&in1, &in2, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_G_G(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- clongdoubleUnaryFunc *f = (clongdoubleUnaryFunc *)func;
- UNARY_LOOP {
- npy_clongdouble in1 = *(npy_clongdouble *)ip1;
- npy_clongdouble *out = (npy_clongdouble *)op1;
- f(&in1, out);
- }
-}
-
-/*UFUNC_API*/
-NPY_NO_EXPORT void
-PyUFunc_GG_G(char **args, npy_intp *dimensions, npy_intp *steps, void *func)
-{
- clongdoubleBinaryFunc *f = (clongdoubleBinaryFunc *)func;
- BINARY_LOOP {
- npy_clongdouble in1 = *(npy_clongdouble *)ip1;
- npy_clongdouble in2 = *(npy_clongdouble *)ip2;
- npy_clongdouble *out = (npy_clongdouble *)op1;
- f(&in1, &in2, out);
- }
-}
-
/******************************************************************************
 **                           GENERIC OBJECT LOOPS                           **
@@ -1246,6 +1097,12 @@ NPY_NO_EXPORT void
}
NPY_NO_EXPORT void
+@TYPE@_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ UNARY_LOOP_FAST(npy_bool, npy_bool, (void)in; *out = NPY_FALSE);
+}
+
+NPY_NO_EXPORT void
@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data))
{
OUTPUT_LOOP {
@@ -1306,6 +1163,29 @@ NPY_NO_EXPORT void
}
/**end repeat1**/
+/**begin repeat1
+ * #kind = fmax, fmin#
+ * #OP = >=, <=#
+ **/
+NPY_NO_EXPORT void
+@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func))
+{
+ BINARY_LOOP {
+ const @type@ in1 = *(@type@ *)ip1;
+ const @type@ in2 = *(@type@ *)ip2;
+ if (in1 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in2;
+ }
+ else if (in2 == NPY_DATETIME_NAT) {
+ *((@type@ *)op1) = in1;
+ }
+ else {
+ *((@type@ *)op1) = in1 @OP@ in2 ? in1 : in2;
+ }
+ }
+}
+/**end repeat1**/
+
/**end repeat**/
NPY_NO_EXPORT void
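The fmax/fmin inner loops added above skip NaT operands instead of propagating them, mirroring how fmax/fmin treat NaN for floats, while maximum/minimum continue to return NaT (see the assertions added to test_datetime.py below). A rough sketch of the user-visible difference after this change:

    >>> import numpy as np
    >>> a = np.array(3, dtype='M8[h]')
    >>> nat = np.datetime64('NaT', 'h')
    >>> bool(np.fmax(a, nat) == a)           # NaT operand is ignored
    True
    >>> bool(np.isnat(np.maximum(a, nat)))   # maximum still propagates NaT
    True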
diff --git a/numpy/core/src/umath/loops.h.src b/numpy/core/src/umath/loops.h.src
index 0ef14a809..7558de0bb 100644
--- a/numpy/core/src/umath/loops.h.src
+++ b/numpy/core/src/umath/loops.h.src
@@ -478,6 +478,11 @@ NPY_NO_EXPORT void
@TYPE@_isfinite(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
NPY_NO_EXPORT void
+@TYPE@_isinf(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
+
+#define @TYPE@_isnan @TYPE@_isnat
+
+NPY_NO_EXPORT void
@TYPE@__ones_like(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(data));
/**begin repeat1
@@ -489,8 +494,7 @@ NPY_NO_EXPORT void
/**end repeat1**/
/**begin repeat1
- * #kind = maximum, minimum#
- * #OP = >, <#
+ * #kind = maximum, minimum, fmin, fmax#
**/
NPY_NO_EXPORT void
@TYPE@_@kind@(char **args, npy_intp *dimensions, npy_intp *steps, void *NPY_UNUSED(func));
@@ -554,10 +558,6 @@ TIMEDELTA_mm_qm_divmod(char **args, npy_intp *dimensions, npy_intp *steps, void
#define TIMEDELTA_mq_m_floor_divide TIMEDELTA_mq_m_divide
#define TIMEDELTA_md_m_floor_divide TIMEDELTA_md_m_divide
/* #define TIMEDELTA_mm_d_floor_divide TIMEDELTA_mm_d_divide */
-#define TIMEDELTA_fmin TIMEDELTA_minimum
-#define TIMEDELTA_fmax TIMEDELTA_maximum
-#define DATETIME_fmin DATETIME_minimum
-#define DATETIME_fmax DATETIME_maximum
/*
*****************************************************************************
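Together with the @TYPE@_isinf loop added in loops.c.src, the isnan -> isnat alias gives datetime and timedelta arrays consistent isnan/isinf/isfinite behavior (checked per unit and byte order by the new tests below). Illustrative only:

    >>> import numpy as np
    >>> arr = np.array([123, -321, 'NaT'], dtype='datetime64[ms]')
    >>> np.isnan(arr)                        # aliased to isnat
    array([False, False,  True])
    >>> np.isinf(arr)                        # datetimes are never infinite
    array([False, False, False])
    >>> np.isfinite(arr)
    array([ True,  True, False])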
diff --git a/numpy/core/tests/test_api.py b/numpy/core/tests/test_api.py
index 32e2ea537..89fc2b0b9 100644
--- a/numpy/core/tests/test_api.py
+++ b/numpy/core/tests/test_api.py
@@ -296,7 +296,7 @@ def test_array_astype():
)
def test_array_astype_warning(t):
# test ComplexWarning when casting from complex to float or int
- a = np.array(10, dtype=np.complex)
+ a = np.array(10, dtype=np.complex_)
assert_warns(np.ComplexWarning, a.astype, t)
def test_copyto_fromscalar():
diff --git a/numpy/core/tests/test_datetime.py b/numpy/core/tests/test_datetime.py
index a756dc7e7..41b84a69f 100644
--- a/numpy/core/tests/test_datetime.py
+++ b/numpy/core/tests/test_datetime.py
@@ -75,6 +75,15 @@ class TestDateTime(object):
# Can cast safely/same_kind from integer to timedelta
assert_(np.can_cast('i8', 'm8', casting='same_kind'))
assert_(np.can_cast('i8', 'm8', casting='safe'))
+ assert_(np.can_cast('i4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('i4', 'm8', casting='safe'))
+ assert_(np.can_cast('u4', 'm8', casting='same_kind'))
+ assert_(np.can_cast('u4', 'm8', casting='safe'))
+
+ # Cannot cast safely from unsigned integer of the same size, which
+ # could overflow
+ assert_(np.can_cast('u8', 'm8', casting='same_kind'))
+ assert_(not np.can_cast('u8', 'm8', casting='safe'))
# Cannot cast safely/same_kind from float to timedelta
assert_(not np.can_cast('f4', 'm8', casting='same_kind'))
@@ -136,6 +145,38 @@ class TestDateTime(object):
assert_(np.datetime64('NaT') != np.datetime64('NaT', 'us'))
assert_(np.datetime64('NaT', 'us') != np.datetime64('NaT'))
+
+
+ @pytest.mark.parametrize("size", [
+ 3, 21, 217, 1000])
+ def test_nat_argsort_stability(self, size):
+ # NaT < NaT should be False internally for
+ # sort stability
+ expected = np.arange(size)
+ arr = np.tile(np.datetime64('NaT'), size)
+ assert_equal(np.argsort(arr, kind='mergesort'), expected)
+
+ @pytest.mark.parametrize("arr, expected", [
+ # the example provided in gh-12629
+ (np.array(['NaT', 1, 2, 3], dtype='M8[ns]'),
+ np.array([1, 2, 3, 'NaT'], dtype='M8[ns]')),
+ # multiple NaTs
+ (np.array(['NaT', 9, 'NaT', -707], dtype='M8[s]'),
+ np.array([-707, 9, 'NaT', 'NaT'], dtype='M8[s]')),
+ # this sort explores another code path for NaT
+ (np.array([1, -2, 3, 'NaT'], dtype='M8[ns]'),
+ np.array([-2, 1, 3, 'NaT'], dtype='M8[ns]')),
+ # 2-D array
+ (np.array([[51, -220, 'NaT'],
+ [-17, 'NaT', -90]], dtype='M8[us]'),
+ np.array([[-220, 51, 'NaT'],
+ [-90, -17, 'NaT']], dtype='M8[us]')),
+ ])
+ def test_sort_nat(self, arr, expected):
+ # fix for gh-12629; NaT sorting to end of array
+ arr.sort()
+ assert_equal(arr, expected)
+
def test_datetime_scalar_construction(self):
# Construct with different units
assert_equal(np.datetime64('1950-03-12', 'D'),
@@ -1361,6 +1402,10 @@ class TestDateTime(object):
assert_equal(np.minimum(dtnat, a), dtnat)
assert_equal(np.maximum(a, dtnat), dtnat)
assert_equal(np.maximum(dtnat, a), dtnat)
+ assert_equal(np.fmin(dtnat, a), a)
+ assert_equal(np.fmin(a, dtnat), a)
+ assert_equal(np.fmax(dtnat, a), a)
+ assert_equal(np.fmax(a, dtnat), a)
# Also do timedelta
a = np.array(3, dtype='m8[h]')
@@ -2232,7 +2277,7 @@ class TestDateTime(object):
continue
assert_raises(TypeError, np.isnat, np.zeros(10, t))
- def test_isfinite(self):
+ def test_isfinite_scalar(self):
assert_(not np.isfinite(np.datetime64('NaT', 'ms')))
assert_(not np.isfinite(np.datetime64('NaT', 'ns')))
assert_(np.isfinite(np.datetime64('2038-01-19T03:14:07')))
@@ -2240,18 +2285,25 @@ class TestDateTime(object):
assert_(not np.isfinite(np.timedelta64('NaT', "ms")))
assert_(np.isfinite(np.timedelta64(34, "ms")))
- res = np.array([True, True, False])
- for unit in ['Y', 'M', 'W', 'D',
- 'h', 'm', 's', 'ms', 'us',
- 'ns', 'ps', 'fs', 'as']:
- arr = np.array([123, -321, "NaT"], dtype='<datetime64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='>datetime64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='<timedelta64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
- arr = np.array([123, -321, "NaT"], dtype='>timedelta64[%s]' % unit)
- assert_equal(np.isfinite(arr), res)
+ @pytest.mark.parametrize('unit', ['Y', 'M', 'W', 'D', 'h', 'm', 's', 'ms',
+ 'us', 'ns', 'ps', 'fs', 'as'])
+ @pytest.mark.parametrize('dstr', ['<datetime64[%s]', '>datetime64[%s]',
+ '<timedelta64[%s]', '>timedelta64[%s]'])
+ def test_isfinite_isinf_isnan_units(self, unit, dstr):
+ '''check isfinite, isinf, isnan for all units of <M, >M, <m, >m dtypes
+ '''
+ arr_val = [123, -321, "NaT"]
+ arr = np.array(arr_val, dtype= dstr % unit)
+ pos = np.array([True, True, False])
+ neg = np.array([False, False, True])
+ false = np.array([False, False, False])
+ assert_equal(np.isfinite(arr), pos)
+ assert_equal(np.isinf(arr), false)
+ assert_equal(np.isnan(arr), neg)
+
+ def test_assert_equal(self):
+ assert_raises(AssertionError, assert_equal,
+ np.datetime64('nat'), np.timedelta64('nat'))
def test_corecursive_input(self):
# construct a co-recursive list
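In short, the casting change tested above lets any smaller integer type cast safely to timedelta64, while a same-size unsigned integer remains only same_kind-castable because its range could overflow the signed timedelta. For example:

    >>> import numpy as np
    >>> np.can_cast('u4', 'm8', casting='safe')
    True
    >>> np.can_cast('u8', 'm8', casting='safe')   # could overflow
    False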
diff --git a/numpy/core/tests/test_deprecations.py b/numpy/core/tests/test_deprecations.py
index 214372524..de1d76857 100644
--- a/numpy/core/tests/test_deprecations.py
+++ b/numpy/core/tests/test_deprecations.py
@@ -172,7 +172,7 @@ class TestComparisonDeprecations(_DeprecationTestCase):
# (warning is issued a couple of times here)
self.assert_deprecated(op, args=(a, a[:-1]), num=None)
- # Element comparison error (numpy array can't be compared).
+ # ragged array comparison returns True/False
a = np.array([1, np.array([1,2,3])], dtype=object)
b = np.array([1, np.array([1,2,3])], dtype=object)
self.assert_deprecated(op, args=(a, b), num=None)
diff --git a/numpy/core/tests/test_dtype.py b/numpy/core/tests/test_dtype.py
index d2fbbae5b..e18e66c64 100644
--- a/numpy/core/tests/test_dtype.py
+++ b/numpy/core/tests/test_dtype.py
@@ -25,7 +25,7 @@ def assert_dtype_not_equal(a, b):
class TestBuiltin(object):
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
- np.unicode])
+ np.compat.unicode])
def test_run(self, t):
"""Only test hash runs at all."""
dt = np.dtype(t)
@@ -986,7 +986,7 @@ class TestPickling(object):
assert_equal(x[0], y[0])
@pytest.mark.parametrize('t', [int, float, complex, np.int32, str, object,
- np.unicode, bool])
+ np.compat.unicode, bool])
def test_builtin(self, t):
self.check_pickling(np.dtype(t))
diff --git a/numpy/core/tests/test_function_base.py b/numpy/core/tests/test_function_base.py
index 84b60b19c..c8a7cb6ce 100644
--- a/numpy/core/tests/test_function_base.py
+++ b/numpy/core/tests/test_function_base.py
@@ -351,14 +351,20 @@ class TestLinspace(object):
arange(j+1, dtype=int))
def test_retstep(self):
- y = linspace(0, 1, 2, retstep=True)
- assert_(isinstance(y, tuple) and len(y) == 2)
- for num in (0, 1):
- for ept in (False, True):
+ for num in [0, 1, 2]:
+ for ept in [False, True]:
y = linspace(0, 1, num, endpoint=ept, retstep=True)
- assert_(isinstance(y, tuple) and len(y) == 2 and
- len(y[0]) == num and isnan(y[1]),
- 'num={0}, endpoint={1}'.format(num, ept))
+ assert isinstance(y, tuple) and len(y) == 2
+ if num == 2:
+ y0_expect = [0.0, 1.0] if ept else [0.0, 0.5]
+ assert_array_equal(y[0], y0_expect)
+ assert_equal(y[1], y0_expect[1])
+ elif num == 1 and not ept:
+ assert_array_equal(y[0], [0.0])
+ assert_equal(y[1], 1.0)
+ else:
+ assert_array_equal(y[0], [0.0][:num])
+ assert isnan(y[1])
def test_object(self):
start = array(1, dtype='O')
diff --git a/numpy/core/tests/test_longdouble.py b/numpy/core/tests/test_longdouble.py
index 59ac5923c..2b6e1c5a2 100644
--- a/numpy/core/tests/test_longdouble.py
+++ b/numpy/core/tests/test_longdouble.py
@@ -71,6 +71,38 @@ def test_fromstring():
err_msg="reading '%s'" % s)
+def test_fromstring_complex():
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing between separator
+ assert_equal(np.fromstring("1, 2 , 3 ,4", sep=",", dtype=ctype),
+ np.array([1., 2., 3., 4.]))
+ # Real component not specified
+ assert_equal(np.fromstring("1j, -2j, 3j, 4e1j", sep=",", dtype=ctype),
+ np.array([1.j, -2.j, 3.j, 40.j]))
+ # Both components specified
+ assert_equal(np.fromstring("1+1j,2-2j, -3+3j, -4e1+4j", sep=",", dtype=ctype),
+ np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+ # Spaces at wrong places
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+2 j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+ 2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1 +2j,3", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+j", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1+", dtype=ctype, sep=","),
+ np.array([1.]))
+ with assert_warns(DeprecationWarning):
+ assert_equal(np.fromstring("1j+1", dtype=ctype, sep=","),
+ np.array([1j]))
+
+
def test_fromstring_bogus():
with assert_warns(DeprecationWarning):
assert_equal(np.fromstring("1. 2. 3. flop 4.", dtype=float, sep=" "),
@@ -104,6 +136,88 @@ class TestFileBased(object):
res = np.fromfile(path, dtype=float, sep=" ")
assert_equal(res, np.array([1., 2., 3.]))
+ def test_fromfile_complex(self):
+ for ctype in ["complex", "cdouble", "cfloat"]:
+ # Check spacing between separator and only real component specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1, 2 , 3 ,4\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1., 2., 3., 4.]))
+
+ # Real component not specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j, -2j, 3j, 4e1j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j, -2.j, 3.j, 40.j]))
+
+ # Both components specified
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+1j,2-2j, -3+3j, -4e1+4j\n")
+
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1. + 1.j, 2. - 2.j, - 3. + 3.j, - 40. + 4j]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+2 j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+ 2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1 +2j,3\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+j\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1+\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.]))
+
+ # Spaces at wrong places
+ with temppath() as path:
+ with open(path, 'wt') as f:
+ f.write("1j+1\n")
+
+ with assert_warns(DeprecationWarning):
+ res = np.fromfile(path, dtype=ctype, sep=",")
+ assert_equal(res, np.array([1.j]))
+
+
+
@pytest.mark.skipif(string_to_longdouble_inaccurate,
reason="Need strtold_l")
def test_fromfile(self):
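The new tests cover text-mode parsing of complex values in fromstring and fromfile; roughly, the behavior being tested is the following (fromfile with sep="," accepts the same syntax from a file, and malformed entries such as "1+2 j" stop parsing with a DeprecationWarning):

    >>> import numpy as np
    >>> vals = np.fromstring("1+1j, 2-2j, 3j", dtype=np.cdouble, sep=",")
    >>> vals.tolist()
    [(1+1j), (2-2j), 3j]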
diff --git a/numpy/core/tests/test_multiarray.py b/numpy/core/tests/test_multiarray.py
index 59afe8e5e..0235c5063 100644
--- a/numpy/core/tests/test_multiarray.py
+++ b/numpy/core/tests/test_multiarray.py
@@ -20,6 +20,7 @@ import gc
import weakref
import pytest
from contextlib import contextmanager
+from test.support import no_tracing
from numpy.compat import pickle
@@ -964,34 +965,11 @@ class TestCreation(object):
@pytest.mark.skipif(sys.version_info[0] >= 3, reason="Not Python 2")
def test_sequence_long(self):
- assert_equal(np.array([long(4), long(4)]).dtype, np.long)
+ assert_equal(np.array([long(4), long(4)]).dtype, long)
assert_equal(np.array([long(4), 2**80]).dtype, object)
assert_equal(np.array([long(4), 2**80, long(4)]).dtype, object)
assert_equal(np.array([2**80, long(4)]).dtype, object)
- def test_sequence_of_array_like(self):
- class ArrayLike:
- def __init__(self):
- self.__array_interface__ = {
- "shape": (42,),
- "typestr": "<i1",
- "data": bytes(42)
- }
-
- # Make sure __array_*__ is used instead of Sequence methods.
- def __iter__(self):
- raise AssertionError("__iter__ was called")
-
- def __getitem__(self, idx):
- raise AssertionError("__getitem__ was called")
-
- def __len__(self):
- return 42
-
- assert_equal(
- np.array([ArrayLike()]),
- np.zeros((1, 42), dtype=np.byte))
-
def test_non_sequence_sequence(self):
"""Should not segfault.
@@ -1833,7 +1811,7 @@ class TestMethods(object):
# test unicode sorts.
s = 'aaaaaaaa'
- a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
b = a[::-1].copy()
for kind in self.sort_kinds:
msg = "unicode sort, kind=%s" % kind
@@ -2085,7 +2063,7 @@ class TestMethods(object):
# test unicode argsorts.
s = 'aaaaaaaa'
- a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode)
+ a = np.array([s + chr(i) for i in range(101)], dtype=np.unicode_)
b = a[::-1]
r = np.arange(101)
rr = r[::-1]
@@ -2168,7 +2146,7 @@ class TestMethods(object):
a = np.array(['aaaaaaaaa' for i in range(100)])
assert_equal(a.argsort(kind='m'), r)
# unicode
- a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode)
+ a = np.array(['aaaaaaaaa' for i in range(100)], dtype=np.unicode_)
assert_equal(a.argsort(kind='m'), r)
def test_sort_unicode_kind(self):
@@ -2297,7 +2275,7 @@ class TestMethods(object):
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100197_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100198_1',
'P:\\20x_dapi_cy3\\20x_dapi_cy3_20100199_1'],
- dtype=np.unicode)
+ dtype=np.unicode_)
ind = np.arange(len(a))
assert_equal([a.searchsorted(v, 'left') for v in a], ind)
assert_equal([a.searchsorted(v, 'right') for v in a], ind + 1)
@@ -5145,6 +5123,8 @@ class TestFlat(object):
class TestResize(object):
+
+ @no_tracing
def test_basic(self):
x = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
if IS_PYPY:
@@ -5161,6 +5141,7 @@ class TestResize(object):
assert_raises(ValueError, x.resize, (5, 1))
del y # avoid pyflakes unused variable warning.
+ @no_tracing
def test_int_shape(self):
x = np.eye(3)
if IS_PYPY:
@@ -5194,6 +5175,7 @@ class TestResize(object):
assert_raises(TypeError, np.eye(3).resize, order=1)
assert_raises(TypeError, np.eye(3).resize, refcheck='hi')
+ @no_tracing
def test_freeform_shape(self):
x = np.eye(3)
if IS_PYPY:
@@ -5202,6 +5184,7 @@ class TestResize(object):
x.resize(3, 2, 1)
assert_(x.shape == (3, 2, 1))
+ @no_tracing
def test_zeros_appended(self):
x = np.eye(3)
if IS_PYPY:
@@ -5211,6 +5194,7 @@ class TestResize(object):
assert_array_equal(x[0], np.eye(3))
assert_array_equal(x[1], np.zeros((3, 3)))
+ @no_tracing
def test_obj_obj(self):
# check memory is initialized on resize, gh-4857
a = np.ones(10, dtype=[('k', object, 2)])
@@ -7822,6 +7806,7 @@ if not IS_PYPY:
d = np.ones(100)
assert_(sys.getsizeof(d) < sys.getsizeof(d.reshape(100, 1, 1).copy()))
+ @no_tracing
def test_resize(self):
d = np.ones(100)
old = sys.getsizeof(d)
@@ -7956,20 +7941,20 @@ class TestBytestringArrayNonzero(object):
class TestUnicodeArrayNonzero(object):
def test_empty_ustring_array_is_falsey(self):
- assert_(not np.array([''], dtype=np.unicode))
+ assert_(not np.array([''], dtype=np.unicode_))
def test_whitespace_ustring_array_is_falsey(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0\0'
assert_(not a)
def test_all_null_ustring_array_is_falsey(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = '\0\0\0\0'
assert_(not a)
def test_null_inside_ustring_array_is_truthy(self):
- a = np.array(['eggs'], dtype=np.unicode)
+ a = np.array(['eggs'], dtype=np.unicode_)
a[0] = ' \0 \0'
assert_(a)
diff --git a/numpy/core/tests/test_nditer.py b/numpy/core/tests/test_nditer.py
index cf66751f8..daec9ce6d 100644
--- a/numpy/core/tests/test_nditer.py
+++ b/numpy/core/tests/test_nditer.py
@@ -2104,7 +2104,7 @@ def test_iter_buffering_string():
assert_equal(i[0], b'abc')
assert_equal(i[0].dtype, np.dtype('S6'))
- a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode)
+ a = np.array(['abc', 'a', 'abcd'], dtype=np.unicode_)
assert_equal(a.dtype, np.dtype('U4'))
assert_raises(TypeError, nditer, a, ['buffered'], ['readonly'],
op_dtypes='U2')
diff --git a/numpy/core/tests/test_numeric.py b/numpy/core/tests/test_numeric.py
index c0e20f420..1f8c7e98d 100644
--- a/numpy/core/tests/test_numeric.py
+++ b/numpy/core/tests/test_numeric.py
@@ -2567,6 +2567,11 @@ class TestCorrelate(object):
z = np.correlate(y, x, mode='full')
assert_array_almost_equal(z, r_z)
+ def test_zero_size(self):
+ with pytest.raises(ValueError):
+ np.correlate(np.array([]), np.ones(1000), mode='full')
+ with pytest.raises(ValueError):
+ np.correlate(np.ones(1000), np.array([]), mode='full')
class TestConvolve(object):
def test_object(self):
@@ -2948,7 +2953,7 @@ class TestIndices(object):
assert_array_equal(x, np.array([[0], [1], [2], [3]]))
assert_array_equal(y, np.array([[0, 1, 2]]))
- @pytest.mark.parametrize("dtype", [np.int, np.float32, np.float64])
+ @pytest.mark.parametrize("dtype", [np.int32, np.int64, np.float32, np.float64])
@pytest.mark.parametrize("dims", [(), (0,), (4, 3)])
def test_return_type(self, dtype, dims):
inds = np.indices(dims, dtype=dtype)
diff --git a/numpy/core/tests/test_regression.py b/numpy/core/tests/test_regression.py
index 444cbd79c..f3e170470 100644
--- a/numpy/core/tests/test_regression.py
+++ b/numpy/core/tests/test_regression.py
@@ -17,6 +17,7 @@ from numpy.testing import (
_assert_valid_refcount, HAS_REFCOUNT,
)
from numpy.compat import asbytes, asunicode, long, pickle
+from test.support import no_tracing
try:
RecursionError
@@ -1316,6 +1317,7 @@ class TestRegression(object):
assert_(pickle.loads(
pickle.dumps(test_record, protocol=proto)) == test_record)
+ @no_tracing
def test_blasdot_uninitialized_memory(self):
# Ticket #950
for m in [0, 1, 2]:
@@ -1511,7 +1513,7 @@ class TestRegression(object):
min //= -1
with np.errstate(divide="ignore"):
- for t in (np.int8, np.int16, np.int32, np.int64, int, np.long):
+ for t in (np.int8, np.int16, np.int32, np.int64, int, np.compat.long):
test_type(t)
def test_buffer_hashlib(self):
@@ -2112,7 +2114,7 @@ class TestRegression(object):
# Ticket #1578, the mismatch only showed up when running
# python-debug for python versions >= 2.7, and then as
# a core dump and error message.
- a = np.array(['abc'], dtype=np.unicode)[0]
+ a = np.array(['abc'], dtype=np.unicode_)[0]
del a
def test_refcount_error_in_clip(self):
@@ -2483,26 +2485,6 @@ class TestRegression(object):
np.array([T()])
- def test_2d__array__shape(self):
- class T(object):
- def __array__(self):
- return np.ndarray(shape=(0,0))
-
- # Make sure __array__ is used instead of Sequence methods.
- def __iter__(self):
- return iter([])
-
- def __getitem__(self, idx):
- raise AssertionError("__getitem__ was called")
-
- def __len__(self):
- return 0
-
-
- t = T()
- #gh-13659, would raise in broadcasting [x=t for x in result]
- np.array([t])
-
@pytest.mark.skipif(sys.maxsize < 2 ** 31 + 1, reason='overflows 32-bit python')
@pytest.mark.skipif(sys.platform == 'win32' and sys.version_info[:2] < (3, 8),
reason='overflows on windows, fixed in bpo-16865')
diff --git a/numpy/core/tests/test_scalarinherit.py b/numpy/core/tests/test_scalarinherit.py
index 9e32cf624..6a5c4fde9 100644
--- a/numpy/core/tests/test_scalarinherit.py
+++ b/numpy/core/tests/test_scalarinherit.py
@@ -68,8 +68,7 @@ class TestCharacter(object):
def test_char_repeat(self):
np_s = np.string_('abc')
np_u = np.unicode_('abc')
- np_i = np.int(5)
res_s = b'abc' * 5
res_u = u'abc' * 5
- assert_(np_s * np_i == res_s)
- assert_(np_u * np_i == res_u)
+ assert_(np_s * 5 == res_s)
+ assert_(np_u * 5 == res_u)
diff --git a/numpy/core/tests/test_scalarmath.py b/numpy/core/tests/test_scalarmath.py
index 854df5590..c84380cd9 100644
--- a/numpy/core/tests/test_scalarmath.py
+++ b/numpy/core/tests/test_scalarmath.py
@@ -11,7 +11,7 @@ import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_almost_equal,
assert_array_equal, IS_PYPY, suppress_warnings, _gen_alignment_data,
- assert_warns
+ assert_warns, assert_raises_regex,
)
types = [np.bool_, np.byte, np.ubyte, np.short, np.ushort, np.intc, np.uintc,
@@ -293,6 +293,16 @@ class TestModulus(object):
rem = operator.mod(finf, fone)
assert_(np.isnan(rem), 'dt: %s' % dt)
+ def test_inplace_floordiv_handling(self):
+ # issue gh-12927
+ # this only applies to in-place floordiv //=, because the output type
+ # promotes to float which does not fit
+ a = np.array([1, 2], np.int64)
+ b = np.array([1, 2], np.uint64)
+ pattern = 'could not be coerced to provided output parameter'
+ with assert_raises_regex(TypeError, pattern):
+ a //= b
+
class TestComplexDivision(object):
def test_zero_division(self):
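The new test documents why an in-place floor division fails for mixed int64/uint64 operands: the result type promotes to float64, which cannot be cast back into the integer output array under the default 'same_kind' casting. Illustrative sketch:

    >>> import numpy as np
    >>> a = np.array([1, 2], dtype=np.int64)
    >>> b = np.array([1, 2], dtype=np.uint64)
    >>> (a // b).dtype              # out-of-place result is promoted
    dtype('float64')
    >>> # a //= b  raises TypeError: the float64 result cannot be coerced
    >>> # back into the int64 output array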
diff --git a/numpy/core/tests/test_ufunc.py b/numpy/core/tests/test_ufunc.py
index a3b564b11..d0173c76d 100644
--- a/numpy/core/tests/test_ufunc.py
+++ b/numpy/core/tests/test_ufunc.py
@@ -980,7 +980,7 @@ class TestUfunc(object):
assert_array_equal(out, mm_row_col_vec.squeeze())
def test_matrix_multiply(self):
- self.compare_matrix_multiply_results(np.long)
+ self.compare_matrix_multiply_results(np.int64)
self.compare_matrix_multiply_results(np.double)
def test_matrix_multiply_umath_empty(self):
@@ -1092,7 +1092,6 @@ class TestUfunc(object):
arr0d = np.array(HasComparisons())
assert_equal(arr0d == arr0d, True)
assert_equal(np.equal(arr0d, arr0d), True) # normal behavior is a cast
- assert_equal(np.equal(arr0d, arr0d, dtype=object), '==')
arr1d = np.array([HasComparisons()])
assert_equal(arr1d == arr1d, np.array([True]))
diff --git a/numpy/core/tests/test_umath.py b/numpy/core/tests/test_umath.py
index 9b4ce9e47..e892e81d2 100644
--- a/numpy/core/tests/test_umath.py
+++ b/numpy/core/tests/test_umath.py
@@ -170,7 +170,7 @@ class TestOut(object):
class TestComparisons(object):
def test_ignore_object_identity_in_equal(self):
- # Check error raised when comparing identical objects whose comparison
+ # Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.equal, a, a)
@@ -188,7 +188,7 @@ class TestComparisons(object):
assert_equal(np.equal(a, a), [False])
def test_ignore_object_identity_in_not_equal(self):
- # Check error raised when comparing identical objects whose comparison
+ # Check comparing identical objects whose comparison
# is not a simple boolean, e.g., arrays that are compared elementwise.
a = np.array([np.array([1, 2, 3]), None], dtype=object)
assert_raises(ValueError, np.not_equal, a, a)
@@ -792,7 +792,7 @@ class TestAVXFloat32Transcendental(object):
def test_sincos_float32(self):
np.random.seed(42)
N = 1000000
- M = np.int(N/20)
+ M = np.int_(N/20)
index = np.random.randint(low=0, high=N, size=M)
x_f32 = np.float32(np.random.uniform(low=-100.,high=100.,size=N))
# test coverage for elements > 117435.992f for which glibc is used
diff --git a/numpy/core/tests/test_umath_accuracy.py b/numpy/core/tests/test_umath_accuracy.py
index 0bab04df2..fec180786 100644
--- a/numpy/core/tests/test_umath_accuracy.py
+++ b/numpy/core/tests/test_umath_accuracy.py
@@ -38,7 +38,7 @@ class TestAccuracy(object):
with open(filepath) as fid:
file_without_comments = (r for r in fid if not r[0] in ('$', '#'))
data = np.genfromtxt(file_without_comments,
- dtype=('|S39','|S39','|S39',np.int),
+ dtype=('|S39','|S39','|S39',int),
names=('type','input','output','ulperr'),
delimiter=',',
skip_header=1)
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index 7ba8ad862..d46ff8981 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -2329,8 +2329,11 @@ def generate_config_py(target):
extra_dll_dir = os.path.join(os.path.dirname(__file__), '.libs')
if sys.platform == 'win32' and os.path.isdir(extra_dll_dir):
- os.environ.setdefault('PATH', '')
- os.environ['PATH'] += os.pathsep + extra_dll_dir
+ if sys.version_info >= (3, 8):
+ os.add_dll_directory(extra_dll_dir)
+ else:
+ os.environ.setdefault('PATH', '')
+ os.environ['PATH'] += os.pathsep + extra_dll_dir
"""))
diff --git a/numpy/distutils/system_info.py b/numpy/distutils/system_info.py
index c2b3e118b..1bc1b1391 100644
--- a/numpy/distutils/system_info.py
+++ b/numpy/distutils/system_info.py
@@ -21,9 +21,12 @@ classes are available:
blas_info
lapack_info
openblas_info
+ openblas64__info
blis_info
blas_opt_info # usage recommended
lapack_opt_info # usage recommended
+ blas64__opt_info # usage recommended
+ lapack64__opt_info # usage recommended
fftw_info,dfftw_info,sfftw_info
fftw_threads_info,dfftw_threads_info,sfftw_threads_info
djbfft_info
@@ -406,6 +409,8 @@ def get_info(name, notfound_action=0):
'lapack_mkl': lapack_mkl_info, # use lapack_opt instead
'blas_mkl': blas_mkl_info, # use blas_opt instead
'accelerate': accelerate_info, # use blas_opt instead
+ 'openblas64_': openblas64__info,
+ 'openblas64__lapack': openblas64__lapack_info,
'x11': x11_info,
'fft_opt': fft_opt_info,
'fftw': fftw_info,
@@ -428,7 +433,9 @@ def get_info(name, notfound_action=0):
'numarray': numarray_info,
'numerix': numerix_info,
'lapack_opt': lapack_opt_info,
+ 'lapack64__opt': lapack64__opt_info,
'blas_opt': blas_opt_info,
+ 'blas64__opt': blas64__opt_info,
'boost_python': boost_python_info,
'agg2': agg2_info,
'wx': wx_info,
@@ -494,6 +501,14 @@ class LapackSrcNotFoundError(LapackNotFoundError):
the LAPACK_SRC environment variable."""
+class Lapack64_NotFoundError(NotFoundError):
+ """
+ 64-bit Lapack libraries with '64_' symbol suffix not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_
+ """
+
+
class BlasOptNotFoundError(NotFoundError):
"""
Optimized (vendor) Blas libraries are not found.
@@ -508,6 +523,12 @@ class BlasNotFoundError(NotFoundError):
numpy/distutils/site.cfg file (section [blas]) or by setting
the BLAS environment variable."""
+class Blas64_NotFoundError(NotFoundError):
+ """
+ 64-bit Blas libraries with '64_' symbol suffix not found.
+ Known libraries in numpy/distutils/site.cfg file are:
+ openblas64_
+ """
class BlasSrcNotFoundError(BlasNotFoundError):
"""
@@ -1600,10 +1621,10 @@ def get_atlas_version(**config):
class lapack_opt_info(system_info):
-
notfounderror = LapackNotFoundError
- # Default order of LAPACK checks
+    # List of all known LAPACK libraries, in the default order
lapack_order = ['mkl', 'openblas', 'flame', 'atlas', 'accelerate', 'lapack']
+ order_env_var_name = 'NPY_LAPACK_ORDER'
def _calc_info_mkl(self):
info = get_info('lapack_mkl')
@@ -1696,7 +1717,7 @@ class lapack_opt_info(system_info):
return False
def calc_info(self):
- user_order = os.environ.get('NPY_LAPACK_ORDER', None)
+ user_order = os.environ.get(self.order_env_var_name, None)
if user_order is None:
lapack_order = self.lapack_order
else:
@@ -1725,12 +1746,24 @@ class lapack_opt_info(system_info):
warnings.warn(LapackNotFoundError.__doc__ or '', stacklevel=2)
warnings.warn(LapackSrcNotFoundError.__doc__ or '', stacklevel=2)
+class lapack64__opt_info(lapack_opt_info):
+ notfounderror = Lapack64_NotFoundError
+ lapack_order = ['openblas64_']
+ order_env_var_name = 'NPY_LAPACK64__ORDER'
-class blas_opt_info(system_info):
+ def _calc_info_openblas64_(self):
+ info = get_info('openblas64__lapack')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+class blas_opt_info(system_info):
notfounderror = BlasNotFoundError
- # Default order of BLAS checks
+ # List of all known BLAS libraries, in the default order
blas_order = ['mkl', 'blis', 'openblas', 'atlas', 'accelerate', 'blas']
+ order_env_var_name = 'NPY_BLAS_ORDER'
def _calc_info_mkl(self):
info = get_info('blas_mkl')
@@ -1796,7 +1829,7 @@ class blas_opt_info(system_info):
return True
def calc_info(self):
- user_order = os.environ.get('NPY_BLAS_ORDER', None)
+ user_order = os.environ.get(self.order_env_var_name, None)
if user_order is None:
blas_order = self.blas_order
else:
@@ -1824,6 +1857,19 @@ class blas_opt_info(system_info):
warnings.warn(BlasSrcNotFoundError.__doc__ or '', stacklevel=2)
+class blas64__opt_info(blas_opt_info):
+ notfounderror = Blas64_NotFoundError
+ blas_order = ['openblas64_']
+ order_env_var_name = 'NPY_BLAS64__ORDER'
+
+ def _calc_info_openblas64_(self):
+ info = get_info('openblas64_')
+ if info:
+ self.set_info(**info)
+ return True
+ return False
+
+
class blas_info(system_info):
section = 'blas'
dir_env_var = 'BLAS'
@@ -1923,12 +1969,10 @@ class openblas_info(blas_info):
section = 'openblas'
dir_env_var = 'OPENBLAS'
_lib_names = ['openblas']
+ _require_symbols = []
notfounderror = BlasNotFoundError
- def check_embedded_lapack(self, info):
- return True
-
- def calc_info(self):
+ def _calc_info(self):
c = customized_ccompiler()
lib_dirs = self.get_lib_dirs()
@@ -1946,23 +1990,29 @@ class openblas_info(blas_info):
# Try gfortran-compatible library files
info = self.check_msvc_gfortran_libs(lib_dirs, openblas_libs)
# Skip lapack check, we'd need build_ext to do it
- assume_lapack = True
+ skip_symbol_check = True
elif info:
- assume_lapack = False
+ skip_symbol_check = False
info['language'] = 'c'
if info is None:
- return
+ return None
# Add extra info for OpenBLAS
extra_info = self.calc_extra_info()
dict_append(info, **extra_info)
- if not (assume_lapack or self.check_embedded_lapack(info)):
- return
+ if not (skip_symbol_check or self.check_symbols(info)):
+ return None
info['define_macros'] = [('HAVE_CBLAS', None)]
- self.set_info(**info)
+
+ return info
+
+ def calc_info(self):
+ info = self._calc_info()
+ if info is not None:
+ self.set_info(**info)
def check_msvc_gfortran_libs(self, library_dirs, libraries):
# First, find the full path to each library directory
@@ -1995,24 +2045,24 @@ class openblas_info(blas_info):
return info
-class openblas_lapack_info(openblas_info):
- section = 'openblas'
- dir_env_var = 'OPENBLAS'
- _lib_names = ['openblas']
- notfounderror = BlasNotFoundError
-
- def check_embedded_lapack(self, info):
+ def check_symbols(self, info):
res = False
c = customized_ccompiler()
tmpdir = tempfile.mkdtemp()
+
+ prototypes = "\n".join("void %s();" % symbol_name
+ for symbol_name in self._require_symbols)
+ calls = "\n".join("%s();" % symbol_name
+ for symbol_name in self._require_symbols)
s = textwrap.dedent("""\
- void zungqr_();
+ %(prototypes)s
int main(int argc, const char *argv[])
{
- zungqr_();
+ %(calls)s
return 0;
- }""")
+ }""") % dict(prototypes=prototypes, calls=calls)
+ print(s)
src = os.path.join(tmpdir, 'source.c')
out = os.path.join(tmpdir, 'a.out')
# Add the additional "extra" arguments
@@ -2037,9 +2087,38 @@ class openblas_lapack_info(openblas_info):
shutil.rmtree(tmpdir)
return res
+class openblas_lapack_info(openblas_info):
+ section = 'openblas'
+ dir_env_var = 'OPENBLAS'
+ _lib_names = ['openblas']
+ _require_symbols = ['zungqr_']
+ notfounderror = BlasNotFoundError
+
class openblas_clapack_info(openblas_lapack_info):
_lib_names = ['openblas', 'lapack']
+class openblas64__info(openblas_info):
+ section = 'openblas64_'
+ dir_env_var = 'OPENBLAS64_'
+ _lib_names = ['openblas64_']
+ _require_symbols = ['dgemm_64_', 'cblas_dgemm64_']
+ notfounderror = Blas64_NotFoundError
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info is not None:
+ info['define_macros'] = [('HAVE_CBLAS64_', None)]
+ return info
+
+class openblas64__lapack_info(openblas64__info):
+ _require_symbols = ['dgemm_64_', 'cblas_dgemm64_', 'zungqr_64_', 'LAPACKE_zungqr64_']
+
+ def _calc_info(self):
+ info = super()._calc_info()
+ if info:
+ info['define_macros'] += [('HAVE_LAPACKE64_', None)]
+ return info
+
class blis_info(blas_info):
section = 'blis'
dir_env_var = 'BLIS'
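The new classes wire 64-bit (ILP64) OpenBLAS builds with the '64_' symbol suffix into the detection machinery; their search order can be overridden with the NPY_BLAS64__ORDER and NPY_LAPACK64__ORDER environment variables introduced above. For example (illustrative; each lookup returns an empty dict when the library is not installed):

    >>> from numpy.distutils.system_info import get_info
    >>> blas64 = get_info('openblas64_')           # requires dgemm_64_ and cblas_dgemm64_
    >>> lapack64 = get_info('openblas64__lapack')  # additionally requires zungqr_64_ / LAPACKE_zungqr64_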
diff --git a/numpy/doc/basics.py b/numpy/doc/basics.py
index 1871512bf..c05f347a1 100644
--- a/numpy/doc/basics.py
+++ b/numpy/doc/basics.py
@@ -18,7 +18,7 @@ The primitive types supported are tied closely to those in C:
- C type
- Description
- * - `np.bool`
+ * - `np.bool_`
- ``bool``
- Boolean (True or False) stored as a byte
@@ -283,7 +283,7 @@ NumPy provides `numpy.iinfo` and `numpy.finfo` to verify the
minimum or maximum values of NumPy integer and floating point values
respectively ::
- >>> np.iinfo(np.int) # Bounds of the default integer on this system.
+ >>> np.iinfo(int) # Bounds of the default integer on this system.
iinfo(min=-9223372036854775808, max=9223372036854775807, dtype=int64)
>>> np.iinfo(np.int32) # Bounds of a 32-bit integer
iinfo(min=-2147483648, max=2147483647, dtype=int32)
diff --git a/numpy/fft/__init__.py b/numpy/fft/__init__.py
index fe95d8b17..37b3f0da6 100644
--- a/numpy/fft/__init__.py
+++ b/numpy/fft/__init__.py
@@ -118,8 +118,16 @@ The inverse DFT is defined as
It differs from the forward transform by the sign of the exponential
argument and the default normalization by :math:`1/n`.
+Type Promotion
+--------------
+
+`numpy.fft` promotes ``float32`` and ``complex64`` arrays to ``float64`` and
+``complex128`` arrays respectively. For an FFT implementation that does not
+promote input arrays, see `scipy.fftpack`.
+
Normalization
-------------
+
The default normalization has the direct transforms unscaled and the inverse
transforms are scaled by :math:`1/n`. It is possible to obtain unitary
transforms by setting the keyword argument ``norm`` to ``"ortho"`` (default is
diff --git a/numpy/lib/format.py b/numpy/lib/format.py
index 1ecd72815..20e2e9c72 100644
--- a/numpy/lib/format.py
+++ b/numpy/lib/format.py
@@ -242,6 +242,16 @@ def read_magic(fp):
major, minor = magic_str[-2:]
return major, minor
+def _has_metadata(dt):
+ if dt.metadata is not None:
+ return True
+ elif dt.names is not None:
+ return any(_has_metadata(dt[k]) for k in dt.names)
+ elif dt.subdtype is not None:
+ return _has_metadata(dt.base)
+ else:
+ return False
+
def dtype_to_descr(dtype):
"""
Get a serializable descriptor from the dtype.
@@ -265,6 +275,10 @@ def dtype_to_descr(dtype):
replicate the input dtype.
"""
+ if _has_metadata(dtype):
+ warnings.warn("metadata on a dtype may be saved or ignored, but will "
+ "raise if saved when read. Use another form of storage.",
+ UserWarning, stacklevel=2)
if dtype.names is not None:
# This is a record array. The .descr is fine. XXX: parts of the
# record array with an empty name, like padding bytes, still get
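The new _has_metadata helper walks fields and subarrays so that dtype_to_descr can warn whenever dtype metadata would be serialized; reading such a file back may fail, as the format tests below show. A small sketch of what now triggers the warning (the metadata key is arbitrary):

    >>> import io
    >>> import numpy as np
    >>> dt = np.dtype(np.int64, metadata={'unit': 's'})
    >>> buf = io.BytesIO()
    >>> np.save(buf, np.zeros(3, dtype=dt))   # emits UserWarning about dtype metadata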
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index 3ad630a7d..499120630 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -2305,7 +2305,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
>>> m = np.arange(10, dtype=np.float64)
>>> f = np.arange(10) * 2
>>> a = np.arange(10) ** 2.
- >>> ddof = 9 # N - 1
+ >>> ddof = 1
>>> w = f * a
>>> v1 = np.sum(w)
>>> v2 = np.sum(w * a)
diff --git a/numpy/lib/nanfunctions.py b/numpy/lib/nanfunctions.py
index 18ccab3b8..457cca146 100644
--- a/numpy/lib/nanfunctions.py
+++ b/numpy/lib/nanfunctions.py
@@ -99,7 +99,7 @@ def _replace_nan(a, val):
if a.dtype == np.object_:
# object arrays do not support `isnan` (gh-9009), so make a guess
- mask = a != a
+ mask = np.not_equal(a, a, dtype=bool)
elif issubclass(a.dtype.type, np.inexact):
mask = np.isnan(a)
else:
diff --git a/numpy/lib/npyio.py b/numpy/lib/npyio.py
index 7e1d4db4f..3e54ff10c 100644
--- a/numpy/lib/npyio.py
+++ b/numpy/lib/npyio.py
@@ -833,7 +833,7 @@ def loadtxt(fname, dtype=float, comments='#', delimiter=None,
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
- generators should return byte strings for Python 3k.
+ generators should return byte strings.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
@@ -1529,9 +1529,9 @@ def fromregex(file, regexp, dtype, encoding=None):
dtype = np.dtype(dtype)
content = file.read()
- if isinstance(content, bytes) and isinstance(regexp, np.unicode):
+ if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
regexp = asbytes(regexp)
- elif isinstance(content, np.unicode) and isinstance(regexp, bytes):
+ elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
@@ -1578,7 +1578,7 @@ def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
- that generators must return byte strings in Python 3k. The strings
+ that generators must return byte strings. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
diff --git a/numpy/lib/shape_base.py b/numpy/lib/shape_base.py
index 92d52109e..dbb61c225 100644
--- a/numpy/lib/shape_base.py
+++ b/numpy/lib/shape_base.py
@@ -1,7 +1,6 @@
from __future__ import division, absolute_import, print_function
import functools
-import warnings
import numpy.core.numeric as _nx
from numpy.core.numeric import (
@@ -11,6 +10,7 @@ from numpy.core.fromnumeric import reshape, transpose
from numpy.core.multiarray import normalize_axis_index
from numpy.core import overrides
from numpy.core import vstack, atleast_3d
+from numpy.core.numeric import normalize_axis_tuple
from numpy.core.shape_base import _arrays_for_stack_dispatcher
from numpy.lib.index_tricks import ndindex
from numpy.matrixlib.defmatrix import matrix # this raises all the right alarm bells
@@ -29,7 +29,7 @@ array_function_dispatch = functools.partial(
def _make_along_axis_idx(arr_shape, indices, axis):
- # compute dimensions to iterate over
+ # compute dimensions to iterate over
if not _nx.issubdtype(indices.dtype, _nx.integer):
raise IndexError('`indices` must be an integer array')
if len(arr_shape) != indices.ndim:
@@ -517,22 +517,26 @@ def expand_dims(a, axis):
Insert a new axis that will appear at the `axis` position in the expanded
array shape.
- .. note:: Previous to NumPy 1.13.0, neither ``axis < -a.ndim - 1`` nor
- ``axis > a.ndim`` raised errors or put the new axis where documented.
- Those axis values are now deprecated and will raise an AxisError in the
- future.
-
Parameters
----------
a : array_like
Input array.
- axis : int
- Position in the expanded axes where the new axis is placed.
+ axis : int or tuple of ints
+ Position in the expanded axes where the new axis (or axes) is placed.
+
+ .. deprecated:: 1.13.0
+ Passing an axis where ``axis > a.ndim`` will be treated as
+ ``axis == a.ndim``, and passing ``axis < -a.ndim - 1`` will
+ be treated as ``axis == 0``. This behavior is deprecated.
+
+ .. versionchanged:: 1.18.0
+ A tuple of axes is now supported. Out of range axes as
+ described above are now forbidden and raise an `AxisError`.
Returns
-------
- res : ndarray
- View of `a` with the number of dimensions increased by one.
+ result : ndarray
+ View of `a` with the number of dimensions increased.
See Also
--------
@@ -542,11 +546,11 @@ def expand_dims(a, axis):
Examples
--------
- >>> x = np.array([1,2])
+ >>> x = np.array([1, 2])
>>> x.shape
(2,)
- The following is equivalent to ``x[np.newaxis,:]`` or ``x[np.newaxis]``:
+ The following is equivalent to ``x[np.newaxis, :]`` or ``x[np.newaxis]``:
>>> y = np.expand_dims(x, axis=0)
>>> y
@@ -554,13 +558,26 @@ def expand_dims(a, axis):
>>> y.shape
(1, 2)
- >>> y = np.expand_dims(x, axis=1) # Equivalent to x[:,np.newaxis]
+ The following is equivalent to ``x[:, np.newaxis]``:
+
+ >>> y = np.expand_dims(x, axis=1)
>>> y
array([[1],
[2]])
>>> y.shape
(2, 1)
+ ``axis`` may also be a tuple:
+
+ >>> y = np.expand_dims(x, axis=(0, 1))
+ >>> y
+ array([[[1, 2]]])
+
+ >>> y = np.expand_dims(x, axis=(2, 0))
+ >>> y
+ array([[[1],
+ [2]]])
+
Note that some examples may use ``None`` instead of ``np.newaxis``. These
are the same objects:
@@ -573,18 +590,16 @@ def expand_dims(a, axis):
else:
a = asanyarray(a)
- shape = a.shape
- if axis > a.ndim or axis < -a.ndim - 1:
- # 2017-05-17, 1.13.0
- warnings.warn("Both axis > a.ndim and axis < -a.ndim - 1 are "
- "deprecated and will raise an AxisError in the future.",
- DeprecationWarning, stacklevel=3)
- # When the deprecation period expires, delete this if block,
- if axis < 0:
- axis = axis + a.ndim + 1
- # and uncomment the following line.
- # axis = normalize_axis_index(axis, a.ndim + 1)
- return a.reshape(shape[:axis] + (1,) + shape[axis:])
+ if type(axis) not in (tuple, list):
+ axis = (axis,)
+
+ out_ndim = len(axis) + a.ndim
+ axis = normalize_axis_tuple(axis, out_ndim)
+
+ shape_it = iter(a.shape)
+ shape = [1 if ax in axis else next(shape_it) for ax in range(out_ndim)]
+
+ return a.reshape(shape)
row_stack = vstack
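With the rewrite above, expand_dims accepts a tuple of axes: each requested position becomes a length-1 dimension in the output shape, and repeated or out-of-range axes are rejected by normalize_axis_tuple. For instance:

    >>> import numpy as np
    >>> x = np.arange(6).reshape(2, 3)
    >>> np.expand_dims(x, axis=(0, 3)).shape
    (1, 2, 3, 1)
    >>> # np.expand_dims(x, axis=(1, 1)) raises ValueError (repeated axis)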
diff --git a/numpy/lib/tests/test_format.py b/numpy/lib/tests/test_format.py
index 062c21725..0592e0b12 100644
--- a/numpy/lib/tests/test_format.py
+++ b/numpy/lib/tests/test_format.py
@@ -963,3 +963,33 @@ def test_unicode_field_names():
with open(fname, 'wb') as f:
with assert_warns(UserWarning):
format.write_array(f, arr, version=None)
+
+
+@pytest.mark.parametrize('dt, fail', [
+ (np.dtype({'names': ['a', 'b'], 'formats': [float, np.dtype('S3',
+ metadata={'some': 'stuff'})]}), True),
+ (np.dtype(int, metadata={'some': 'stuff'}), False),
+ (np.dtype([('subarray', (int, (2,)))], metadata={'some': 'stuff'}), False),
+ # recursive: metadata on the field of a dtype
+ (np.dtype({'names': ['a', 'b'], 'formats': [
+ float, np.dtype({'names': ['c'], 'formats': [np.dtype(int, metadata={})]})
+ ]}), False)
+ ])
+def test_metadata_dtype(dt, fail):
+ # gh-14142
+ arr = np.ones(10, dtype=dt)
+ buf = BytesIO()
+ with assert_warns(UserWarning):
+ np.save(buf, arr)
+ buf.seek(0)
+ if fail:
+ with assert_raises(ValueError):
+ np.load(buf)
+ else:
+ arr2 = np.load(buf)
+ # BUG: assert_array_equal does not check metadata
+ from numpy.lib.format import _has_metadata
+ assert_array_equal(arr, arr2)
+ assert _has_metadata(arr.dtype)
+ assert not _has_metadata(arr2.dtype)
+
diff --git a/numpy/lib/tests/test_function_base.py b/numpy/lib/tests/test_function_base.py
index 1eae8ccfb..9075ff538 100644
--- a/numpy/lib/tests/test_function_base.py
+++ b/numpy/lib/tests/test_function_base.py
@@ -2523,7 +2523,7 @@ class TestPercentile(object):
assert_equal(np.percentile(x, 0, interpolation='nearest'), np.nan)
def test_fraction(self):
- x = [Fraction(i, 2) for i in np.arange(8)]
+ x = [Fraction(i, 2) for i in range(8)]
p = np.percentile(x, Fraction(0))
assert_equal(p, Fraction(0))
@@ -2943,7 +2943,7 @@ class TestQuantile(object):
def test_fraction(self):
# fractional input, integral quantile
- x = [Fraction(i, 2) for i in np.arange(8)]
+ x = [Fraction(i, 2) for i in range(8)]
q = np.quantile(x, 0)
assert_equal(q, 0)
diff --git a/numpy/lib/tests/test_io.py b/numpy/lib/tests/test_io.py
index 4a5eec92c..4188265e8 100644
--- a/numpy/lib/tests/test_io.py
+++ b/numpy/lib/tests/test_io.py
@@ -24,6 +24,7 @@ from numpy.testing import (
assert_allclose, assert_array_equal, temppath, tempdir, IS_PYPY,
HAS_REFCOUNT, suppress_warnings, assert_no_gc_cycles, assert_no_warnings
)
+from numpy.testing._private.utils import requires_memory
class TextIO(BytesIO):
@@ -518,7 +519,7 @@ class TestSaveTxt(object):
def test_unicode(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
with tempdir() as tmpdir:
# set encoding as on windows it may not be unicode even on py3
np.savetxt(os.path.join(tmpdir, 'test.csv'), a, fmt=['%s'],
@@ -526,7 +527,7 @@ class TestSaveTxt(object):
def test_unicode_roundtrip(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
# our gz wrapper support encoding
suffixes = ['', '.gz']
# stdlib 2 versions do not support encoding
@@ -540,12 +541,12 @@ class TestSaveTxt(object):
np.savetxt(os.path.join(tmpdir, 'test.csv' + suffix), a,
fmt=['%s'], encoding='UTF-16-LE')
b = np.loadtxt(os.path.join(tmpdir, 'test.csv' + suffix),
- encoding='UTF-16-LE', dtype=np.unicode)
+ encoding='UTF-16-LE', dtype=np.unicode_)
assert_array_equal(a, b)
def test_unicode_bytestream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
s = BytesIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -553,7 +554,7 @@ class TestSaveTxt(object):
def test_unicode_stringstream(self):
utf8 = b'\xcf\x96'.decode('UTF-8')
- a = np.array([utf8], dtype=np.unicode)
+ a = np.array([utf8], dtype=np.unicode_)
s = StringIO()
np.savetxt(s, a, fmt=['%s'], encoding='UTF-8')
s.seek(0)
@@ -575,13 +576,9 @@ class TestSaveTxt(object):
@pytest.mark.skipif(sys.platform=='win32',
reason="large files cause problems")
@pytest.mark.slow
+ @requires_memory(7e9)
def test_large_zip(self):
# The test takes at least 6GB of memory, writes a file larger than 4GB
- try:
- a = 'a' * 6 * 1024 * 1024 * 1024
- del a
- except (MemoryError, OverflowError):
- pytest.skip("Cannot allocate enough memory for test")
test_data = np.asarray([np.random.rand(np.random.randint(50,100),4)
for i in range(800000)], dtype=object)
with tempdir() as tmpdir:
@@ -632,12 +629,12 @@ class LoadTxtBase(object):
with temppath() as path:
with open(path, "wb") as f:
f.write(nonascii.encode("UTF-16"))
- x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode)
+ x = self.loadfunc(path, encoding="UTF-16", dtype=np.unicode_)
assert_array_equal(x, nonascii)
def test_binary_decode(self):
utf16 = b'\xff\xfeh\x04 \x00i\x04 \x00j\x04'
- v = self.loadfunc(BytesIO(utf16), dtype=np.unicode, encoding='UTF-16')
+ v = self.loadfunc(BytesIO(utf16), dtype=np.unicode_, encoding='UTF-16')
assert_array_equal(v, np.array(utf16.decode('UTF-16').split()))
def test_converters_decode(self):
@@ -645,7 +642,7 @@ class LoadTxtBase(object):
c = TextIO()
c.write(b'\xcf\x96')
c.seek(0)
- x = self.loadfunc(c, dtype=np.unicode,
+ x = self.loadfunc(c, dtype=np.unicode_,
converters={0: lambda x: x.decode('UTF-8')})
a = np.array([b'\xcf\x96'.decode('UTF-8')])
assert_array_equal(x, a)
@@ -656,7 +653,7 @@ class LoadTxtBase(object):
with temppath() as path:
with io.open(path, 'wt', encoding='UTF-8') as f:
f.write(utf8)
- x = self.loadfunc(path, dtype=np.unicode,
+ x = self.loadfunc(path, dtype=np.unicode_,
converters={0: lambda x: x + 't'},
encoding='UTF-8')
a = np.array([utf8 + 't'])
@@ -1104,7 +1101,7 @@ class TestLoadTxt(LoadTxtBase):
with open(path, "wb") as f:
f.write(butf8)
with open(path, "rb") as f:
- x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode)
+ x = np.loadtxt(f, encoding="UTF-8", dtype=np.unicode_)
assert_array_equal(x, sutf8)
# test broken latin1 conversion people now rely on
with open(path, "rb") as f:
@@ -1587,7 +1584,7 @@ M 33 21.99
with open(path, 'wb') as f:
f.write(b'skip,skip,2001-01-01' + utf8 + b',1.0,skip')
test = np.genfromtxt(path, delimiter=",", names=None, dtype=float,
- usecols=(2, 3), converters={2: np.unicode},
+ usecols=(2, 3), converters={2: np.compat.unicode},
encoding='UTF-8')
control = np.array([('2001-01-01' + utf8.decode('UTF-8'), 1.)],
dtype=[('', '|U11'), ('', float)])
@@ -2126,7 +2123,7 @@ M 33 21.99
ctl = np.array([
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"],
["test1", "testNonethe" + utf8.decode("UTF-8"), "test3"]],
- dtype=np.unicode)
+ dtype=np.unicode_)
assert_array_equal(test, ctl)
# test a mixed dtype
@@ -2169,7 +2166,7 @@ M 33 21.99
["norm1", "norm2", "norm3"],
["norm1", latin1, "norm3"],
["test1", "testNonethe" + utf8, "test3"]],
- dtype=np.unicode)
+ dtype=np.unicode_)
assert_array_equal(test, ctl)
def test_recfromtxt(self):
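For context, a minimal sketch (not part of the patch) of the requires_memory decorator used in test_large_zip above: it skips the test when the requested amount of free memory is unavailable, replacing the old idiom of probing with a throw-away 6 GB string. The test name and body below are hypothetical, for illustration only.

import numpy as np
from numpy.testing._private.utils import requires_memory

@requires_memory(7e9)  # skip unless roughly 7 GB of free memory is available
def test_large_roundtrip_sketch():  # hypothetical name, not in the patch
    data = np.zeros(10**8, dtype=np.uint8)
    assert data.nbytes == 10**8
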
diff --git a/numpy/lib/tests/test_shape_base.py b/numpy/lib/tests/test_shape_base.py
index 01ea028bb..be1604a75 100644
--- a/numpy/lib/tests/test_shape_base.py
+++ b/numpy/lib/tests/test_shape_base.py
@@ -289,14 +289,26 @@ class TestExpandDims(object):
assert_(b.shape[axis] == 1)
assert_(np.squeeze(b).shape == s)
- def test_deprecations(self):
- # 2017-05-17, 1.13.0
+ def test_axis_tuple(self):
+ a = np.empty((3, 3, 3))
+ assert np.expand_dims(a, axis=(0, 1, 2)).shape == (1, 1, 1, 3, 3, 3)
+ assert np.expand_dims(a, axis=(0, -1, -2)).shape == (1, 3, 3, 3, 1, 1)
+ assert np.expand_dims(a, axis=(0, 3, 5)).shape == (1, 3, 3, 1, 3, 1)
+ assert np.expand_dims(a, axis=(0, -3, -5)).shape == (1, 1, 3, 1, 3, 3)
+
+ def test_axis_out_of_range(self):
s = (2, 3, 4, 5)
a = np.empty(s)
- with warnings.catch_warnings():
- warnings.simplefilter("always")
- assert_warns(DeprecationWarning, expand_dims, a, -6)
- assert_warns(DeprecationWarning, expand_dims, a, 5)
+ assert_raises(np.AxisError, expand_dims, a, -6)
+ assert_raises(np.AxisError, expand_dims, a, 5)
+
+ a = np.empty((3, 3, 3))
+ assert_raises(np.AxisError, expand_dims, a, (0, -6))
+ assert_raises(np.AxisError, expand_dims, a, (0, 5))
+
+ def test_repeated_axis(self):
+ a = np.empty((3, 3, 3))
+ assert_raises(ValueError, expand_dims, a, axis=(1, 1))
def test_subclasses(self):
a = np.arange(10).reshape((2, 5))
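A short sketch of the tuple-axis behaviour exercised by the new expand_dims tests above (assumes a NumPy version that includes this change):

import numpy as np

a = np.empty((3, 3, 3))
# Each entry of the tuple inserts one length-1 axis at that position of the result.
print(np.expand_dims(a, axis=(0, 1, 2)).shape)    # (1, 1, 1, 3, 3, 3)
print(np.expand_dims(a, axis=(0, -1, -2)).shape)  # (1, 3, 3, 3, 1, 1)
# Out-of-range axes now raise np.AxisError and repeated axes raise ValueError,
# instead of the old DeprecationWarning.
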
diff --git a/numpy/lib/tests/test_ufunclike.py b/numpy/lib/tests/test_ufunclike.py
index 0f06876a1..64280616f 100644
--- a/numpy/lib/tests/test_ufunclike.py
+++ b/numpy/lib/tests/test_ufunclike.py
@@ -21,7 +21,7 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
- a = a.astype(np.complex)
+ a = a.astype(np.complex_)
with assert_raises(TypeError):
ufl.isposinf(a)
@@ -36,7 +36,7 @@ class TestUfunclike(object):
assert_equal(res, tgt)
assert_equal(out, tgt)
- a = a.astype(np.complex)
+ a = a.astype(np.complex_)
with assert_raises(TypeError):
ufl.isneginf(a)
diff --git a/numpy/lib/type_check.py b/numpy/lib/type_check.py
index 586824743..8b9e4b8ab 100644
--- a/numpy/lib/type_check.py
+++ b/numpy/lib/type_check.py
@@ -68,16 +68,14 @@ def mintypecode(typechars, typeset='GDFgdf', default='d'):
'G'
"""
- typecodes = [(isinstance(t, str) and t) or asarray(t).dtype.char
- for t in typechars]
- intersection = [t for t in typecodes if t in typeset]
+ typecodes = ((isinstance(t, str) and t) or asarray(t).dtype.char
+ for t in typechars)
+ intersection = set(t for t in typecodes if t in typeset)
if not intersection:
return default
if 'F' in intersection and 'd' in intersection:
return 'D'
- l = [(_typecodes_by_elsize.index(t), t) for t in intersection]
- l.sort()
- return l[0][1]
+ return min((_typecodes_by_elsize.index(t), t) for t in intersection)[1]
def _asfarray_dispatcher(a, dtype=None):
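The observable behaviour of mintypecode is unchanged by the rewrite above; a short sketch of what it computes (illustrative, the values follow from the code shown):

import numpy as np

# Smallest-size typecode from the default typeset 'GDFgdf' covering the inputs:
print(np.mintypecode(['d', 'f', 'S']))  # 'd'
# The single-precision complex ('F') plus double ('d') case promotes to 'D':
print(np.mintypecode('Fd'))             # 'D'
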
diff --git a/numpy/linalg/lapack_lite/python_xerbla.c b/numpy/linalg/lapack_lite/python_xerbla.c
index dfc195556..c239a3620 100644
--- a/numpy/linalg/lapack_lite/python_xerbla.c
+++ b/numpy/linalg/lapack_lite/python_xerbla.c
@@ -1,4 +1,5 @@
#include "Python.h"
+#include "numpy/npy_common.h"
#undef c_abs
#include "f2c.h"
@@ -46,3 +47,11 @@ int xerbla_(char *srname, integer *info)
return 0;
}
+
+
+/* Same for LAPACK64_ */
+npy_int64 xerbla_64_(char *srname, npy_int64 *info)
+{
+ integer info_int = (integer)*info;
+ return xerbla_(srname, &info_int);
+}
diff --git a/numpy/linalg/lapack_litemodule.c b/numpy/linalg/lapack_litemodule.c
index 696a6d874..c80179fdf 100644
--- a/numpy/linalg/lapack_litemodule.c
+++ b/numpy/linalg/lapack_litemodule.c
@@ -7,45 +7,80 @@ More modifications by Jeff Whitaker
#include "Python.h"
#include "numpy/arrayobject.h"
+
+#ifndef NPY_UMATH_USE_BLAS64_
+
+/*
+ * Standard BLAS
+ */
#ifdef NO_APPEND_FORTRAN
# define FNAME(x) x
#else
# define FNAME(x) x##_
#endif
+#define FINT_PYFMT "i"
+typedef int fortran_int;
+
+#else
+
+/*
+ * BLAS64_
+ */
+
+#define FNAME(x) x##_64_
+
+typedef npy_int64 fortran_int;
+
+#if NPY_BITSOF_SHORT == 64
+#define FINT_PYFMT "h"
+#elif NPY_BITSOF_INT == 64
+#define FINT_PYFMT "i"
+#elif NPY_BITSOF_LONG == 64
+#define FINT_PYFMT "l"
+#elif NPY_BITSOF_LONGLONG == 64
+#define FINT_PYFMT "L"
+#else
+#error No compatible 64-bit integer size. \
+ Please contact NumPy maintainers and give detailed information about your \
+ compiler and platform, or set NPY_USE_BLAS64_=0
+#endif
+
+#endif
+
typedef struct { float r, i; } f2c_complex;
typedef struct { double r, i; } f2c_doublecomplex;
/* typedef long int (*L_fp)(); */
-extern int FNAME(dgelsd)(int *m, int *n, int *nrhs,
- double a[], int *lda, double b[], int *ldb,
- double s[], double *rcond, int *rank,
- double work[], int *lwork, int iwork[], int *info);
+extern fortran_int FNAME(dgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda, double b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ double work[], fortran_int *lwork, fortran_int iwork[], fortran_int *info);
-extern int FNAME(zgelsd)(int *m, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- double s[], double *rcond, int *rank,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[], int *info);
+extern fortran_int FNAME(zgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[], fortran_int *info);
-extern int FNAME(dgeqrf)(int *m, int *n, double a[], int *lda,
+extern fortran_int FNAME(dgeqrf)(fortran_int *m, fortran_int *n, double a[], fortran_int *lda,
double tau[], double work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(zgeqrf)(int *m, int *n, f2c_doublecomplex a[], int *lda,
+extern fortran_int FNAME(zgeqrf)(fortran_int *m, fortran_int *n, f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex tau[], f2c_doublecomplex work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(dorgqr)(int *m, int *n, int *k, double a[], int *lda,
+extern fortran_int FNAME(dorgqr)(fortran_int *m, fortran_int *n, fortran_int *k, double a[], fortran_int *lda,
double tau[], double work[],
- int *lwork, int *info);
+ fortran_int *lwork, fortran_int *info);
-extern int FNAME(zungqr)(int *m, int *n, int *k, f2c_doublecomplex a[],
- int *lda, f2c_doublecomplex tau[],
- f2c_doublecomplex work[], int *lwork, int *info);
+extern fortran_int FNAME(zungqr)(fortran_int *m, fortran_int *n, fortran_int *k, f2c_doublecomplex a[],
+ fortran_int *lda, f2c_doublecomplex tau[],
+ f2c_doublecomplex work[], fortran_int *lwork, fortran_int *info);
-extern int FNAME(xerbla)(char *srname, int *info);
+extern fortran_int FNAME(xerbla)(char *srname, fortran_int *info);
static PyObject *LapackError;
@@ -90,27 +125,31 @@ check_object(PyObject *ob, int t, char *obname,
#define FDATA(p) ((float *) PyArray_DATA((PyArrayObject *)p))
#define CDATA(p) ((f2c_complex *) PyArray_DATA((PyArrayObject *)p))
#define ZDATA(p) ((f2c_doublecomplex *) PyArray_DATA((PyArrayObject *)p))
-#define IDATA(p) ((int *) PyArray_DATA((PyArrayObject *)p))
+#define IDATA(p) ((fortran_int *) PyArray_DATA((PyArrayObject *)p))
static PyObject *
lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m;
- int n;
- int nrhs;
+ fortran_int lapack_lite_status;
+ fortran_int m;
+ fortran_int n;
+ fortran_int nrhs;
PyObject *a;
- int lda;
+ fortran_int lda;
PyObject *b;
- int ldb;
+ fortran_int ldb;
PyObject *s;
double rcond;
- int rank;
+ fortran_int rank;
PyObject *work;
PyObject *iwork;
- int lwork;
- int info;
- TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOi:dgelsd",
+ fortran_int lwork;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "O"
+ FINT_PYFMT "O" "d" FINT_PYFMT "O" FINT_PYFMT "O"
+ FINT_PYFMT ":dgelsd"),
&m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
&rank,&work,&lwork,&iwork,&info));
@@ -118,7 +157,11 @@ lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
TRY(check_object(b,NPY_DOUBLE,"b","NPY_DOUBLE","dgelsd"));
TRY(check_object(s,NPY_DOUBLE,"s","NPY_DOUBLE","dgelsd"));
TRY(check_object(work,NPY_DOUBLE,"work","NPY_DOUBLE","dgelsd"));
+#ifndef NPY_UMATH_USE_BLAS64_
TRY(check_object(iwork,NPY_INT,"iwork","NPY_INT","dgelsd"));
+#else
+ TRY(check_object(iwork,NPY_INT64,"iwork","NPY_INT64","dgelsd"));
+#endif
lapack_lite_status =
FNAME(dgelsd)(&m,&n,&nrhs,DDATA(a),&lda,DDATA(b),&ldb,
@@ -128,8 +171,11 @@ lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:d,s:i,s:i,s:i}","dgelsd_",
- lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:d,s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT "}"),
+ "dgelsd_",lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,
"lda",lda,"ldb",ldb,"rcond",rcond,"rank",rank,
"lwork",lwork,"info",info);
}
@@ -137,13 +183,16 @@ lapack_lite_dgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
+ fortran_int lda;
+ fortran_int info;
- TRY(PyArg_ParseTuple(args,"iiOiOOii:dgeqrf",&m,&n,&a,&lda,&tau,&work,&lwork,&info));
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "OO"
+ FINT_PYFMT FINT_PYFMT ":dgeqrf"),
+ &m,&n,&a,&lda,&tau,&work,&lwork,&info));
/* check objects and convert to right storage order */
TRY(check_object(a,NPY_DOUBLE,"a","NPY_DOUBLE","dgeqrf"));
@@ -157,7 +206,9 @@ lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","dgeqrf_",
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "dgeqrf_",
lapack_lite_status,"m",m,"n",n,"lda",lda,
"lwork",lwork,"info",info);
}
@@ -166,13 +217,17 @@ lapack_lite_dgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, k, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, k, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
-
- TRY(PyArg_ParseTuple(args,"iiiOiOOii:dorgqr", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
+ fortran_int lda;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O"
+ FINT_PYFMT "OO" FINT_PYFMT FINT_PYFMT
+ ":dorgqr"),
+ &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
TRY(check_object(a,NPY_DOUBLE,"a","NPY_DOUBLE","dorgqr"));
TRY(check_object(tau,NPY_DOUBLE,"tau","NPY_DOUBLE","dorgqr"));
TRY(check_object(work,NPY_DOUBLE,"work","NPY_DOUBLE","dorgqr"));
@@ -191,23 +246,26 @@ lapack_lite_dorgqr(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m;
- int n;
- int nrhs;
+ fortran_int lapack_lite_status;
+ fortran_int m;
+ fortran_int n;
+ fortran_int nrhs;
PyObject *a;
- int lda;
+ fortran_int lda;
PyObject *b;
- int ldb;
+ fortran_int ldb;
PyObject *s;
double rcond;
- int rank;
+ fortran_int rank;
PyObject *work;
- int lwork;
+ fortran_int lwork;
PyObject *rwork;
PyObject *iwork;
- int info;
- TRY(PyArg_ParseTuple(args,"iiiOiOiOdiOiOOi:zgelsd",
+ fortran_int info;
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT
+ "O" FINT_PYFMT "Od" FINT_PYFMT "O" FINT_PYFMT
+ "OO" FINT_PYFMT ":zgelsd"),
&m,&n,&nrhs,&a,&lda,&b,&ldb,&s,&rcond,
&rank,&work,&lwork,&rwork,&iwork,&info));
@@ -216,7 +274,11 @@ lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
TRY(check_object(s,NPY_DOUBLE,"s","NPY_DOUBLE","zgelsd"));
TRY(check_object(work,NPY_CDOUBLE,"work","NPY_CDOUBLE","zgelsd"));
TRY(check_object(rwork,NPY_DOUBLE,"rwork","NPY_DOUBLE","zgelsd"));
+#ifndef NPY_UMATH_USE_BLAS64_
TRY(check_object(iwork,NPY_INT,"iwork","NPY_INT","zgelsd"));
+#else
+ TRY(check_object(iwork,NPY_INT64,"iwork","NPY_INT64","zgelsd"));
+#endif
lapack_lite_status =
FNAME(zgelsd)(&m,&n,&nrhs,ZDATA(a),&lda,ZDATA(b),&ldb,DDATA(s),&rcond,
@@ -225,7 +287,11 @@ lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i,s:i}","zgelsd_",
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ "}"),
+ "zgelsd_",
lapack_lite_status,"m",m,"n",n,"nrhs",nrhs,"lda",lda,
"ldb",ldb,"rank",rank,"lwork",lwork,"info",info);
}
@@ -233,13 +299,16 @@ lapack_lite_zgelsd(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
+ fortran_int lda;
+ fortran_int info;
- TRY(PyArg_ParseTuple(args,"iiOiOOii:zgeqrf",&m,&n,&a,&lda,&tau,&work,&lwork,&info));
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT "O" FINT_PYFMT "OO"
+ FINT_PYFMT "" FINT_PYFMT ":zgeqrf"),
+ &m,&n,&a,&lda,&tau,&work,&lwork,&info));
/* check objects and convert to right storage order */
TRY(check_object(a,NPY_CDOUBLE,"a","NPY_CDOUBLE","zgeqrf"));
@@ -253,20 +322,27 @@ lapack_lite_zgeqrf(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i,s:i,s:i,s:i,s:i}","zgeqrf_",lapack_lite_status,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info);
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT
+ ",s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "zgeqrf_",lapack_lite_status,"m",m,"n",n,"lda",lda,"lwork",lwork,"info",info);
}
static PyObject *
lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int lapack_lite_status;
- int m, n, k, lwork;
+ fortran_int lapack_lite_status;
+ fortran_int m, n, k, lwork;
PyObject *a, *tau, *work;
- int lda;
- int info;
-
- TRY(PyArg_ParseTuple(args,"iiiOiOOii:zungqr", &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
+ fortran_int lda;
+ fortran_int info;
+
+ TRY(PyArg_ParseTuple(args,
+ (FINT_PYFMT FINT_PYFMT FINT_PYFMT "O"
+ FINT_PYFMT "OO" FINT_PYFMT "" FINT_PYFMT
+ ":zungqr"),
+ &m, &n, &k, &a, &lda, &tau, &work, &lwork, &info));
TRY(check_object(a,NPY_CDOUBLE,"a","NPY_CDOUBLE","zungqr"));
TRY(check_object(tau,NPY_CDOUBLE,"tau","NPY_CDOUBLE","zungqr"));
TRY(check_object(work,NPY_CDOUBLE,"work","NPY_CDOUBLE","zungqr"));
@@ -279,7 +355,8 @@ lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
return NULL;
}
- return Py_BuildValue("{s:i,s:i}","zungqr_",lapack_lite_status,
+ return Py_BuildValue(("{s:" FINT_PYFMT ",s:" FINT_PYFMT "}"),
+ "zungqr_",lapack_lite_status,
"info",info);
}
@@ -287,7 +364,7 @@ lapack_lite_zungqr(PyObject *NPY_UNUSED(self), PyObject *args)
static PyObject *
lapack_lite_xerbla(PyObject *NPY_UNUSED(self), PyObject *args)
{
- int info = -1;
+ fortran_int info = -1;
NPY_BEGIN_THREADS_DEF;
NPY_BEGIN_THREADS;
diff --git a/numpy/linalg/linalg.py b/numpy/linalg/linalg.py
index 816a200eb..f1b2c2228 100644
--- a/numpy/linalg/linalg.py
+++ b/numpy/linalg/linalg.py
@@ -194,37 +194,33 @@ def _fastCopyAndTranspose(type, *arrays):
else:
return cast_arrays
-def _assertRank2(*arrays):
+def _assert_2d(*arrays):
for a in arrays:
if a.ndim != 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'two-dimensional' % a.ndim)
-def _assertRankAtLeast2(*arrays):
+def _assert_stacked_2d(*arrays):
for a in arrays:
if a.ndim < 2:
raise LinAlgError('%d-dimensional array given. Array must be '
'at least two-dimensional' % a.ndim)
-def _assertNdSquareness(*arrays):
+def _assert_stacked_square(*arrays):
for a in arrays:
m, n = a.shape[-2:]
if m != n:
raise LinAlgError('Last 2 dimensions of the array must be square')
-def _assertFinite(*arrays):
+def _assert_finite(*arrays):
for a in arrays:
- if not (isfinite(a).all()):
+ if not isfinite(a).all():
raise LinAlgError("Array must not contain infs or NaNs")
-def _isEmpty2d(arr):
+def _is_empty_2d(arr):
# check size first for efficiency
return arr.size == 0 and product(arr.shape[-2:]) == 0
-def _assertNoEmpty2d(*arrays):
- for a in arrays:
- if _isEmpty2d(a):
- raise LinAlgError("Arrays cannot be empty")
def transpose(a):
"""
@@ -386,8 +382,8 @@ def solve(a, b):
"""
a, _ = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
b, wrap = _makearray(b)
t, result_t = _commonType(a, b)
@@ -542,8 +538,8 @@ def inv(a):
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
@@ -622,8 +618,8 @@ def matrix_power(a, n):
"""
a = asanyarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
try:
n = operator.index(n)
@@ -752,8 +748,8 @@ def cholesky(a):
extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
gufunc = _umath_linalg.cholesky_lo
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = gufunc(a, signature=signature, extobj=extobj)
@@ -895,7 +891,7 @@ def qr(a, mode='reduced'):
raise ValueError("Unrecognized mode '%s'" % mode)
a, wrap = _makearray(a)
- _assertRank2(a)
+ _assert_2d(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
@@ -1047,9 +1043,9 @@ def eigvals(a):
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
@@ -1157,8 +1153,8 @@ def eigvalsh(a, UPLO='L'):
gufunc = _umath_linalg.eigvalsh_up
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->d' if isComplexType(t) else 'd->d'
w = gufunc(a, signature=signature, extobj=extobj)
@@ -1294,9 +1290,9 @@ def eig(a):
"""
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
- _assertFinite(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
+ _assert_finite(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
@@ -1435,8 +1431,8 @@ def eigh(a, UPLO='L'):
raise ValueError("UPLO argument must be 'L' or 'U'")
a, wrap = _makearray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(
@@ -1608,7 +1604,7 @@ def svd(a, full_matrices=True, compute_uv=True, hermitian=False):
s = abs(s)
return s
- _assertRankAtLeast2(a)
+ _assert_stacked_2d(a)
t, result_t = _commonType(a)
extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
@@ -1729,7 +1725,8 @@ def cond(x, p=None):
"""
x = asarray(x) # in case we have a matrix
- _assertNoEmpty2d(x)
+ if _is_empty_2d(x):
+ raise LinAlgError("cond is not defined on empty arrays")
if p is None or p == 2 or p == -2:
s = svd(x, compute_uv=False)
with errstate(all='ignore'):
@@ -1740,8 +1737,8 @@ def cond(x, p=None):
else:
# Call inv(x) ignoring errors. The result array will
# contain nans in the entries where inversion failed.
- _assertRankAtLeast2(x)
- _assertNdSquareness(x)
+ _assert_stacked_2d(x)
+ _assert_stacked_square(x)
t, result_t = _commonType(x)
signature = 'D->D' if isComplexType(t) else 'd->d'
with errstate(all='ignore'):
@@ -1956,7 +1953,7 @@ def pinv(a, rcond=1e-15, hermitian=False):
"""
a, wrap = _makearray(a)
rcond = asarray(rcond)
- if _isEmpty2d(a):
+ if _is_empty_2d(a):
m, n = a.shape[-2:]
res = empty(a.shape[:-2] + (n, m), dtype=a.dtype)
return wrap(res)
@@ -2052,8 +2049,8 @@ def slogdet(a):
"""
a = asarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
real_t = _realType(result_t)
signature = 'D->Dd' if isComplexType(t) else 'd->dd'
@@ -2112,8 +2109,8 @@ def det(a):
"""
a = asarray(a)
- _assertRankAtLeast2(a)
- _assertNdSquareness(a)
+ _assert_stacked_2d(a)
+ _assert_stacked_square(a)
t, result_t = _commonType(a)
signature = 'D->D' if isComplexType(t) else 'd->d'
r = _umath_linalg.det(a, signature=signature)
@@ -2224,7 +2221,7 @@ def lstsq(a, b, rcond="warn"):
is_1d = b.ndim == 1
if is_1d:
b = b[:, newaxis]
- _assertRank2(a, b)
+ _assert_2d(a, b)
m, n = a.shape[-2:]
m2, n_rhs = b.shape[-2:]
if m != m2:
@@ -2328,16 +2325,19 @@ def norm(x, ord=None, axis=None, keepdims=False):
Parameters
----------
x : array_like
- Input array. If `axis` is None, `x` must be 1-D or 2-D.
+ Input array. If `axis` is None, `x` must be 1-D or 2-D, unless `ord`
+ is None. If both `axis` and `ord` are None, the 2-norm of
+ ``x.ravel`` will be returned.
ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
- `inf` object.
- axis : {int, 2-tuple of ints, None}, optional
+ `inf` object. The default is None.
+ axis : {None, int, 2-tuple of ints}, optional.
If `axis` is an integer, it specifies the axis of `x` along which to
compute the vector norms. If `axis` is a 2-tuple, it specifies the
axes that hold 2-D matrices, and the matrix norms of these matrices
are computed. If `axis` is None then either a vector norm (when `x`
- is 1-D) or a matrix norm (when `x` is 2-D) is returned.
+ is 1-D) or a matrix norm (when `x` is 2-D) is returned. The default
+ is None.
.. versionadded:: 1.8.0
@@ -2657,7 +2657,7 @@ def multi_dot(arrays):
arrays[0] = atleast_2d(arrays[0])
if arrays[-1].ndim == 1:
arrays[-1] = atleast_2d(arrays[-1]).T
- _assertRank2(*arrays)
+ _assert_2d(*arrays)
# _multi_dot_three is much faster than _multi_dot_matrix_chain_order
if n == 3:
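One user-visible effect of dropping _assertNoEmpty2d: cond now raises its own, more specific error on empty inputs. A minimal sketch:

import numpy as np

try:
    np.linalg.cond(np.empty((0, 0)))
except np.linalg.LinAlgError as exc:
    print(exc)  # cond is not defined on empty arrays
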
diff --git a/numpy/linalg/setup.py b/numpy/linalg/setup.py
index 66c07c9e1..f5cb04e89 100644
--- a/numpy/linalg/setup.py
+++ b/numpy/linalg/setup.py
@@ -26,7 +26,12 @@ def configuration(parent_package='', top_path=None):
]
all_sources = config.paths(lapack_lite_src)
- lapack_info = get_info('lapack_opt', 0) # and {}
+ if (os.environ.get('NPY_USE_BLAS64_', "0") != "0"):
+ lapack_info = get_info('lapack64__opt', 2)
+ lapack_info.setdefault('define_macros', [])
+ lapack_info['define_macros'] += [('NPY_UMATH_USE_BLAS64_', None)]
+ else:
+ lapack_info = get_info('lapack_opt', 0) # and {}
def get_lapack_lite_sources(ext, build_dir):
if not lapack_info:
diff --git a/numpy/linalg/tests/test_linalg.py b/numpy/linalg/tests/test_linalg.py
index 173e81e9c..e1590f1e7 100644
--- a/numpy/linalg/tests/test_linalg.py
+++ b/numpy/linalg/tests/test_linalg.py
@@ -20,8 +20,9 @@ from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
- assert_raises_regex,
+ assert_raises_regex, HAS_LAPACK64,
)
+from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
@@ -2002,3 +2003,16 @@ def test_unsupported_commontype():
arr = np.array([[1, -2], [2, 5]], dtype='float16')
with assert_raises_regex(TypeError, "unsupported in linalg"):
linalg.cholesky(arr)
+
+
+@pytest.mark.slow
+@pytest.mark.xfail(not HAS_LAPACK64, run=False,
+ reason="Numpy not compiled with 64-bit BLAS/LAPACK")
+@requires_memory(16e9)
+def test_blas64_dot():
+ n = 2**32
+ a = np.zeros([1, n], dtype=np.float32)
+ b = np.ones([1, 1], dtype=np.float32)
+ a[0,-1] = 1
+ c = np.dot(b, a)
+ assert_equal(c[0,-1], 1)
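Whether a given NumPy build uses the 64-bit (ILP64) interface can be checked at run time via the HAS_LAPACK64 flag imported above; a minimal sketch:

from numpy.testing import HAS_LAPACK64

if HAS_LAPACK64:
    print("built against 64-bit (ILP64) BLAS/LAPACK")
else:
    print("built against the standard 32-bit BLAS/LAPACK interface")
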
diff --git a/numpy/linalg/umath_linalg.c.src b/numpy/linalg/umath_linalg.c.src
index ee103c327..b111f75d5 100644
--- a/numpy/linalg/umath_linalg.c.src
+++ b/numpy/linalg/umath_linalg.c.src
@@ -62,301 +62,318 @@ dbg_stack_trace()
*****************************************************************************
*/
+#ifndef NPY_UMATH_USE_BLAS64_
+
#ifdef NO_APPEND_FORTRAN
# define FNAME(x) x
#else
# define FNAME(x) x##_
#endif
+typedef int fortran_int;
+
+#else
+
+#define FNAME(x) x##_64_
+
+typedef npy_int64 fortran_int;
+
+#endif
+
typedef struct { float r, i; } f2c_complex;
typedef struct { double r, i; } f2c_doublecomplex;
/* typedef long int (*L_fp)(); */
-extern int
-FNAME(sgeev)(char *jobvl, char *jobvr, int *n,
- float a[], int *lda, float wr[], float wi[],
- float vl[], int *ldvl, float vr[], int *ldvr,
- float work[], int lwork[],
- int *info);
-extern int
-FNAME(dgeev)(char *jobvl, char *jobvr, int *n,
- double a[], int *lda, double wr[], double wi[],
- double vl[], int *ldvl, double vr[], int *ldvr,
- double work[], int lwork[],
- int *info);
-extern int
-FNAME(cgeev)(char *jobvl, char *jobvr, int *n,
- f2c_doublecomplex a[], int *lda,
+typedef float fortran_real;
+typedef double fortran_doublereal;
+typedef f2c_complex fortran_complex;
+typedef f2c_doublecomplex fortran_doublecomplex;
+
+extern fortran_int
+FNAME(sgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ float a[], fortran_int *lda, float wr[], float wi[],
+ float vl[], fortran_int *ldvl, float vr[], fortran_int *ldvr,
+ float work[], fortran_int lwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ double a[], fortran_int *lda, double wr[], double wi[],
+ double vl[], fortran_int *ldvl, double vr[], fortran_int *ldvr,
+ double work[], fortran_int lwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex w[],
- f2c_doublecomplex vl[], int *ldvl,
- f2c_doublecomplex vr[], int *ldvr,
- f2c_doublecomplex work[], int *lwork,
+ f2c_doublecomplex vl[], fortran_int *ldvl,
+ f2c_doublecomplex vr[], fortran_int *ldvr,
+ f2c_doublecomplex work[], fortran_int *lwork,
double rwork[],
- int *info);
-extern int
-FNAME(zgeev)(char *jobvl, char *jobvr, int *n,
- f2c_doublecomplex a[], int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zgeev)(char *jobvl, char *jobvr, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
f2c_doublecomplex w[],
- f2c_doublecomplex vl[], int *ldvl,
- f2c_doublecomplex vr[], int *ldvr,
- f2c_doublecomplex work[], int *lwork,
+ f2c_doublecomplex vl[], fortran_int *ldvl,
+ f2c_doublecomplex vr[], fortran_int *ldvr,
+ f2c_doublecomplex work[], fortran_int *lwork,
double rwork[],
- int *info);
-
-extern int
-FNAME(ssyevd)(char *jobz, char *uplo, int *n,
- float a[], int *lda, float w[], float work[],
- int *lwork, int iwork[], int *liwork,
- int *info);
-extern int
-FNAME(dsyevd)(char *jobz, char *uplo, int *n,
- double a[], int *lda, double w[], double work[],
- int *lwork, int iwork[], int *liwork,
- int *info);
-extern int
-FNAME(cheevd)(char *jobz, char *uplo, int *n,
- f2c_complex a[], int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(ssyevd)(char *jobz, char *uplo, fortran_int *n,
+ float a[], fortran_int *lda, float w[], float work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(dsyevd)(char *jobz, char *uplo, fortran_int *n,
+ double a[], fortran_int *lda, double w[], double work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(cheevd)(char *jobz, char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
float w[], f2c_complex work[],
- int *lwork, float rwork[], int *lrwork, int iwork[],
- int *liwork,
- int *info);
-extern int
-FNAME(zheevd)(char *jobz, char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
+ fortran_int *lwork, float rwork[], fortran_int *lrwork, fortran_int iwork[],
+ fortran_int *liwork,
+ fortran_int *info);
+extern fortran_int
+FNAME(zheevd)(char *jobz, char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
double w[], f2c_doublecomplex work[],
- int *lwork, double rwork[], int *lrwork, int iwork[],
- int *liwork,
- int *info);
-
-extern int
-FNAME(sgelsd)(int *m, int *n, int *nrhs,
- float a[], int *lda, float b[], int *ldb,
- float s[], float *rcond, int *rank,
- float work[], int *lwork, int iwork[],
- int *info);
-extern int
-FNAME(dgelsd)(int *m, int *n, int *nrhs,
- double a[], int *lda, double b[], int *ldb,
- double s[], double *rcond, int *rank,
- double work[], int *lwork, int iwork[],
- int *info);
-extern int
-FNAME(cgelsd)(int *m, int *n, int *nrhs,
- f2c_complex a[], int *lda,
- f2c_complex b[], int *ldb,
- float s[], float *rcond, int *rank,
- f2c_complex work[], int *lwork,
- float rwork[], int iwork[],
- int *info);
-extern int
-FNAME(zgelsd)(int *m, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- double s[], double *rcond, int *rank,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[],
- int *info);
-
-extern int
-FNAME(sgesv)(int *n, int *nrhs,
- float a[], int *lda,
- int ipiv[],
- float b[], int *ldb,
- int *info);
-extern int
-FNAME(dgesv)(int *n, int *nrhs,
- double a[], int *lda,
- int ipiv[],
- double b[], int *ldb,
- int *info);
-extern int
-FNAME(cgesv)(int *n, int *nrhs,
- f2c_complex a[], int *lda,
- int ipiv[],
- f2c_complex b[], int *ldb,
- int *info);
-extern int
-FNAME(zgesv)(int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- int ipiv[],
- f2c_doublecomplex b[], int *ldb,
- int *info);
-
-extern int
-FNAME(sgetrf)(int *m, int *n,
- float a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(dgetrf)(int *m, int *n,
- double a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(cgetrf)(int *m, int *n,
- f2c_complex a[], int *lda,
- int ipiv[],
- int *info);
-extern int
-FNAME(zgetrf)(int *m, int *n,
- f2c_doublecomplex a[], int *lda,
- int ipiv[],
- int *info);
-
-extern int
-FNAME(spotrf)(char *uplo, int *n,
- float a[], int *lda,
- int *info);
-extern int
-FNAME(dpotrf)(char *uplo, int *n,
- double a[], int *lda,
- int *info);
-extern int
-FNAME(cpotrf)(char *uplo, int *n,
- f2c_complex a[], int *lda,
- int *info);
-extern int
-FNAME(zpotrf)(char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
- int *info);
-
-extern int
-FNAME(sgesdd)(char *jobz, int *m, int *n,
- float a[], int *lda, float s[], float u[],
- int *ldu, float vt[], int *ldvt, float work[],
- int *lwork, int iwork[], int *info);
-extern int
-FNAME(dgesdd)(char *jobz, int *m, int *n,
- double a[], int *lda, double s[], double u[],
- int *ldu, double vt[], int *ldvt, double work[],
- int *lwork, int iwork[], int *info);
-extern int
-FNAME(cgesdd)(char *jobz, int *m, int *n,
- f2c_complex a[], int *lda,
- float s[], f2c_complex u[], int *ldu,
- f2c_complex vt[], int *ldvt,
- f2c_complex work[], int *lwork,
- float rwork[], int iwork[], int *info);
-extern int
-FNAME(zgesdd)(char *jobz, int *m, int *n,
- f2c_doublecomplex a[], int *lda,
- double s[], f2c_doublecomplex u[], int *ldu,
- f2c_doublecomplex vt[], int *ldvt,
- f2c_doublecomplex work[], int *lwork,
- double rwork[], int iwork[], int *info);
-
-extern int
-FNAME(spotrs)(char *uplo, int *n, int *nrhs,
- float a[], int *lda,
- float b[], int *ldb,
- int *info);
-extern int
-FNAME(dpotrs)(char *uplo, int *n, int *nrhs,
- double a[], int *lda,
- double b[], int *ldb,
- int *info);
-extern int
-FNAME(cpotrs)(char *uplo, int *n, int *nrhs,
- f2c_complex a[], int *lda,
- f2c_complex b[], int *ldb,
- int *info);
-extern int
-FNAME(zpotrs)(char *uplo, int *n, int *nrhs,
- f2c_doublecomplex a[], int *lda,
- f2c_doublecomplex b[], int *ldb,
- int *info);
-
-extern int
-FNAME(spotri)(char *uplo, int *n,
- float a[], int *lda,
- int *info);
-extern int
-FNAME(dpotri)(char *uplo, int *n,
- double a[], int *lda,
- int *info);
-extern int
-FNAME(cpotri)(char *uplo, int *n,
- f2c_complex a[], int *lda,
- int *info);
-extern int
-FNAME(zpotri)(char *uplo, int *n,
- f2c_doublecomplex a[], int *lda,
- int *info);
-
-extern int
-FNAME(scopy)(int *n,
- float *sx, int *incx,
- float *sy, int *incy);
-extern int
-FNAME(dcopy)(int *n,
- double *sx, int *incx,
- double *sy, int *incy);
-extern int
-FNAME(ccopy)(int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
-extern int
-FNAME(zcopy)(int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+ fortran_int *lwork, double rwork[], fortran_int *lrwork, fortran_int iwork[],
+ fortran_int *liwork,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda, float b[], fortran_int *ldb,
+ float s[], float *rcond, fortran_int *rank,
+ float work[], fortran_int *lwork, fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda, double b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ double work[], fortran_int *lwork, fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ f2c_complex b[], fortran_int *ldb,
+ float s[], float *rcond, fortran_int *rank,
+ f2c_complex work[], fortran_int *lwork,
+ float rwork[], fortran_int iwork[],
+ fortran_int *info);
+extern fortran_int
+FNAME(zgelsd)(fortran_int *m, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ double s[], double *rcond, fortran_int *rank,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[],
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgesv)(fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda,
+ fortran_int ipiv[],
+ float b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(dgesv)(fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda,
+ fortran_int ipiv[],
+ double b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(cgesv)(fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ f2c_complex b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(zgesv)(fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ f2c_doublecomplex b[], fortran_int *ldb,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgetrf)(fortran_int *m, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(dgetrf)(fortran_int *m, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(cgetrf)(fortran_int *m, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+extern fortran_int
+FNAME(zgetrf)(fortran_int *m, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int ipiv[],
+ fortran_int *info);
+
+extern fortran_int
+FNAME(spotrf)(char *uplo, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotrf)(char *uplo, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotrf)(char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotrf)(char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(sgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ float a[], fortran_int *lda, float s[], float u[],
+ fortran_int *ldu, float vt[], fortran_int *ldvt, float work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(dgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ double a[], fortran_int *lda, double s[], double u[],
+ fortran_int *ldu, double vt[], fortran_int *ldvt, double work[],
+ fortran_int *lwork, fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(cgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ float s[], f2c_complex u[], fortran_int *ldu,
+ f2c_complex vt[], fortran_int *ldvt,
+ f2c_complex work[], fortran_int *lwork,
+ float rwork[], fortran_int iwork[], fortran_int *info);
+extern fortran_int
+FNAME(zgesdd)(char *jobz, fortran_int *m, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ double s[], f2c_doublecomplex u[], fortran_int *ldu,
+ f2c_doublecomplex vt[], fortran_int *ldvt,
+ f2c_doublecomplex work[], fortran_int *lwork,
+ double rwork[], fortran_int iwork[], fortran_int *info);
+
+extern fortran_int
+FNAME(spotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ float a[], fortran_int *lda,
+ float b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ double a[], fortran_int *lda,
+ double b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ f2c_complex a[], fortran_int *lda,
+ f2c_complex b[], fortran_int *ldb,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotrs)(char *uplo, fortran_int *n, fortran_int *nrhs,
+ f2c_doublecomplex a[], fortran_int *lda,
+ f2c_doublecomplex b[], fortran_int *ldb,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(spotri)(char *uplo, fortran_int *n,
+ float a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(dpotri)(char *uplo, fortran_int *n,
+ double a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(cpotri)(char *uplo, fortran_int *n,
+ f2c_complex a[], fortran_int *lda,
+ fortran_int *info);
+extern fortran_int
+FNAME(zpotri)(char *uplo, fortran_int *n,
+ f2c_doublecomplex a[], fortran_int *lda,
+ fortran_int *info);
+
+extern fortran_int
+FNAME(scopy)(fortran_int *n,
+ float *sx, fortran_int *incx,
+ float *sy, fortran_int *incy);
+extern fortran_int
+FNAME(dcopy)(fortran_int *n,
+ double *sx, fortran_int *incx,
+ double *sy, fortran_int *incy);
+extern fortran_int
+FNAME(ccopy)(fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
+extern fortran_int
+FNAME(zcopy)(fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
extern float
-FNAME(sdot)(int *n,
- float *sx, int *incx,
- float *sy, int *incy);
+FNAME(sdot)(fortran_int *n,
+ float *sx, fortran_int *incx,
+ float *sy, fortran_int *incy);
extern double
-FNAME(ddot)(int *n,
- double *sx, int *incx,
- double *sy, int *incy);
+FNAME(ddot)(fortran_int *n,
+ double *sx, fortran_int *incx,
+ double *sy, fortran_int *incy);
extern void
-FNAME(cdotu)(f2c_complex *ret, int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
+FNAME(cdotu)(f2c_complex *ret, fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
extern void
-FNAME(zdotu)(f2c_doublecomplex *ret, int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+FNAME(zdotu)(f2c_doublecomplex *ret, fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
extern void
-FNAME(cdotc)(f2c_complex *ret, int *n,
- f2c_complex *sx, int *incx,
- f2c_complex *sy, int *incy);
+FNAME(cdotc)(f2c_complex *ret, fortran_int *n,
+ f2c_complex *sx, fortran_int *incx,
+ f2c_complex *sy, fortran_int *incy);
extern void
-FNAME(zdotc)(f2c_doublecomplex *ret, int *n,
- f2c_doublecomplex *sx, int *incx,
- f2c_doublecomplex *sy, int *incy);
+FNAME(zdotc)(f2c_doublecomplex *ret, fortran_int *n,
+ f2c_doublecomplex *sx, fortran_int *incx,
+ f2c_doublecomplex *sy, fortran_int *incy);
-extern int
+extern fortran_int
FNAME(sgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
float *alpha,
- float *a, int *lda,
- float *b, int *ldb,
+ float *a, fortran_int *lda,
+ float *b, fortran_int *ldb,
float *beta,
- float *c, int *ldc);
-extern int
+ float *c, fortran_int *ldc);
+extern fortran_int
FNAME(dgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
double *alpha,
- double *a, int *lda,
- double *b, int *ldb,
+ double *a, fortran_int *lda,
+ double *b, fortran_int *ldb,
double *beta,
- double *c, int *ldc);
-extern int
+ double *c, fortran_int *ldc);
+extern fortran_int
FNAME(cgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
f2c_complex *alpha,
- f2c_complex *a, int *lda,
- f2c_complex *b, int *ldb,
+ f2c_complex *a, fortran_int *lda,
+ f2c_complex *b, fortran_int *ldb,
f2c_complex *beta,
- f2c_complex *c, int *ldc);
-extern int
+ f2c_complex *c, fortran_int *ldc);
+extern fortran_int
FNAME(zgemm)(char *transa, char *transb,
- int *m, int *n, int *k,
+ fortran_int *m, fortran_int *n, fortran_int *k,
f2c_doublecomplex *alpha,
- f2c_doublecomplex *a, int *lda,
- f2c_doublecomplex *b, int *ldb,
+ f2c_doublecomplex *a, fortran_int *lda,
+ f2c_doublecomplex *b, fortran_int *ldb,
f2c_doublecomplex *beta,
- f2c_doublecomplex *c, int *ldc);
+ f2c_doublecomplex *c, fortran_int *ldc);
#define LAPACK_T(FUNC) \
@@ -369,12 +386,6 @@ FNAME(zgemm)(char *transa, char *transb,
#define LAPACK(FUNC) \
FNAME(FUNC)
-typedef int fortran_int;
-typedef float fortran_real;
-typedef double fortran_doublereal;
-typedef f2c_complex fortran_complex;
-typedef f2c_doublecomplex fortran_doublecomplex;
-
/*
*****************************************************************************
diff --git a/numpy/ma/core.py b/numpy/ma/core.py
index dd7e39b71..5aced840a 100644
--- a/numpy/ma/core.py
+++ b/numpy/ma/core.py
@@ -602,8 +602,10 @@ def filled(a, fill_value=None):
----------
a : MaskedArray or array_like
An input object.
- fill_value : scalar, optional
- Filling value. Default is None.
+ fill_value : array_like, optional
+ Can be scalar or non-scalar. If non-scalar, the
+ resulting filled array should be broadcastable
+ over the input array. Default is None.
Returns
-------
@@ -623,10 +625,19 @@ def filled(a, fill_value=None):
array([[999999, 1, 2],
[999999, 4, 5],
[ 6, 7, 8]])
+ >>> x.filled(fill_value=333)
+ array([[333, 1, 2],
+ [333, 4, 5],
+ [ 6, 7, 8]])
+ >>> x.filled(fill_value=np.arange(3))
+ array([[0, 1, 2],
+ [0, 4, 5],
+ [6, 7, 8]])
"""
if hasattr(a, 'filled'):
return a.filled(fill_value)
+
elif isinstance(a, ndarray):
# Should we check for contiguity ? and a.flags['CONTIGUOUS']:
return a
@@ -3653,6 +3664,14 @@ class MaskedArray(ndarray):
@fill_value.setter
def fill_value(self, value=None):
target = _check_fill_value(value, self.dtype)
+ if not target.ndim == 0:
+ # 2019-11-12, 1.18.0
+ warnings.warn(
+ "Non-scalar arrays for the fill value are deprecated. Use "
+ "arrays with scalar values instead. The filled function "
+ "still supports any array as `fill_value`.",
+ DeprecationWarning, stacklevel=2)
+
_fill_value = self._fill_value
if _fill_value is None:
# Create the attribute if it was undefined
@@ -3673,9 +3692,11 @@ class MaskedArray(ndarray):
Parameters
----------
- fill_value : scalar, optional
- The value to use for invalid entries (None by default).
- If None, the `fill_value` attribute of the array is used instead.
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or non-scalar.
+ If non-scalar, the resulting ndarray must be broadcastable over
+ the input array. Default is None, in which case the `fill_value`
+ attribute of the array is used instead.
Returns
-------
@@ -3694,6 +3715,8 @@ class MaskedArray(ndarray):
>>> x = np.ma.array([1,2,3,4,5], mask=[0,0,1,0,1], fill_value=-999)
>>> x.filled()
array([ 1, 2, -999, 4, -999])
+ >>> x.filled(fill_value=1000)
+ array([ 1, 2, 1000, 4, 1000])
>>> type(x.filled())
<class 'numpy.ndarray'>
@@ -6220,9 +6243,11 @@ class mvoid(MaskedArray):
Parameters
----------
- fill_value : scalar, optional
- The value to use for invalid entries (None by default).
- If None, the `fill_value` attribute is used instead.
+ fill_value : array_like, optional
+ The value to use for invalid entries. Can be scalar or
+ non-scalar. If the latter, the filled array should
+ be broadcastable over the input array. Default is None, in
+ which case the `fill_value` attribute is used instead.
Returns
-------
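A short sketch of the broadcastable fill_value documented above, mirroring the docstring example:

import numpy as np

x = np.ma.array(np.arange(9).reshape(3, 3),
                mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
# A non-scalar fill_value is broadcast against the masked array:
print(x.filled(fill_value=np.arange(3)))
# [[0 1 2]
#  [0 4 5]
#  [6 7 8]]
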
diff --git a/numpy/ma/extras.py b/numpy/ma/extras.py
index 4a83ac781..f4a914471 100644
--- a/numpy/ma/extras.py
+++ b/numpy/ma/extras.py
@@ -937,7 +937,7 @@ def compress_cols(a):
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
-def mask_rows(a, axis=None):
+def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
@@ -979,9 +979,15 @@ def mask_rows(a, axis=None):
fill_value=1)
"""
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 0)
-def mask_cols(a, axis=None):
+def mask_cols(a, axis=np._NoValue):
"""
Mask columns of a 2D array that contain masked values.
@@ -1022,6 +1028,12 @@ def mask_cols(a, axis=None):
fill_value=1)
"""
+ if axis is not np._NoValue:
+ # remove the axis argument when this deprecation expires
+ # NumPy 1.18.0, 2019-11-28
+ warnings.warn(
+ "The axis argument has always been ignored, in future passing it "
+ "will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 1)
diff --git a/numpy/ma/tests/test_extras.py b/numpy/ma/tests/test_extras.py
index 836770378..c75c47801 100644
--- a/numpy/ma/tests/test_extras.py
+++ b/numpy/ma/tests/test_extras.py
@@ -11,6 +11,7 @@ from __future__ import division, absolute_import, print_function
import warnings
import itertools
+import pytest
import numpy as np
from numpy.testing import (
@@ -552,6 +553,18 @@ class TestCompressFunctions(object):
assert_(mask_rowcols(x, 0).mask.all())
assert_(mask_rowcols(x, 1).mask.all())
+ @pytest.mark.parametrize("axis", [None, 0, 1])
+ @pytest.mark.parametrize(["func", "rowcols_axis"],
+ [(np.ma.mask_rows, 0), (np.ma.mask_cols, 1)])
+ def test_mask_row_cols_axis_deprecation(self, axis, func, rowcols_axis):
+ # Test deprecation of the axis argument to `mask_rows` and `mask_cols`
+ x = array(np.arange(9).reshape(3, 3),
+ mask=[[1, 0, 0], [0, 0, 0], [0, 0, 0]])
+
+ with assert_warns(DeprecationWarning):
+ res = func(x, axis=axis)
+ assert_equal(res, mask_rowcols(x, rowcols_axis))
+
def test_dot(self):
# Tests dot product
n = np.arange(1, 7)
diff --git a/numpy/random/.gitignore b/numpy/random/.gitignore
new file mode 100644
index 000000000..fea3f955a
--- /dev/null
+++ b/numpy/random/.gitignore
@@ -0,0 +1,3 @@
+# generated files
+_bounded_integers.pyx
+_bounded_integers.pxd
diff --git a/numpy/random/__init__.pxd b/numpy/random/__init__.pxd
new file mode 100644
index 000000000..05e073876
--- /dev/null
+++ b/numpy/random/__init__.pxd
@@ -0,0 +1,14 @@
+cimport numpy as np
+from libc.stdint cimport uint32_t, uint64_t
+
+cdef extern from "numpy/random/bitgen.h":
+ struct bitgen:
+ void *state
+ uint64_t (*next_uint64)(void *st) nogil
+ uint32_t (*next_uint32)(void *st) nogil
+ double (*next_double)(void *st) nogil
+ uint64_t (*next_raw)(void *st) nogil
+
+ ctypedef bitgen bitgen_t
+
+from numpy.random._bit_generator cimport BitGenerator, SeedSequence
diff --git a/numpy/random/_bit_generator.pxd b/numpy/random/_bit_generator.pxd
index 30fa4a27d..bd5e47a20 100644
--- a/numpy/random/_bit_generator.pxd
+++ b/numpy/random/_bit_generator.pxd
@@ -1,7 +1,7 @@
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
-cdef extern from "include/bitgen.h":
+cdef extern from "numpy/random/bitgen.h":
struct bitgen:
void *state
uint64_t (*next_uint64)(void *st) nogil
diff --git a/numpy/random/_bounded_integers.pxd b/numpy/random/_bounded_integers.pxd
deleted file mode 100644
index d3ee97a70..000000000
--- a/numpy/random/_bounded_integers.pxd
+++ /dev/null
@@ -1,29 +0,0 @@
-from libc.stdint cimport (uint8_t, uint16_t, uint32_t, uint64_t,
- int8_t, int16_t, int32_t, int64_t, intptr_t)
-import numpy as np
-cimport numpy as np
-ctypedef np.npy_bool bool_t
-
-from ._bit_generator cimport bitgen_t
-
-cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
- """Mask generator for use in bounded random numbers"""
- # Smallest bit mask >= max
- cdef uint64_t mask = max_val
- mask |= mask >> 1
- mask |= mask >> 2
- mask |= mask >> 4
- mask |= mask >> 8
- mask |= mask >> 16
- mask |= mask >> 32
- return mask
-
-cdef object _rand_uint64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_uint32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_uint16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_uint8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_bool(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_int64(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_int32(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_int16(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
-cdef object _rand_int8(object low, object high, object size, bint use_masked, bint closed, bitgen_t *state, object lock)
diff --git a/numpy/random/_bounded_integers.pxd.in b/numpy/random/_bounded_integers.pxd.in
index 320d35774..5ae5a8067 100644
--- a/numpy/random/_bounded_integers.pxd.in
+++ b/numpy/random/_bounded_integers.pxd.in
@@ -4,7 +4,7 @@ import numpy as np
cimport numpy as np
ctypedef np.npy_bool bool_t
-from ._bit_generator cimport bitgen_t
+from numpy.random cimport bitgen_t
cdef inline uint64_t _gen_mask(uint64_t max_val) nogil:
"""Mask generator for use in bounded random numbers"""
diff --git a/numpy/random/_bounded_integers.pyx b/numpy/random/_bounded_integers.pyx
deleted file mode 100644
index d6a534b43..000000000
--- a/numpy/random/_bounded_integers.pyx
+++ /dev/null
@@ -1,1564 +0,0 @@
-#!python
-#cython: wraparound=False, nonecheck=False, boundscheck=False, cdivision=True
-
-import numpy as np
-cimport numpy as np
-
-__all__ = []
-
-np.import_array()
-
-cdef extern from "include/distributions.h":
- # Generate random numbers in closed interval [off, off + rng].
- uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
- uint64_t off, uint64_t rng,
- uint64_t mask, bint use_masked) nogil
- uint32_t random_buffered_bounded_uint32(bitgen_t *bitgen_state,
- uint32_t off, uint32_t rng,
- uint32_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- uint16_t random_buffered_bounded_uint16(bitgen_t *bitgen_state,
- uint16_t off, uint16_t rng,
- uint16_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- uint8_t random_buffered_bounded_uint8(bitgen_t *bitgen_state,
- uint8_t off, uint8_t rng,
- uint8_t mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- np.npy_bool random_buffered_bounded_bool(bitgen_t *bitgen_state,
- np.npy_bool off, np.npy_bool rng,
- np.npy_bool mask, bint use_masked,
- int *bcnt, uint32_t *buf) nogil
- void random_bounded_uint64_fill(bitgen_t *bitgen_state,
- uint64_t off, uint64_t rng, np.npy_intp cnt,
- bint use_masked,
- uint64_t *out) nogil
- void random_bounded_uint32_fill(bitgen_t *bitgen_state,
- uint32_t off, uint32_t rng, np.npy_intp cnt,
- bint use_masked,
- uint32_t *out) nogil
- void random_bounded_uint16_fill(bitgen_t *bitgen_state,
- uint16_t off, uint16_t rng, np.npy_intp cnt,
- bint use_masked,
- uint16_t *out) nogil
- void random_bounded_uint8_fill(bitgen_t *bitgen_state,
- uint8_t off, uint8_t rng, np.npy_intp cnt,
- bint use_masked,
- uint8_t *out) nogil
- void random_bounded_bool_fill(bitgen_t *bitgen_state,
- np.npy_bool off, np.npy_bool rng, np.npy_intp cnt,
- bint use_masked,
- np.npy_bool *out) nogil
-
-
-
-_integers_types = {'bool': (0, 2),
- 'int8': (-2**7, 2**7),
- 'int16': (-2**15, 2**15),
- 'int32': (-2**31, 2**31),
- 'int64': (-2**63, 2**63),
- 'uint8': (0, 2**8),
- 'uint16': (0, 2**16),
- 'uint32': (0, 2**32),
- 'uint64': (0, 2**64)}
-
-
-cdef object _rand_uint32_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the open interval [low, high)
- must be in-range for the next larger type, uint64. Here we case to
- this type for checking and the recast to uint32 when producing the
- random integers.
- """
- cdef uint32_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef uint32_t *out_data
- cdef uint64_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, 0)):
- raise ValueError('low is out of bounds for uint32')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, 0X100000000ULL)):
- raise ValueError('high is out of bounds for uint32')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.uint32)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.uint32)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint32_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- rng = <uint32_t>((high_v - is_open) - low_v)
- off = <uint32_t>(<uint64_t>low_v)
-
- if rng != last_rng:
- # Smallest bit mask >= max
- mask = <uint32_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_uint32(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
-
-cdef object _rand_uint16_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the open interval [low, high)
- must be in-range for the next larger type, uint32. Here we cast to
- this type for checking and then recast to uint16 when producing the
- random integers.
- """
- cdef uint16_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef uint16_t *out_data
- cdef uint32_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, 0)):
- raise ValueError('low is out of bounds for uint16')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, 0X10000UL)):
- raise ValueError('high is out of bounds for uint16')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.uint16)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.uint16)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint16_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- rng = <uint16_t>((high_v - is_open) - low_v)
- off = <uint16_t>(<uint32_t>low_v)
-
- if rng != last_rng:
- # Smallest bit mask >= max
- mask = <uint16_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_uint16(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
-
-cdef object _rand_uint8_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the open interval [low, high)
- must be in-range for the next larger type, uint16. Here we cast to
- this type for checking and then recast to uint8 when producing the
- random integers.
- """
- cdef uint8_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef uint8_t *out_data
- cdef uint16_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, 0)):
- raise ValueError('low is out of bounds for uint8')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, 0X100UL)):
- raise ValueError('high is out of bounds for uint8')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.uint8)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.uint8)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint8_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- rng = <uint8_t>((high_v - is_open) - low_v)
- off = <uint8_t>(<uint16_t>low_v)
-
- if rng != last_rng:
- # Smallest bit mask >= max
- mask = <uint8_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_uint8(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
-
-cdef object _rand_bool_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the open interval [low, high)
- must be in-range for the next larger type, uint8. Here we cast to
- this type for checking and then recast to bool when producing the
- random integers.
- """
- cdef bool_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef bool_t *out_data
- cdef uint8_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, 0)):
- raise ValueError('low is out of bounds for bool')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, 0x2UL)):
- raise ValueError('high is out of bounds for bool')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT8, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_UINT8, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.bool_)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.bool_)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <bool_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<uint8_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint8_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- rng = <bool_t>((high_v - is_open) - low_v)
- off = <bool_t>(<uint8_t>low_v)
-
- if rng != last_rng:
- # Smallest bit mask >= max
- mask = <bool_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_bool(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
-
-cdef object _rand_int32_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the open interval [low, high)
- must be in-range for the next larger type, uint64. Here we cast to
- this type for checking and then recast to int32 when producing the
- random integers.
- """
- cdef uint32_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef uint32_t *out_data
- cdef uint64_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, -0x80000000LL)):
- raise ValueError('low is out of bounds for int32')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, 0x80000000LL)):
- raise ValueError('high is out of bounds for int32')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.int32)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.int32)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint32_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- rng = <uint32_t>((high_v - is_open) - low_v)
- off = <uint32_t>(<uint64_t>low_v)
-
- if rng != last_rng:
- # Smallest bit mask >= max
- mask = <uint32_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_uint32(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
-
-cdef object _rand_int16_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the open interval [low, high)
- must be in-range for the next larger type, uint32. Here we cast to
- this type for checking and then recast to int16 when producing the
- random integers.
- """
- cdef uint16_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef uint16_t *out_data
- cdef uint32_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, -0x8000LL)):
- raise ValueError('low is out of bounds for int16')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, 0x8000LL)):
- raise ValueError('high is out of bounds for int16')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_INT32, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.int16)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.int16)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint16_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint32_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- rng = <uint16_t>((high_v - is_open) - low_v)
- off = <uint16_t>(<uint32_t>low_v)
-
- if rng != last_rng:
- # Smallest bit mask >= max
- mask = <uint16_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_uint16(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
-
-cdef object _rand_int8_broadcast(np.ndarray low, np.ndarray high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for smaller integer types
-
- This path is simpler since the high value in the open interval [low, high)
- must be in-range for the next larger type, uint16. Here we cast to
- this type for checking and then recast to int8 when producing the
- random integers.
- """
- cdef uint8_t rng, last_rng, off, val, mask, out_val, is_open
- cdef uint32_t buf
- cdef uint8_t *out_data
- cdef uint16_t low_v, high_v
- cdef np.ndarray low_arr, high_arr, out_arr
- cdef np.npy_intp i, cnt
- cdef np.broadcast it
- cdef int buf_rem = 0
-
- # Array path
- is_open = not closed
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
- if np.any(np.less(low_arr, -0x80LL)):
- raise ValueError('low is out of bounds for int8')
- if closed:
- high_comp = np.greater_equal
- low_high_comp = np.greater
- else:
- high_comp = np.greater
- low_high_comp = np.greater_equal
-
- if np.any(high_comp(high_arr, 0x80LL)):
- raise ValueError('high is out of bounds for int8')
- if np.any(low_high_comp(low_arr, high_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
- high_arr = <np.ndarray>np.PyArray_FROM_OTF(high, np.NPY_INT16, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.int8)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.int8)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint8_t *>np.PyArray_DATA(out_arr)
- cnt = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(cnt):
- low_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint16_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- rng = <uint8_t>((high_v - is_open) - low_v)
- off = <uint8_t>(<uint16_t>low_v)
-
- if rng != last_rng:
- # Smallest bit mask >= max
- mask = <uint8_t>_gen_mask(rng)
-
- out_data[i] = random_buffered_bounded_uint8(state, off, rng, mask, use_masked, &buf_rem, &buf)
-
- np.PyArray_MultiIter_NEXT(it)
- return out_arr
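
The signed paths above reuse the unsigned fill machinery: low is reinterpreted as an unsigned offset, high - 1 - low always fits the unsigned type, and the unsigned result is viewed back as the signed dtype. A small sketch of that reinterpretation, assuming int8/uint8 for concreteness (values here are made up for illustration):

    import numpy as np

    low, high = -5, 6                    # half-open [-5, 6) -> closed [-5, 5]
    off = int(np.array(low, dtype=np.int8).view(np.uint8))  # 251: bits of -5
    rng = high - 1 - low                 # 10, always representable in uint8

    draw = 7                             # pretend bounded draw in [0, rng]
    out = np.array((off + draw) & 0xFF, dtype=np.uint8).view(np.int8)
    assert int(out) == low + draw        # 2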
-
-
-cdef object _rand_uint64_broadcast(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for 64-bit integer types
-
- Requires special treatment because the high value can be out-of-range for
- the largest (64 bit) integer type, given that the generator is specified on
- the interval [low, high).
-
- The internal generator does not have this issue since it generates from
- the closed interval [low, high-1] and high-1 is always in range for the
- 64 bit integer type.
- """
-
- cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr
- cdef np.npy_intp i, cnt, n
- cdef np.broadcast it
- cdef object closed_upper
- cdef uint64_t *out_data
- cdef uint64_t *highm1_data
- cdef uint64_t low_v, high_v
- cdef uint64_t rng, last_rng, val, mask, off, out_val
-
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
-
- if np.any(np.less(low_arr, 0x0ULL)):
- raise ValueError('low is out of bounds for uint64')
- dt = high_arr.dtype
- if closed or np.issubdtype(dt, np.integer):
- # Avoid object dtype path if already an integer
- high_lower_comp = np.less if closed else np.less_equal
- if np.any(high_lower_comp(high_arr, 0x0ULL)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
- high_m1 = high_arr if closed else high_arr - dt.type(1)
- if np.any(np.greater(high_m1, 0xFFFFFFFFFFFFFFFFULL)):
- raise ValueError('high is out of bounds for uint64')
- highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
- else:
- # If input is object or a floating type
- highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.uint64)
- highm1_data = <uint64_t *>np.PyArray_DATA(highm1_arr)
- cnt = np.PyArray_SIZE(high_arr)
- flat = high_arr.flat
- for i in range(cnt):
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- closed_upper = int(flat[i]) - 1
- if closed_upper > 0xFFFFFFFFFFFFFFFFULL:
- raise ValueError('high is out of bounds for uint64')
- if closed_upper < 0x0ULL:
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
- highm1_data[i] = <uint64_t>closed_upper
-
- if np.any(np.greater(low_arr, highm1_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- high_arr = highm1_arr
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_UINT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.uint64)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.uint64)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint64_t *>np.PyArray_DATA(out_arr)
- n = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(n):
- low_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<uint64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Generator produces values on the closed int [off, off+rng], -1 subtracted above
- rng = <uint64_t>(high_v - low_v)
- off = <uint64_t>(<uint64_t>low_v)
-
- if rng != last_rng:
- mask = _gen_mask(rng)
- out_data[i] = random_bounded_uint64(state, off, rng, mask, use_masked)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return out_arr
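
This is why the 64-bit path subtracts 1 while the value is still a Python integer: the exclusive bound high may equal 2**64, which is not representable as uint64, whereas high - 1 always is. A short illustration (not part of the patch):

    import numpy as np

    high = 2**64                        # legal exclusive bound for uint64 draws
    try:
        np.uint64(high)                 # but not itself representable
    except OverflowError:
        pass
    closed_upper = np.uint64(high - 1)  # high - 1 always fits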
-
-cdef object _rand_int64_broadcast(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- Array path for 64-bit integer types
-
- Requires special treatment because the high value can be out-of-range for
- the largest (64 bit) integer type, given that the generator is specified on
- the interval [low, high).
-
- The internal generator does not have this issue since it generates from
- the closed interval [low, high-1] and high-1 is always in range for the
- 64 bit integer type.
- """
-
- cdef np.ndarray low_arr, high_arr, out_arr, highm1_arr
- cdef np.npy_intp i, cnt, n
- cdef np.broadcast it
- cdef object closed_upper
- cdef uint64_t *out_data
- cdef int64_t *highm1_data
- cdef int64_t low_v, high_v
- cdef uint64_t rng, last_rng, val, mask, off, out_val
-
- low_arr = <np.ndarray>low
- high_arr = <np.ndarray>high
-
- if np.any(np.less(low_arr, -0x8000000000000000LL)):
- raise ValueError('low is out of bounds for int64')
- dt = high_arr.dtype
- if closed or np.issubdtype(dt, np.integer):
- # Avoid object dtype path if already an integer
- high_lower_comp = np.less if closed else np.less_equal
- if np.any(high_lower_comp(high_arr, -0x8000000000000000LL)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
- high_m1 = high_arr if closed else high_arr - dt.type(1)
- if np.any(np.greater(high_m1, 0x7FFFFFFFFFFFFFFFLL)):
- raise ValueError('high is out of bounds for int64')
- highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
- else:
- # If input is object or a floating type
- highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.int64)
- highm1_data = <int64_t *>np.PyArray_DATA(highm1_arr)
- cnt = np.PyArray_SIZE(high_arr)
- flat = high_arr.flat
- for i in range(cnt):
- # Subtract 1 since generator produces values on the closed int [off, off+rng]
- closed_upper = int(flat[i]) - 1
- if closed_upper > 0x7FFFFFFFFFFFFFFFLL:
- raise ValueError('high is out of bounds for int64')
- if closed_upper < -0x8000000000000000LL:
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
- highm1_data[i] = <int64_t>closed_upper
-
- if np.any(np.greater(low_arr, highm1_arr)):
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- high_arr = highm1_arr
- low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.NPY_INT64, np.NPY_ALIGNED | np.NPY_FORCECAST)
-
- if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.int64)
- else:
- it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.int64)
-
- it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
- out_data = <uint64_t *>np.PyArray_DATA(out_arr)
- n = np.PyArray_SIZE(out_arr)
- mask = last_rng = 0
- with lock, nogil:
- for i in range(n):
- low_v = (<int64_t*>np.PyArray_MultiIter_DATA(it, 0))[0]
- high_v = (<int64_t*>np.PyArray_MultiIter_DATA(it, 1))[0]
- # Generator produces values on the closed int [off, off+rng], -1 subtracted above
- rng = <uint64_t>(high_v - low_v)
- off = <uint64_t>(<int64_t>low_v)
-
- if rng != last_rng:
- mask = _gen_mask(rng)
- out_data[i] = random_bounded_uint64(state, off, rng, mask, use_masked)
-
- np.PyArray_MultiIter_NEXT(it)
-
- return out_arr
-
-
-cdef object _rand_uint64(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_uint64(low, high, size, use_masked, *state, lock)
-
- Return random np.uint64 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.uint64 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.uint64
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint64. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint64_t rng, off, out_val
- cdef uint64_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.uint64)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < 0x0ULL:
- raise ValueError("low is out of bounds for uint64")
- if high > 0xFFFFFFFFFFFFFFFFULL:
- raise ValueError("high is out of bounds for uint64")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint64_t>(high - low)
- off = <uint64_t>(<uint64_t>low)
- if size is None:
- with lock:
- random_bounded_uint64_fill(state, off, rng, 1, use_masked, &out_val)
- return np.uint64(<uint64_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.uint64)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint64_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint64_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_uint64_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
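
The docstrings refer to Lemire's algorithm for the use_masked=False path. A pure-Python sketch of that method, with random.getrandbits standing in for the bit generator (the production C implementation may differ in detail):

    import random

    def lemire_bounded(s):
        """Return a uniform integer in [0, s) for 1 <= s <= 2**64 - 1."""
        x = random.getrandbits(64)
        m = x * s                        # 128-bit product via Python ints
        low = m & 0xFFFFFFFFFFFFFFFF     # low 64 bits
        if low < s:
            threshold = (2**64 - s) % s
            while low < threshold:       # reject to remove modulo bias
                x = random.getrandbits(64)
                m = x * s
                low = m & 0xFFFFFFFFFFFFFFFF
        return m >> 64                   # high 64 bits are the unbiased result

With this helper, off + lemire_bounded(rng + 1) draws from the closed interval [off, off + rng] for rng < 2**64 - 1.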
-
-cdef object _rand_uint32(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_uint32(low, high, size, use_masked, *state, lock)
-
- Return random np.uint32 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.uint32 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.uint32
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint32. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint32_t rng, off, out_val
- cdef uint32_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.uint32)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < 0x0UL:
- raise ValueError("low is out of bounds for uint32")
- if high > 0XFFFFFFFFUL:
- raise ValueError("high is out of bounds for uint32")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint32_t>(high - low)
- off = <uint32_t>(<uint32_t>low)
- if size is None:
- with lock:
- random_bounded_uint32_fill(state, off, rng, 1, use_masked, &out_val)
- return np.uint32(<uint32_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.uint32)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint32_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint32_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_uint32_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-
-cdef object _rand_uint16(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_uint16(low, high, size, use_masked, *state, lock)
-
- Return random np.uint16 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.uint16 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.uint16
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint16. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint16_t rng, off, out_val
- cdef uint16_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.uint16)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < 0x0UL:
- raise ValueError("low is out of bounds for uint16")
- if high > 0XFFFFUL:
- raise ValueError("high is out of bounds for uint16")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint16_t>(high - low)
- off = <uint16_t>(<uint16_t>low)
- if size is None:
- with lock:
- random_bounded_uint16_fill(state, off, rng, 1, use_masked, &out_val)
- return np.uint16(<uint16_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.uint16)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint16_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint16_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_uint16_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-
-cdef object _rand_uint8(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_uint8(low, high, size, use_masked, *state, lock)
-
- Return random np.uint8 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.uint8 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.uint8
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint8. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint8_t rng, off, out_val
- cdef uint8_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.uint8)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < 0x0UL:
- raise ValueError("low is out of bounds for uint8")
- if high > 0XFFUL:
- raise ValueError("high is out of bounds for uint8")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint8_t>(high - low)
- off = <uint8_t>(<uint8_t>low)
- if size is None:
- with lock:
- random_bounded_uint8_fill(state, off, rng, 1, use_masked, &out_val)
- return np.uint8(<uint8_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.uint8)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint8_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint8_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_uint8_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-
-cdef object _rand_bool(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_bool(low, high, size, use_masked, *state, lock)
-
- Return random np.bool integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.bool type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.bool
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for bool. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef bool_t rng, off, out_val
- cdef bool_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.bool)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < 0x0UL:
- raise ValueError("low is out of bounds for bool")
- if high > 0x1UL:
- raise ValueError("high is out of bounds for bool")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <bool_t>(high - low)
- off = <bool_t>(<bool_t>low)
- if size is None:
- with lock:
- random_bounded_bool_fill(state, off, rng, 1, use_masked, &out_val)
- return np.bool_(<bool_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.bool)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <bool_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_bool_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_bool_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-
-cdef object _rand_int64(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_int64(low, high, size, use_masked, *state, lock)
-
- Return random np.int64 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.int64 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.int64
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint64. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint64_t rng, off, out_val
- cdef uint64_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.int64)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < -0x8000000000000000LL:
- raise ValueError("low is out of bounds for int64")
- if high > 0x7FFFFFFFFFFFFFFFL:
- raise ValueError("high is out of bounds for int64")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint64_t>(high - low)
- off = <uint64_t>(<int64_t>low)
- if size is None:
- with lock:
- random_bounded_uint64_fill(state, off, rng, 1, use_masked, &out_val)
- return np.int64(<int64_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.int64)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint64_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint64_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_int64_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-
-cdef object _rand_int32(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_int32(low, high, size, use_masked, *state, lock)
-
- Return random np.int32 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.int32 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.int32
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint32. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint32_t rng, off, out_val
- cdef uint32_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.int32)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < -0x80000000L:
- raise ValueError("low is out of bounds for int32")
- if high > 0x7FFFFFFFL:
- raise ValueError("high is out of bounds for int32")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint32_t>(high - low)
- off = <uint32_t>(<int32_t>low)
- if size is None:
- with lock:
- random_bounded_uint32_fill(state, off, rng, 1, use_masked, &out_val)
- return np.int32(<int32_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.int32)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint32_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint32_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_int32_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-
-cdef object _rand_int16(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_int16(low, high, size, use_masked, *state, lock)
-
- Return random np.int16 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.int16 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.int16
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint16. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint16_t rng, off, out_val
- cdef uint16_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.int16)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < -0x8000L:
- raise ValueError("low is out of bounds for int16")
- if high > 0x7FFFL:
- raise ValueError("high is out of bounds for int16")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint16_t>(high - low)
- off = <uint16_t>(<int16_t>low)
- if size is None:
- with lock:
- random_bounded_uint16_fill(state, off, rng, 1, use_masked, &out_val)
- return np.int16(<int16_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.int16)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint16_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint16_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_int16_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
-
-cdef object _rand_int8(object low, object high, object size,
- bint use_masked, bint closed,
- bitgen_t *state, object lock):
- """
- _rand_int8(low, high, size, use_masked, *state, lock)
-
- Return random np.int8 integers from `low` (inclusive) to `high` (exclusive).
-
- Return random integers from the "discrete uniform" distribution in the
- interval [`low`, `high`). If `high` is None (the default),
- then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.int8 type.
-
- Parameters
- ----------
- low : int or array-like
- Lowest (signed) integer to be drawn from the distribution (unless
- ``high=None``, in which case this parameter is the *highest* such
- integer).
- high : int or array-like
- If provided, one above the largest (signed) integer to be drawn from the
- distribution (see above for behavior if ``high=None``).
- size : int or tuple of ints
- Output shape. If the given shape is, e.g., ``(m, n, k)``, then
- ``m * n * k`` samples are drawn. Default is None, in which case a
- single value is returned.
- use_masked : bool
- If True then rejection sampling with a range mask is used else Lemire's algorithm is used.
- closed : bool
- If True then sample from [low, high]. If False, sample [low, high)
- state : bit generator
- Bit generator state to use in the core random number generators
- lock : threading.Lock
- Lock to prevent multiple using a single generator simultaneously
-
- Returns
- -------
- out : python scalar or ndarray of np.int8
- `size`-shaped array of random integers from the appropriate
- distribution, or a single such random int if `size` not provided.
-
- Notes
- -----
- The internal integer generator produces values from the closed
- interval [low, high-(not closed)]. This requires some care since
- high can be out-of-range for uint8. The scalar path leaves
- integers as Python integers until the 1 has been subtracted to
- avoid needing to cast to a larger type.
- """
- cdef np.ndarray out_arr, low_arr, high_arr
- cdef uint8_t rng, off, out_val
- cdef uint8_t *out_data
- cdef np.npy_intp i, n, cnt
-
- if size is not None:
- if (np.prod(size) == 0):
- return np.empty(size, dtype=np.int8)
-
- low_arr = <np.ndarray>np.array(low, copy=False)
- high_arr = <np.ndarray>np.array(high, copy=False)
- low_ndim = np.PyArray_NDIM(low_arr)
- high_ndim = np.PyArray_NDIM(high_arr)
- if ((low_ndim == 0 or (low_ndim == 1 and low_arr.size == 1 and size is not None)) and
- (high_ndim == 0 or (high_ndim == 1 and high_arr.size == 1 and size is not None))):
- low = int(low_arr)
- high = int(high_arr)
- # Subtract 1 since internal generator produces on closed interval [low, high]
- if not closed:
- high -= 1
-
- if low < -0x80L:
- raise ValueError("low is out of bounds for int8")
- if high > 0x7FL:
- raise ValueError("high is out of bounds for int8")
- if low > high: # -1 already subtracted, closed interval
- comp = '>' if closed else '>='
- raise ValueError('low {comp} high'.format(comp=comp))
-
- rng = <uint8_t>(high - low)
- off = <uint8_t>(<int8_t>low)
- if size is None:
- with lock:
- random_bounded_uint8_fill(state, off, rng, 1, use_masked, &out_val)
- return np.int8(<int8_t>out_val)
- else:
- out_arr = <np.ndarray>np.empty(size, np.int8)
- cnt = np.PyArray_SIZE(out_arr)
- out_data = <uint8_t *>np.PyArray_DATA(out_arr)
- with lock, nogil:
- random_bounded_uint8_fill(state, off, rng, cnt, use_masked, out_data)
- return out_arr
- return _rand_int8_broadcast(low_arr, high_arr, size, use_masked, closed, state, lock)
diff --git a/numpy/random/_bounded_integers.pyx.in b/numpy/random/_bounded_integers.pyx.in
index 47cb13b3a..7e19471e4 100644
--- a/numpy/random/_bounded_integers.pyx.in
+++ b/numpy/random/_bounded_integers.pyx.in
@@ -8,7 +8,7 @@ __all__ = []
np.import_array()
-cdef extern from "include/distributions.h":
+cdef extern from "numpy/random/distributions.h":
# Generate random numbers in closed interval [off, off + rng].
uint64_t random_bounded_uint64(bitgen_t *bitgen_state,
uint64_t off, uint64_t rng,
@@ -191,7 +191,7 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
highm1_arr = <np.ndarray>np.PyArray_FROM_OTF(high_m1, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
else:
# If input is object or a floating type
- highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{nptype}})
+ highm1_arr = <np.ndarray>np.empty_like(high_arr, dtype=np.{{otype}})
highm1_data = <{{nptype}}_t *>np.PyArray_DATA(highm1_arr)
cnt = np.PyArray_SIZE(high_arr)
flat = high_arr.flat
@@ -213,10 +213,10 @@ cdef object _rand_{{nptype}}_broadcast(object low, object high, object size,
low_arr = <np.ndarray>np.PyArray_FROM_OTF(low, np.{{npctype}}, np.NPY_ALIGNED | np.NPY_FORCECAST)
if size is not None:
- out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
else:
it = np.PyArray_MultiIterNew2(low_arr, high_arr)
- out_arr = <np.ndarray>np.empty(it.shape, np.{{nptype}})
+ out_arr = <np.ndarray>np.empty(it.shape, np.{{otype}})
it = np.PyArray_MultiIterNew3(low_arr, high_arr, out_arr)
out_data = <uint64_t *>np.PyArray_DATA(out_arr)
@@ -258,12 +258,12 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
"""
_rand_{{nptype}}(low, high, size, use_masked, *state, lock)
- Return random np.{{nptype}} integers from `low` (inclusive) to `high` (exclusive).
+ Return random `np.{{otype}}` integers from `low` (inclusive) to `high` (exclusive).
Return random integers from the "discrete uniform" distribution in the
interval [`low`, `high`). If `high` is None (the default),
then results are from [0, `low`). On entry the arguments are presumed
- to have been validated for size and order for the np.{{nptype}} type.
+ to have been validated for size and order for the `np.{{otype}}` type.
Parameters
----------
@@ -289,7 +289,7 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
Returns
-------
- out : python scalar or ndarray of np.{{nptype}}
+ out : python scalar or ndarray of np.{{otype}}
`size`-shaped array of random integers from the appropriate
distribution, or a single such random int if `size` not provided.
@@ -308,7 +308,7 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
if size is not None:
if (np.prod(size) == 0):
- return np.empty(size, dtype=np.{{nptype}})
+ return np.empty(size, dtype=np.{{otype}})
low_arr = <np.ndarray>np.array(low, copy=False)
high_arr = <np.ndarray>np.array(high, copy=False)
@@ -337,7 +337,7 @@ cdef object _rand_{{nptype}}(object low, object high, object size,
random_bounded_{{utype}}_fill(state, off, rng, 1, use_masked, &out_val)
return np.{{otype}}(<{{nptype}}_t>out_val)
else:
- out_arr = <np.ndarray>np.empty(size, np.{{nptype}})
+ out_arr = <np.ndarray>np.empty(size, np.{{otype}})
cnt = np.PyArray_SIZE(out_arr)
out_data = <{{utype}}_t *>np.PyArray_DATA(out_arr)
with lock, nogil:
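
The template change above swaps np.{{nptype}} for np.{{otype}} wherever an output dtype object is needed. Inferring the variable values from the rendered functions earlier in this diff (an assumption, since the template header is not shown), the two differ only for the bool row, where the output type must be the NumPy scalar type:

    import numpy as np

    np.empty(3, np.uint32)   # unchanged rows: nptype and otype agree
    np.empty(3, np.bool_)    # bool row: the template must emit np.bool_,
                             # hence np.{{otype}} rather than np.{{nptype}}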
diff --git a/numpy/random/_common.pxd b/numpy/random/_common.pxd
index 74bebca83..588f613ae 100644
--- a/numpy/random/_common.pxd
+++ b/numpy/random/_common.pxd
@@ -5,7 +5,7 @@ from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t
import numpy as np
cimport numpy as np
-from ._bit_generator cimport bitgen_t
+from numpy.random cimport bitgen_t
cdef double POISSON_LAM_MAX
cdef double LEGACY_POISSON_LAM_MAX
diff --git a/numpy/random/_examples/cffi/extending.py b/numpy/random/_examples/cffi/extending.py
new file mode 100644
index 000000000..732cbbb1d
--- /dev/null
+++ b/numpy/random/_examples/cffi/extending.py
@@ -0,0 +1,72 @@
+"""
+Use cffi to access the underlying C functions from distributions.h
+"""
+import os
+import numpy as np
+import cffi
+ffi = cffi.FFI()
+
+inc_dir = os.path.join(np.get_include(), 'numpy')
+
+# Basic numpy types
+ffi.cdef('''
+ typedef intptr_t npy_intp;
+ typedef unsigned char npy_bool;
+
+''')
+
+with open(os.path.join(inc_dir, 'random', 'bitgen.h')) as fid:
+ s = []
+ for line in fid:
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
+with open(os.path.join(inc_dir, 'random', 'distributions.h')) as fid:
+ s = []
+ in_skip = 0
+ for line in fid:
+ # massage the include file
+ if line.strip().startswith('#'):
+ continue
+
+ # skip any inlined function definition
+ # which starts with 'static NPY_INLINE xxx(...) {'
+ # and ends with a closing '}'
+ if line.strip().startswith('static NPY_INLINE'):
+ in_skip += line.count('{')
+ continue
+ elif in_skip > 0:
+ in_skip += line.count('{')
+ in_skip -= line.count('}')
+ continue
+
+ # replace defines with their value or remove them
+ line = line.replace('DECLDIR', '')
+ line = line.replace('NPY_INLINE', '')
+ line = line.replace('RAND_INT_TYPE', 'int64_t')
+ s.append(line)
+ ffi.cdef('\n'.join(s))
+
+lib = ffi.dlopen(np.random._generator.__file__)
+
+# Compare the distributions.h random_standard_normal_fill to
+# Generator.standard_normal
+bit_gen = np.random.PCG64()
+rng = np.random.Generator(bit_gen)
+state = bit_gen.state
+
+interface = rng.bit_generator.cffi
+n = 100
+vals_cffi = ffi.new('double[%d]' % n)
+lib.random_standard_normal_fill(interface.bit_generator, n, vals_cffi)
+
+# reset the state
+bit_gen.state = state
+
+vals = rng.standard_normal(n)
+
+for i in range(n):
+ assert vals[i] == vals_cffi[i]
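An illustrative aside (not part of the patch): the same `lib` handle can also draw scalars one at a time through `random_standard_normal`, which this changeset exposes in the public distributions.h header. A minimal sketch, assuming the script above has just run and `lib` and `interface` are still in scope:

# Draw a few scalars straight from the C function; the exact values depend on
# the bit generator state, so only basic properties are checked here.
scalars = [lib.random_standard_normal(interface.bit_generator) for _ in range(3)]
assert all(isinstance(v, float) for v in scalars)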
diff --git a/numpy/random/examples/cython/extending.pyx b/numpy/random/_examples/cython/extending.pyx
index a6a4ba4bf..7a0dfe078 100644
--- a/numpy/random/examples/cython/extending.pyx
+++ b/numpy/random/_examples/cython/extending.pyx
@@ -8,7 +8,7 @@ import numpy as np
cimport numpy as np
cimport cython
-from numpy.random.common cimport bitgen_t
+from numpy.random cimport bitgen_t
from numpy.random import PCG64
np.import_array()
@@ -39,7 +39,7 @@ def uniform_mean(Py_ssize_t n):
return randoms.mean()
-# This function is declated nogil so it can be used without the GIL below
+# This function is declared nogil so it can be used without the GIL below
cdef uint32_t bounded_uint(uint32_t lb, uint32_t ub, bitgen_t *rng) nogil:
cdef uint32_t mask, delta, val
mask = delta = ub - lb
diff --git a/numpy/random/examples/cython/extending_distributions.pyx b/numpy/random/_examples/cython/extending_distributions.pyx
index 3cefec97e..1bef506ef 100644
--- a/numpy/random/examples/cython/extending_distributions.pyx
+++ b/numpy/random/_examples/cython/extending_distributions.pyx
@@ -1,21 +1,25 @@
#!/usr/bin/env python
#cython: language_level=3
"""
-This file shows how the distributions that are accessed through
-distributions.pxd can be used Cython code.
+This file shows how to use a BitGenerator to create a distribution.
"""
import numpy as np
cimport numpy as np
cimport cython
from cpython.pycapsule cimport PyCapsule_IsValid, PyCapsule_GetPointer
-from numpy.random.common cimport *
-from numpy.random.distributions cimport random_gauss_zig
+from libc.stdint cimport uint16_t, uint64_t
+from numpy.random cimport bitgen_t
from numpy.random import PCG64
@cython.boundscheck(False)
@cython.wraparound(False)
-def normals_zig(Py_ssize_t n):
+def uniforms(Py_ssize_t n):
+ """
+ Create an array of `n` uniformly distributed doubles.
+ A 'real' distribution would want to process the values into
+    some non-uniform distribution.
+ """
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef const char *capsule_name = "BitGenerator"
@@ -23,37 +27,48 @@ def normals_zig(Py_ssize_t n):
x = PCG64()
capsule = x.capsule
+    # Optional check that the capsule is from a BitGenerator
if not PyCapsule_IsValid(capsule, capsule_name):
raise ValueError("Invalid pointer to anon_func_state")
+ # Cast the pointer
rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
- # Best practice is to release GIL and acquire the lock
+ random_values = np.empty(n, dtype='float64')
with x.lock, nogil:
for i in range(n):
- random_values[i] = random_gauss_zig(rng)
+ # Call the function
+ random_values[i] = rng.next_double(rng.state)
randoms = np.asarray(random_values)
- return randoms
-
+ return randoms
+
+# cython example 2
@cython.boundscheck(False)
@cython.wraparound(False)
-def uniforms(Py_ssize_t n):
+def uint10_uniforms(Py_ssize_t n):
+ """Uniform 10 bit integers stored as 16-bit unsigned integers"""
cdef Py_ssize_t i
cdef bitgen_t *rng
cdef const char *capsule_name = "BitGenerator"
- cdef double[::1] random_values
+ cdef uint16_t[::1] random_values
+ cdef int bits_remaining
+ cdef int width = 10
+ cdef uint64_t buff, mask = 0x3FF
x = PCG64()
capsule = x.capsule
- # Optional check that the capsule if from a BitGenerator
if not PyCapsule_IsValid(capsule, capsule_name):
raise ValueError("Invalid pointer to anon_func_state")
- # Cast the pointer
rng = <bitgen_t *> PyCapsule_GetPointer(capsule, capsule_name)
- random_values = np.empty(n)
+ random_values = np.empty(n, dtype='uint16')
+ # Best practice is to release GIL and acquire the lock
+ bits_remaining = 0
with x.lock, nogil:
for i in range(n):
- # Call the function
- random_values[i] = rng.next_double(rng.state)
+ if bits_remaining < width:
+ buff = rng.next_uint64(rng.state)
+ random_values[i] = buff & mask
+ buff >>= width
+
randoms = np.asarray(random_values)
return randoms
+
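A short aside (not part of the patch): both Cython functions above rely on two attributes every BitGenerator exposes at the Python level, the ``capsule`` wrapping a ``bitgen_t*`` and the ``lock`` guarding its state. A minimal sketch of those objects, assuming this NumPy version:

from numpy.random import PCG64

bg = PCG64()
print(bg.capsule)   # PyCapsule named "BitGenerator", unwrapped by the Cython code above
with bg.lock:       # the same lock the examples hold while calling next_double/next_uint64
    pass            # native draws would happen here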
diff --git a/numpy/random/_examples/cython/setup.py b/numpy/random/_examples/cython/setup.py
new file mode 100644
index 000000000..19f045fc0
--- /dev/null
+++ b/numpy/random/_examples/cython/setup.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+"""
+Build the Cython demonstrations of low-level access to NumPy random
+
+Usage: python setup.py build_ext -i
+"""
+
+import numpy as np
+from distutils.core import setup
+from Cython.Build import cythonize
+from setuptools.extension import Extension
+from os.path import join, abspath, dirname
+
+path = abspath(dirname(__file__))
+
+extending = Extension("extending",
+ sources=[join(path, 'extending.pyx')],
+ include_dirs=[
+ np.get_include(),
+ join(path, '..', '..')
+ ],
+ )
+distributions = Extension("extending_distributions",
+ sources=[join(path, 'extending_distributions.pyx')],
+ include_dirs=[np.get_include()])
+
+extensions = [extending, distributions]
+
+setup(
+ ext_modules=cythonize(extensions)
+)
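For reference (an editor's assumption about local usage, not part of the patch): after running ``python setup.py build_ext -i`` in the _examples/cython directory, the two freshly built modules can be exercised like this:

import numpy as np
from extending import uniform_mean
from extending_distributions import uniforms, uint10_uniforms

print(uniform_mean(10000))           # should be close to 0.5
u = uniforms(1000)
assert u.dtype == np.float64
small = uint10_uniforms(1000)
assert small.dtype == np.uint16 and small.max() < 1024   # only 10 bits are kept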
diff --git a/numpy/random/_examples/numba/extending.py b/numpy/random/_examples/numba/extending.py
new file mode 100644
index 000000000..0d240596b
--- /dev/null
+++ b/numpy/random/_examples/numba/extending.py
@@ -0,0 +1,84 @@
+import numpy as np
+import numba as nb
+
+from numpy.random import PCG64
+from timeit import timeit
+
+bit_gen = PCG64()
+next_d = bit_gen.cffi.next_double
+state_addr = bit_gen.cffi.state_address
+
+def normals(n, state):
+ out = np.empty(n)
+ for i in range((n + 1) // 2):
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ while r2 >= 1.0 or r2 == 0.0:
+ x1 = 2.0 * next_d(state) - 1.0
+ x2 = 2.0 * next_d(state) - 1.0
+ r2 = x1 * x1 + x2 * x2
+ f = np.sqrt(-2.0 * np.log(r2) / r2)
+ out[2 * i] = f * x1
+ if 2 * i + 1 < n:
+ out[2 * i + 1] = f * x2
+ return out
+
+# Compile using Numba
+normalsj = nb.jit(normals, nopython=True)
+# Must use state address not state with numba
+n = 10000
+
+def numbacall():
+ return normalsj(n, state_addr)
+
+rg = np.random.Generator(PCG64())
+
+def numpycall():
+ return rg.normal(size=n)
+
+# Check that the functions work
+r1 = numbacall()
+r2 = numpycall()
+assert r1.shape == (n,)
+assert r1.shape == r2.shape
+
+t1 = timeit(numbacall, number=1000)
+print('{:.2f} secs for {} PCG64 (Numba/PCG64) gaussian randoms'.format(t1, n))
+t2 = timeit(numpycall, number=1000)
+print('{:.2f} secs for {} PCG64 (NumPy/PCG64) gaussian randoms'.format(t2, n))
+
+# example 2
+
+next_u32 = bit_gen.ctypes.next_uint32
+ctypes_state = bit_gen.ctypes.state
+
+@nb.jit(nopython=True)
+def bounded_uint(lb, ub, state):
+ mask = delta = ub - lb
+ mask |= mask >> 1
+ mask |= mask >> 2
+ mask |= mask >> 4
+ mask |= mask >> 8
+ mask |= mask >> 16
+
+ val = next_u32(state) & mask
+ while val > delta:
+ val = next_u32(state) & mask
+
+ return lb + val
+
+
+print(bounded_uint(323, 2394691, ctypes_state.value))
+
+
+@nb.jit(nopython=True)
+def bounded_uints(lb, ub, n, state):
+ out = np.empty(n, dtype=np.uint32)
+ for i in range(n):
+ out[i] = bounded_uint(lb, ub, state)
+
+
+bounded_uints(323, 2394691, 10000000, ctypes_state.value)
+
+
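A quick sanity check (editor's sketch, not part of the patch): beyond the timing comparison above, the jitted polar-transform generator can be validated statistically, since standard normals should have mean near 0 and standard deviation near 1:

sample = normalsj(100000, state_addr)
print(sample.mean(), sample.std())   # expect values close to 0.0 and 1.0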
diff --git a/numpy/random/examples/numba/extending_distributions.py b/numpy/random/_examples/numba/extending_distributions.py
index 9233ccced..7cf8bf0b0 100644
--- a/numpy/random/examples/numba/extending_distributions.py
+++ b/numpy/random/_examples/numba/extending_distributions.py
@@ -1,22 +1,28 @@
r"""
-On *nix, execute in randomgen/src/distributions
+Building the required library in this example requires a source distribution
+of NumPy or a clone of the NumPy git repository, since distributions.c is not
+included in binary distributions.
+On *nix, execute in numpy/random/src/distributions
+
+export PYTHON_VERSION=3.8  # Python version
export PYTHON_INCLUDE=#path to Python's include folder, usually \
${PYTHON_HOME}/include/python${PYTHON_VERSION}m
export NUMPY_INCLUDE=#path to numpy's include folder, usually \
${PYTHON_HOME}/lib/python${PYTHON_VERSION}/site-packages/numpy/core/include
gcc -shared -o libdistributions.so -fPIC distributions.c \
-I${NUMPY_INCLUDE} -I${PYTHON_INCLUDE}
-mv libdistributions.so ../../examples/numba/
+mv libdistributions.so ../../_examples/numba/
On Windows
-rem PYTHON_HOME is setup dependent, this is an example
+rem PYTHON_HOME and PYTHON_VERSION are setup dependent; this is an example
set PYTHON_HOME=c:\Anaconda
+set PYTHON_VERSION=38
cl.exe /LD .\distributions.c -DDLL_EXPORT \
-I%PYTHON_HOME%\lib\site-packages\numpy\core\include \
- -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python36.lib
-move distributions.dll ../../examples/numba/
+ -I%PYTHON_HOME%\include %PYTHON_HOME%\libs\python%PYTHON_VERSION%.lib
+move distributions.dll ../../_examples/numba/
"""
import os
@@ -35,19 +41,19 @@ else:
raise RuntimeError('Required DLL/so file was not found.')
ffi.cdef("""
-double random_gauss_zig(void *bitgen_state);
+double random_standard_normal(void *bitgen_state);
""")
x = PCG64()
xffi = x.cffi
bit_generator = xffi.bit_generator
-random_gauss_zig = lib.random_gauss_zig
+random_standard_normal = lib.random_standard_normal
def normals(n, bit_generator):
out = np.empty(n)
for i in range(n):
- out[i] = random_gauss_zig(bit_generator)
+ out[i] = random_standard_normal(bit_generator)
return out
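An editor's sketch (assumption, not part of the patch): with libdistributions built as the module docstring describes, the pure-Python ``normals`` helper can be exercised directly, before any numba compilation, to confirm the renamed C entry point is reachable:

draws = normals(1000, bit_generator)
print(draws.mean(), draws.std())   # roughly 0.0 and 1.0 for standard normals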
diff --git a/numpy/random/_generator.pyx b/numpy/random/_generator.pyx
index b842d6a32..64ec73986 100644
--- a/numpy/random/_generator.pyx
+++ b/numpy/random/_generator.pyx
@@ -19,7 +19,7 @@ from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
_rand_uint8, _gen_mask)
from ._bounded_integers import _integers_types
from ._pcg64 import PCG64
-from ._bit_generator cimport bitgen_t
+from numpy.random cimport bitgen_t
from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1,
CONS_GT_1, CONS_POSITIVE_NOT_NAN, CONS_POISSON,
@@ -28,7 +28,7 @@ from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
)
-cdef extern from "include/distributions.h":
+cdef extern from "numpy/random/distributions.h":
struct s_binomial_t:
int has_binomial
@@ -502,7 +502,7 @@ cdef class Generator:
Desired dtype of the result. All dtypes are determined by their
name, i.e., 'int64', 'int', etc, so byteorder is not available
and a specific precision may have different C types depending
- on the platform. The default value is 'np.int'.
+ on the platform. The default value is `np.int_`.
endpoint : bool, optional
If true, sample from the interval [low, high] instead of the
default [low, high)
@@ -595,7 +595,7 @@ cdef class Generator:
elif key == 'bool':
ret = _rand_bool(low, high, size, _masked, endpoint, &self._bitgen, self.lock)
- if size is None and dtype in (np.bool, np.int, np.long):
+ if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
return dtype(ret)
return ret
@@ -1004,6 +1004,12 @@ cdef class Generator:
A floating-point array of shape ``size`` of drawn samples, or a
single sample if ``size`` was not specified.
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
@@ -1011,12 +1017,6 @@ cdef class Generator:
mu + sigma * gen.standard_normal(size=...)
gen.normal(mu, sigma, size=...)
- See Also
- --------
- normal :
- Equivalent function with additional ``loc`` and ``scale`` arguments
- for setting the mean and standard deviation.
-
Examples
--------
>>> rng = np.random.default_rng()
@@ -3456,7 +3456,7 @@ cdef class Generator:
# Multivariate distributions:
def multivariate_normal(self, mean, cov, size=None, check_valid='warn',
- tol=1e-8):
+ tol=1e-8, *, method='svd'):
"""
multivariate_normal(mean, cov, size=None, check_valid='warn', tol=1e-8)
@@ -3486,6 +3486,15 @@ cdef class Generator:
tol : float, optional
Tolerance when checking the singular values in covariance matrix.
cov is cast to double before the check.
+        method : {'svd', 'eigh', 'cholesky'}, optional
+            The cov input is used to compute a factor matrix A such that
+            ``A @ A.T = cov``. This argument is used to select the method
+            used to compute the factor matrix A. The default method 'svd'
+            is the slowest but the most robust, while 'cholesky' is the
+            fastest but the least robust. The 'eigh' method uses eigen
+            decomposition to compute A and is faster than 'svd' but slower
+            than 'cholesky'.
+
+ .. versionadded:: 1.18.0
Returns
-------
@@ -3546,10 +3555,16 @@ cdef class Generator:
--------
>>> mean = (1, 2)
>>> cov = [[1, 0], [0, 1]]
- >>> x = np.random.default_rng().multivariate_normal(mean, cov, (3, 3))
+ >>> rng = np.random.default_rng()
+ >>> x = rng.multivariate_normal(mean, cov, (3, 3))
>>> x.shape
(3, 3, 2)
+        We can use a different method than the default to factorize cov:
+ >>> y = rng.multivariate_normal(mean, cov, (3, 3), method='cholesky')
+ >>> y.shape
+ (3, 3, 2)
+
The following is probably true, given that 0.6 is roughly twice the
standard deviation:
@@ -3557,7 +3572,9 @@ cdef class Generator:
[True, True] # random
"""
- from numpy.dual import svd
+ if method not in {'eigh', 'svd', 'cholesky'}:
+ raise ValueError(
+ "method must be one of {'eigh', 'svd', 'cholesky'}")
# Check preconditions on arguments
mean = np.array(mean)
@@ -3600,13 +3617,27 @@ cdef class Generator:
# GH10839, ensure double to make tol meaningful
cov = cov.astype(np.double)
- (u, s, v) = svd(cov)
+ if method == 'svd':
+ from numpy.dual import svd
+ (u, s, vh) = svd(cov)
+ elif method == 'eigh':
+ from numpy.dual import eigh
+ # could call linalg.svd(hermitian=True), but that calculates a vh we don't need
+ (s, u) = eigh(cov)
+ else:
+ from numpy.dual import cholesky
+ l = cholesky(cov)
- if check_valid != 'ignore':
+        # make sure check_valid is ignored when method == 'cholesky'
+ # since the decomposition will have failed if cov is not valid.
+ if check_valid != 'ignore' and method != 'cholesky':
if check_valid != 'warn' and check_valid != 'raise':
- raise ValueError("check_valid must equal 'warn', 'raise', or 'ignore'")
-
- psd = np.allclose(np.dot(v.T * s, v), cov, rtol=tol, atol=tol)
+ raise ValueError(
+ "check_valid must equal 'warn', 'raise', or 'ignore'")
+ if method == 'svd':
+ psd = np.allclose(np.dot(vh.T * s, vh), cov, rtol=tol, atol=tol)
+ else:
+ psd = not np.any(s < -tol)
if not psd:
if check_valid == 'warn':
warnings.warn("covariance is not positive-semidefinite.",
@@ -3614,7 +3645,17 @@ cdef class Generator:
else:
raise ValueError("covariance is not positive-semidefinite.")
- x = np.dot(x, np.sqrt(s)[:, None] * v)
+ if method == 'cholesky':
+ _factor = l
+ elif method == 'eigh':
+            # if check_valid == 'ignore' we need to ensure that np.sqrt does
+            # not return NaN when s contains a very small negative number that
+            # is approximately zero, or when the covariance is not positive-semidefinite
+ _factor = u * np.sqrt(abs(s))
+ else:
+ _factor = np.sqrt(s)[:, None] * vh
+
+ x = np.dot(x, _factor)
x += mean
x.shape = tuple(final_shape)
return x
@@ -4352,11 +4393,15 @@ def default_rng(seed=None):
Additionally, when passed a `BitGenerator`, it will be wrapped by
`Generator`. If passed a `Generator`, it will be returned unaltered.
+ Returns
+ -------
+ Generator
+ The initialized generator object.
+
Notes
-----
- When ``seed`` is omitted or ``None``, a new `BitGenerator` and `Generator` will
- be instantiated each time. This function does not manage a default global
- instance.
+ If ``seed`` is not a `BitGenerator` or a `Generator`, a new `BitGenerator`
+ is instantiated. This function does not manage a default global instance.
"""
if _check_bit_generator(seed):
# We were passed a BitGenerator, so just wrap it up.
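To illustrate the new ``method`` keyword (an editor's example, not part of the patch), all three factorizations should reproduce the requested covariance up to sampling error:

import numpy as np

rng = np.random.default_rng(12345)
mean = [0.0, 0.0]
cov = [[2.0, 0.3], [0.3, 0.5]]
for method in ('svd', 'eigh', 'cholesky'):
    x = rng.multivariate_normal(mean, cov, size=200000, method=method)
    print(method, np.round(np.cov(x.T), 2))   # each estimate should be close to cov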
diff --git a/numpy/random/_mt19937.pyx b/numpy/random/_mt19937.pyx
index e99652b73..919a96a4c 100644
--- a/numpy/random/_mt19937.pyx
+++ b/numpy/random/_mt19937.pyx
@@ -4,7 +4,7 @@ import numpy as np
cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
-from ._bit_generator cimport BitGenerator, SeedSequence
+from numpy.random cimport BitGenerator, SeedSequence
__all__ = ['MT19937']
diff --git a/numpy/random/_pcg64.pyx b/numpy/random/_pcg64.pyx
index 1a5d852a2..05d27db5c 100644
--- a/numpy/random/_pcg64.pyx
+++ b/numpy/random/_pcg64.pyx
@@ -3,7 +3,7 @@ cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
from ._common cimport uint64_to_double, wrap_int
-from ._bit_generator cimport BitGenerator
+from numpy.random cimport BitGenerator
__all__ = ['PCG64']
diff --git a/numpy/random/_philox.pyx b/numpy/random/_philox.pyx
index 9f136c32f..7e8880490 100644
--- a/numpy/random/_philox.pyx
+++ b/numpy/random/_philox.pyx
@@ -10,7 +10,7 @@ cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
from ._common cimport uint64_to_double, int_to_array, wrap_int
-from ._bit_generator cimport BitGenerator
+from numpy.random cimport BitGenerator
__all__ = ['Philox']
diff --git a/numpy/random/_sfc64.pyx b/numpy/random/_sfc64.pyx
index 1633669d5..1daee34f8 100644
--- a/numpy/random/_sfc64.pyx
+++ b/numpy/random/_sfc64.pyx
@@ -3,7 +3,7 @@ cimport numpy as np
from libc.stdint cimport uint32_t, uint64_t
from ._common cimport uint64_to_double
-from ._bit_generator cimport BitGenerator
+from numpy.random cimport BitGenerator
__all__ = ['SFC64']
diff --git a/numpy/random/examples/cython/setup.py b/numpy/random/examples/cython/setup.py
deleted file mode 100644
index 69f057ed5..000000000
--- a/numpy/random/examples/cython/setup.py
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/usr/bin/env python3
-"""
-Build the demos
-
-Usage: python setup.py build_ext -i
-"""
-
-import numpy as np
-from distutils.core import setup
-from Cython.Build import cythonize
-from setuptools.extension import Extension
-from os.path import join
-
-extending = Extension("extending",
- sources=['extending.pyx'],
- include_dirs=[np.get_include()])
-distributions = Extension("extending_distributions",
- sources=['extending_distributions.pyx',
- join('..', '..', 'src',
- 'distributions', 'distributions.c')],
- include_dirs=[np.get_include()])
-
-extensions = [extending, distributions]
-
-setup(
- ext_modules=cythonize(extensions)
-)
diff --git a/numpy/random/examples/numba/extending.py b/numpy/random/examples/numba/extending.py
deleted file mode 100644
index d41c2d76f..000000000
--- a/numpy/random/examples/numba/extending.py
+++ /dev/null
@@ -1,77 +0,0 @@
-import datetime as dt
-
-import numpy as np
-import numba as nb
-
-from numpy.random import PCG64
-
-x = PCG64()
-f = x.ctypes.next_uint32
-s = x.ctypes.state
-
-
-@nb.jit(nopython=True)
-def bounded_uint(lb, ub, state):
- mask = delta = ub - lb
- mask |= mask >> 1
- mask |= mask >> 2
- mask |= mask >> 4
- mask |= mask >> 8
- mask |= mask >> 16
-
- val = f(state) & mask
- while val > delta:
- val = f(state) & mask
-
- return lb + val
-
-
-print(bounded_uint(323, 2394691, s.value))
-
-
-@nb.jit(nopython=True)
-def bounded_uints(lb, ub, n, state):
- out = np.empty(n, dtype=np.uint32)
- for i in range(n):
- out[i] = bounded_uint(lb, ub, state)
-
-
-bounded_uints(323, 2394691, 10000000, s.value)
-
-g = x.cffi.next_double
-cffi_state = x.cffi.state
-state_addr = x.cffi.state_address
-
-
-def normals(n, state):
- out = np.empty(n)
- for i in range((n + 1) // 2):
- x1 = 2.0 * g(state) - 1.0
- x2 = 2.0 * g(state) - 1.0
- r2 = x1 * x1 + x2 * x2
- while r2 >= 1.0 or r2 == 0.0:
- x1 = 2.0 * g(state) - 1.0
- x2 = 2.0 * g(state) - 1.0
- r2 = x1 * x1 + x2 * x2
- f = np.sqrt(-2.0 * np.log(r2) / r2)
- out[2 * i] = f * x1
- if 2 * i + 1 < n:
- out[2 * i + 1] = f * x2
- return out
-
-
-print(normals(10, cffi_state).var())
-# Warm up
-normalsj = nb.jit(normals, nopython=True)
-normalsj(1, state_addr)
-
-start = dt.datetime.now()
-normalsj(1000000, state_addr)
-ms = 1000 * (dt.datetime.now() - start).total_seconds()
-print('1,000,000 Polar-transform (numba/PCG64) randoms in '
- '{ms:0.1f}ms'.format(ms=ms))
-
-start = dt.datetime.now()
-np.random.standard_normal(1000000)
-ms = 1000 * (dt.datetime.now() - start).total_seconds()
-print('1,000,000 Polar-transform (NumPy) randoms in {ms:0.1f}ms'.format(ms=ms))
diff --git a/numpy/random/include/legacy-distributions.h b/numpy/random/include/legacy-distributions.h
index 6a0fc7dc4..b8ba0841c 100644
--- a/numpy/random/include/legacy-distributions.h
+++ b/numpy/random/include/legacy-distributions.h
@@ -2,7 +2,7 @@
#define _RANDOMDGEN__DISTRIBUTIONS_LEGACY_H_
-#include "distributions.h"
+#include "numpy/random/distributions.h"
typedef struct aug_bitgen {
bitgen_t *bit_generator;
diff --git a/numpy/random/mtrand.pyx b/numpy/random/mtrand.pyx
index 683a771cc..a4d409f37 100644
--- a/numpy/random/mtrand.pyx
+++ b/numpy/random/mtrand.pyx
@@ -17,7 +17,7 @@ from ._bounded_integers cimport (_rand_bool, _rand_int32, _rand_int64,
_rand_uint8,)
from ._bounded_integers import _integers_types
from ._mt19937 import MT19937 as _MT19937
-from ._bit_generator cimport bitgen_t
+from numpy.random cimport bitgen_t
from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
CONS_NON_NEGATIVE, CONS_BOUNDED_0_1, CONS_BOUNDED_GT_0_1, CONS_GTE_1,
CONS_GT_1, LEGACY_CONS_POISSON,
@@ -25,7 +25,7 @@ from ._common cimport (POISSON_LAM_MAX, CONS_POSITIVE, CONS_NONE,
check_array_constraint, check_constraint, disc, discrete_broadcast_iii,
)
-cdef extern from "include/distributions.h":
+cdef extern from "numpy/random/distributions.h":
struct s_binomial_t:
int has_binomial
double psave
@@ -379,6 +379,10 @@ cdef class RandomState:
(b - a) * random_sample() + a
+ .. note::
+ New code should use the ``random`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -392,6 +396,10 @@ cdef class RandomState:
Array of random floats of shape `size` (unless ``size=None``, in which
case a single float is returned).
+ See Also
+ --------
+ Generator.random: which should be used for new code.
+
Examples
--------
>>> np.random.random_sample()
@@ -441,6 +449,10 @@ cdef class RandomState:
It is often seen in Bayesian inference and order statistics.
+ .. note::
+ New code should use the ``beta`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -458,6 +470,9 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized beta distribution.
+ See Also
+ --------
+ Generator.beta: which should be used for new code.
"""
return cont(&legacy_beta, &self._aug_state, size, self.lock, 2,
a, 'a', CONS_POSITIVE,
@@ -484,6 +499,10 @@ cdef class RandomState:
the size of raindrops measured over many rainstorms [1]_, or the time
between page requests to Wikipedia [2]_.
+ .. note::
+ New code should use the ``exponential`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
scale : float or array_like of floats
@@ -500,6 +519,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized exponential distribution.
+ See Also
+ --------
+ Generator.exponential: which should be used for new code.
+
References
----------
.. [1] Peyton Z. Peebles Jr., "Probability, Random Variables and
@@ -525,6 +548,10 @@ cdef class RandomState:
`standard_exponential` is identical to the exponential distribution
with a scale parameter of 1.
+ .. note::
+ New code should use the ``standard_exponential`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -537,6 +564,10 @@ cdef class RandomState:
out : float or ndarray
Drawn samples.
+ See Also
+ --------
+ Generator.standard_exponential: which should be used for new code.
+
Examples
--------
Output a 3x8000 array:
@@ -555,7 +586,7 @@ cdef class RandomState:
tomaxint(size=None)
Return a sample of uniformly distributed random integers in the interval
- [0, ``np.iinfo(np.int).max``]. The np.int type translates to the C long
+ [0, ``np.iinfo(np.int_).max``]. The `np.int_` type translates to the C long
integer type and its precision is platform dependent.
Parameters
@@ -584,7 +615,7 @@ cdef class RandomState:
[ 739731006, 1947757578]],
[[1871712945, 752307660],
[1601631370, 1479324245]]])
- >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int).max
+ >>> rs.tomaxint((2,2,2)) < np.iinfo(np.int_).max
array([[[ True, True],
[ True, True]],
[[ True, True],
@@ -618,6 +649,10 @@ cdef class RandomState:
the specified dtype in the "half-open" interval [`low`, `high`). If
`high` is None (the default), then results are from [0, `low`).
+ .. note::
+ New code should use the ``integers`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
low : int or array-like of ints
@@ -636,7 +671,7 @@ cdef class RandomState:
Desired dtype of the result. All dtypes are determined by their
name, i.e., 'int64', 'int', etc, so byteorder is not available
and a specific precision may have different C types depending
- on the platform. The default value is 'np.int'.
+ on the platform. The default value is `np.int_`.
.. versionadded:: 1.11.0
@@ -651,6 +686,7 @@ cdef class RandomState:
random_integers : similar to `randint`, only for the closed
interval [`low`, `high`], and 1 is the lowest value if `high` is
omitted.
+ Generator.integers: which should be used for new code.
Examples
--------
@@ -724,7 +760,7 @@ cdef class RandomState:
elif key == 'bool':
ret = _rand_bool(low, high, size, _masked, _endpoint, &self._bitgen, self.lock)
- if size is None and dtype in (np.bool, np.int, np.long):
+ if size is None and dtype in (bool, int, np.compat.long):
if np.array(ret).shape == ():
return dtype(ret)
return ret
@@ -735,6 +771,10 @@ cdef class RandomState:
Return random bytes.
+ .. note::
+ New code should use the ``bytes`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
length : int
@@ -745,11 +785,14 @@ cdef class RandomState:
out : str
String of length `length`.
+ See Also
+ --------
+ Generator.bytes: which should be used for new code.
+
Examples
--------
>>> np.random.bytes(10)
' eh\\x85\\x022SZ\\xbf\\xa4' #random
-
"""
cdef Py_ssize_t n_uint32 = ((length - 1) // 4 + 1)
# Interpret the uint32s as little-endian to convert them to bytes
@@ -766,6 +809,10 @@ cdef class RandomState:
.. versionadded:: 1.7.0
+ .. note::
+ New code should use the ``choice`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : 1-D array-like or int
@@ -799,6 +846,7 @@ cdef class RandomState:
See Also
--------
randint, shuffle, permutation
+        Generator.choice: which should be used for new code.
Examples
--------
@@ -958,6 +1006,10 @@ cdef class RandomState:
any value within the given interval is equally likely to be drawn
by `uniform`.
+ .. note::
+ New code should use the ``uniform`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
low : float or array_like of floats, optional
@@ -987,6 +1039,7 @@ cdef class RandomState:
rand : Convenience function that accepts dimensions as input, e.g.,
``rand(2,2)`` would generate a 2-by-2 array of floats,
uniformly distributed over ``[0, 1)``.
+ Generator.uniform: which should be used for new code.
Notes
-----
@@ -1114,6 +1167,10 @@ cdef class RandomState:
tuple to specify the size of the output, which is consistent with
other NumPy functions like `numpy.zeros` and `numpy.ones`.
+ .. note::
+ New code should use the ``standard_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
If positive int_like arguments are provided, `randn` generates an array
of shape ``(d0, d1, ..., dn)``, filled
with random floats sampled from a univariate "normal" (Gaussian)
@@ -1137,6 +1194,7 @@ cdef class RandomState:
--------
standard_normal : Similar, but takes a tuple as its argument.
normal : Also accepts mu and sigma arguments.
+ Generator.standard_normal: which should be used for new code.
Notes
-----
@@ -1165,11 +1223,11 @@ cdef class RandomState:
"""
random_integers(low, high=None, size=None)
- Random integers of type np.int between `low` and `high`, inclusive.
+ Random integers of type `np.int_` between `low` and `high`, inclusive.
- Return random integers of type np.int from the "discrete uniform"
+ Return random integers of type `np.int_` from the "discrete uniform"
distribution in the closed interval [`low`, `high`]. If `high` is
- None (the default), then results are from [1, `low`]. The np.int
+ None (the default), then results are from [1, `low`]. The `np.int_`
type translates to the C long integer type and its precision
is platform dependent.
@@ -1263,6 +1321,10 @@ cdef class RandomState:
Draw samples from a standard Normal distribution (mean=0, stdev=1).
+ .. note::
+ New code should use the ``standard_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -1276,6 +1338,13 @@ cdef class RandomState:
A floating-point array of shape ``size`` of drawn samples, or a
single sample if ``size`` was not specified.
+ See Also
+ --------
+ normal :
+ Equivalent function with additional ``loc`` and ``scale`` arguments
+ for setting the mean and standard deviation.
+ Generator.standard_normal: which should be used for new code.
+
Notes
-----
For random samples from :math:`N(\\mu, \\sigma^2)`, use one of::
@@ -1283,12 +1352,6 @@ cdef class RandomState:
mu + sigma * np.random.standard_normal(size=...)
np.random.normal(mu, sigma, size=...)
- See Also
- --------
- normal :
- Equivalent function with additional ``loc`` and ``scale`` arguments
- for setting the mean and standard deviation.
-
Examples
--------
>>> np.random.standard_normal()
@@ -1333,6 +1396,10 @@ cdef class RandomState:
by a large number of tiny, random disturbances, each with its own
unique distribution [2]_.
+ .. note::
+ New code should use the ``normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats
@@ -1355,6 +1422,7 @@ cdef class RandomState:
--------
scipy.stats.norm : probability density function, distribution or
cumulative density function, etc.
+ Generator.normal: which should be used for new code.
Notes
-----
@@ -1428,6 +1496,10 @@ cdef class RandomState:
Samples are drawn from a Gamma distribution with specified parameters,
shape (sometimes designated "k") and scale=1.
+ .. note::
+ New code should use the ``standard_gamma`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
shape : float or array_like of floats
@@ -1447,6 +1519,7 @@ cdef class RandomState:
--------
scipy.stats.gamma : probability density function, distribution or
cumulative density function, etc.
+ Generator.standard_gamma: which should be used for new code.
Notes
-----
@@ -1504,6 +1577,10 @@ cdef class RandomState:
`shape` (sometimes designated "k") and `scale` (sometimes designated
"theta"), where both parameters are > 0.
+ .. note::
+ New code should use the ``gamma`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
shape : float or array_like of floats
@@ -1526,6 +1603,7 @@ cdef class RandomState:
--------
scipy.stats.gamma : probability density function, distribution or
cumulative density function, etc.
+ Generator.gamma: which should be used for new code.
Notes
-----
@@ -1588,6 +1666,10 @@ cdef class RandomState:
that arises in ANOVA tests, and is the ratio of two chi-square
variates.
+ .. note::
+ New code should use the ``f`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
dfnum : float or array_like of floats
@@ -1609,6 +1691,7 @@ cdef class RandomState:
--------
scipy.stats.f : probability density function, distribution or
cumulative density function, etc.
+ Generator.f: which should be used for new code.
Notes
-----
@@ -1671,6 +1754,10 @@ cdef class RandomState:
freedom in denominator), where both parameters > 1.
`nonc` is the non-centrality parameter.
+ .. note::
+ New code should use the ``noncentral_f`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
dfnum : float or array_like of floats
@@ -1695,6 +1782,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized noncentral Fisher distribution.
+ See Also
+ --------
+ Generator.noncentral_f: which should be used for new code.
+
Notes
-----
When calculating the power of an experiment (power = probability of
@@ -1748,6 +1839,10 @@ cdef class RandomState:
resulting distribution is chi-square (see Notes). This distribution
is often used in hypothesis testing.
+ .. note::
+ New code should use the ``chisquare`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
@@ -1769,6 +1864,10 @@ cdef class RandomState:
When `df` <= 0 or when an inappropriate `size` (e.g. ``size=-1``)
is given.
+ See Also
+ --------
+ Generator.chisquare: which should be used for new code.
+
Notes
-----
The variable obtained by summing the squares of `df` independent,
@@ -1798,7 +1897,6 @@ cdef class RandomState:
--------
>>> np.random.chisquare(2,4)
array([ 1.89920014, 9.00867716, 3.13710533, 5.62318272]) # random
-
"""
return cont(&legacy_chisquare, &self._aug_state, size, self.lock, 1,
df, 'df', CONS_POSITIVE,
@@ -1814,6 +1912,10 @@ cdef class RandomState:
The noncentral :math:`\\chi^2` distribution is a generalization of
the :math:`\\chi^2` distribution.
+ .. note::
+ New code should use the ``noncentral_chisquare`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
@@ -1834,6 +1936,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized noncentral chi-square distribution.
+ See Also
+ --------
+ Generator.noncentral_chisquare: which should be used for new code.
+
Notes
-----
The probability density function for the noncentral Chi-square
@@ -1892,6 +1998,10 @@ cdef class RandomState:
Also known as the Lorentz distribution.
+ .. note::
+ New code should use the ``standard_cauchy`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
size : int or tuple of ints, optional
@@ -1904,6 +2014,10 @@ cdef class RandomState:
samples : ndarray or scalar
The drawn samples.
+ See Also
+ --------
+ Generator.standard_cauchy: which should be used for new code.
+
Notes
-----
The probability density function for the full Cauchy distribution is
@@ -1960,6 +2074,10 @@ cdef class RandomState:
large, the result resembles that of the standard normal
distribution (`standard_normal`).
+ .. note::
+ New code should use the ``standard_t`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
df : float or array_like of floats
@@ -1975,6 +2093,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized standard Student's t distribution.
+ See Also
+ --------
+ Generator.standard_t: which should be used for new code.
+
Notes
-----
The probability density function for the t distribution is
@@ -2057,6 +2179,10 @@ cdef class RandomState:
circle. It may be thought of as the circular analogue of the normal
distribution.
+ .. note::
+ New code should use the ``vonmises`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mu : float or array_like of floats
@@ -2078,6 +2204,7 @@ cdef class RandomState:
--------
scipy.stats.vonmises : probability density function, distribution, or
cumulative density function, etc.
+ Generator.vonmises: which should be used for new code.
Notes
-----
@@ -2150,6 +2277,10 @@ cdef class RandomState:
20 percent of the range, while the other 20 percent fill the
remaining 80 percent of the range.
+ .. note::
+ New code should use the ``pareto`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -2171,6 +2302,7 @@ cdef class RandomState:
cumulative density function, etc.
scipy.stats.genpareto : probability density function, distribution or
cumulative density function, etc.
+ Generator.pareto: which should be used for new code.
Notes
-----
@@ -2239,6 +2371,10 @@ cdef class RandomState:
The more common 2-parameter Weibull, including a scale parameter
:math:`\\lambda` is just :math:`X = \\lambda(-ln(U))^{1/a}`.
+ .. note::
+ New code should use the ``weibull`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -2260,6 +2396,7 @@ cdef class RandomState:
scipy.stats.weibull_min
scipy.stats.genextreme
gumbel
+ Generator.weibull: which should be used for new code.
Notes
-----
@@ -2330,6 +2467,10 @@ cdef class RandomState:
Also known as the power function distribution.
+ .. note::
+ New code should use the ``power`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -2350,6 +2491,10 @@ cdef class RandomState:
ValueError
If a < 1.
+ See Also
+ --------
+ Generator.power: which should be used for new code.
+
Notes
-----
The probability density function is
@@ -2433,6 +2578,10 @@ cdef class RandomState:
difference between two independent, identically distributed exponential
random variables.
+ .. note::
+ New code should use the ``laplace`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
@@ -2451,6 +2600,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Laplace distribution.
+ See Also
+ --------
+ Generator.laplace: which should be used for new code.
+
Notes
-----
It has the probability density function
@@ -2516,6 +2669,10 @@ cdef class RandomState:
scale. For more information on the Gumbel distribution, see
Notes and References below.
+ .. note::
+ New code should use the ``gumbel`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
@@ -2540,6 +2697,7 @@ cdef class RandomState:
scipy.stats.gumbel_r
scipy.stats.genextreme
weibull
+ Generator.gumbel: which should be used for new code.
Notes
-----
@@ -2633,6 +2791,10 @@ cdef class RandomState:
Samples are drawn from a logistic distribution with specified
parameters, loc (location or mean, also median), and scale (>0).
+ .. note::
+ New code should use the ``logistic`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
loc : float or array_like of floats, optional
@@ -2655,6 +2817,7 @@ cdef class RandomState:
--------
scipy.stats.logistic : probability density function, distribution or
cumulative density function, etc.
+ Generator.logistic: which should be used for new code.
Notes
-----
@@ -2715,6 +2878,10 @@ cdef class RandomState:
deviation are not the values for the distribution itself, but of the
underlying normal distribution it is derived from.
+ .. note::
+ New code should use the ``lognormal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : float or array_like of floats, optional
@@ -2737,6 +2904,7 @@ cdef class RandomState:
--------
scipy.stats.lognorm : probability density function, distribution,
cumulative density function, etc.
+ Generator.lognormal: which should be used for new code.
Notes
-----
@@ -2823,6 +2991,10 @@ cdef class RandomState:
The :math:`\\chi` and Weibull distributions are generalizations of the
Rayleigh.
+ .. note::
+ New code should use the ``rayleigh`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
scale : float or array_like of floats, optional
@@ -2838,6 +3010,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Rayleigh distribution.
+ See Also
+ --------
+ Generator.rayleigh: which should be used for new code.
+
Notes
-----
The probability density function for the Rayleigh distribution is
@@ -2897,6 +3073,10 @@ cdef class RandomState:
because there is an inverse relationship between the time to cover a
unit distance and distance covered in unit time.
+ .. note::
+ New code should use the ``wald`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : float or array_like of floats
@@ -2914,6 +3094,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Wald distribution.
+ See Also
+ --------
+ Generator.wald: which should be used for new code.
+
Notes
-----
The probability density function for the Wald distribution is
@@ -2962,6 +3146,10 @@ cdef class RandomState:
limit right. Unlike the other distributions, these parameters
directly define the shape of the pdf.
+ .. note::
+ New code should use the ``triangular`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
left : float or array_like of floats
@@ -2983,6 +3171,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized triangular distribution.
+ See Also
+ --------
+ Generator.triangular: which should be used for new code.
+
Notes
-----
The probability density function for the triangular distribution is
@@ -3061,6 +3253,10 @@ cdef class RandomState:
n an integer >= 0 and p is in the interval [0,1]. (n may be
input as a float, but it is truncated to an integer in use)
+ .. note::
+ New code should use the ``binomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : int or array_like of ints
@@ -3084,6 +3280,7 @@ cdef class RandomState:
--------
scipy.stats.binom : probability density function, distribution or
cumulative density function, etc.
+ Generator.binomial: which should be used for new code.
Notes
-----
@@ -3206,6 +3403,10 @@ cdef class RandomState:
parameters, `n` successes and `p` probability of success where `n`
is > 0 and `p` is in the interval [0, 1].
+ .. note::
+ New code should use the ``negative_binomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : float or array_like of floats
@@ -3225,6 +3426,10 @@ cdef class RandomState:
where each sample is equal to N, the number of failures that
occurred before a total of n successes was reached.
+ See Also
+ --------
+ Generator.negative_binomial: which should be used for new code.
+
Notes
-----
The probability mass function of the negative binomial distribution is
@@ -3283,6 +3488,10 @@ cdef class RandomState:
The Poisson distribution is the limit of the binomial distribution
for large N.
+ .. note::
+ New code should use the ``poisson`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
lam : float or array_like of floats
@@ -3299,6 +3508,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized Poisson distribution.
+ See Also
+ --------
+ Generator.poisson: which should be used for new code.
+
Notes
-----
The Poisson distribution
@@ -3361,6 +3574,10 @@ cdef class RandomState:
frequency of an item is inversely proportional to its rank in a
frequency table.
+ .. note::
+ New code should use the ``zipf`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
a : float or array_like of floats
@@ -3380,6 +3597,7 @@ cdef class RandomState:
--------
scipy.stats.zipf : probability density function, distribution, or
cumulative density function, etc.
+ Generator.zipf: which should be used for new code.
Notes
-----
@@ -3446,6 +3664,10 @@ cdef class RandomState:
where `p` is the probability of success of an individual trial.
+ .. note::
+ New code should use the ``geometric`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
p : float or array_like of floats
@@ -3461,6 +3683,10 @@ cdef class RandomState:
out : ndarray or scalar
Drawn samples from the parameterized geometric distribution.
+ See Also
+ --------
+ Generator.geometric: which should be used for new code.
+
Examples
--------
Draw ten thousand values from the geometric distribution,
@@ -3492,6 +3718,10 @@ cdef class RandomState:
a bad selection), and `nsample` (number of items sampled, which is less
than or equal to the sum ``ngood + nbad``).
+ .. note::
+ New code should use the ``hypergeometric`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
ngood : int or array_like of ints
@@ -3519,6 +3749,7 @@ cdef class RandomState:
--------
scipy.stats.hypergeom : probability density function, distribution or
cumulative density function, etc.
+ Generator.hypergeometric: which should be used for new code.
Notes
-----
@@ -3618,6 +3849,10 @@ cdef class RandomState:
Samples are drawn from a log series distribution with specified
shape parameter, 0 < ``p`` < 1.
+ .. note::
+ New code should use the ``logseries`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
p : float or array_like of floats
@@ -3637,6 +3872,7 @@ cdef class RandomState:
--------
scipy.stats.logser : probability density function, distribution or
cumulative density function, etc.
+ Generator.logseries: which should be used for new code.
Notes
-----
@@ -3706,6 +3942,10 @@ cdef class RandomState:
(average or "center") and variance (standard deviation, or "width,"
squared) of the one-dimensional normal distribution.
+ .. note::
+ New code should use the ``multivariate_normal`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
mean : 1-D array_like, of length N
@@ -3733,6 +3973,10 @@ cdef class RandomState:
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
+ See Also
+ --------
+ Generator.multivariate_normal: which should be used for new code.
+
Notes
-----
The mean is a coordinate in N-dimensional space, which represents the
@@ -3872,6 +4116,10 @@ cdef class RandomState:
``X_i = [X_0, X_1, ..., X_p]``, represent the number of times the
outcome was ``i``.
+ .. note::
+ New code should use the ``multinomial`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
n : int
@@ -3895,6 +4143,10 @@ cdef class RandomState:
In other words, each entry ``out[i,j,...,:]`` is an N-dimensional
value drawn from the distribution.
+ See Also
+ --------
+ Generator.multinomial: which should be used for new code.
+
Examples
--------
Throw a dice 20 times:
@@ -3982,6 +4234,10 @@ cdef class RandomState:
is a conjugate prior of a multinomial distribution in Bayesian
inference.
+ .. note::
+ New code should use the ``dirichlet`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
alpha : array
@@ -4002,6 +4258,10 @@ cdef class RandomState:
ValueError
If any value in alpha is less than or equal to zero
+ See Also
+ --------
+ Generator.dirichlet: which should be used for new code.
+
Notes
-----
The Dirichlet distribution is a distribution over vectors
@@ -4119,6 +4379,10 @@ cdef class RandomState:
multi-dimensional array. The order of sub-arrays is changed but
their contents remains the same.
+ .. note::
+ New code should use the ``shuffle`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
x : array_like
@@ -4128,6 +4392,10 @@ cdef class RandomState:
-------
None
+ See Also
+ --------
+ Generator.shuffle: which should be used for new code.
+
Examples
--------
>>> arr = np.arange(10)
@@ -4206,6 +4474,10 @@ cdef class RandomState:
If `x` is a multi-dimensional array, it is only shuffled along its
first index.
+ .. note::
+ New code should use the ``permutation`` method of a ``default_rng()``
+ instance instead; see `random-quick-start`.
+
Parameters
----------
x : int or array_like
@@ -4218,6 +4490,9 @@ cdef class RandomState:
out : ndarray
Permuted sequence or array range.
+ See Also
+ --------
+ Generator.permutation: which should be used for new code.
Examples
--------
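To make the recurring note concrete (editor's example, not part of the patch): the legacy module-level functions keep working, but the notes added throughout this file point new code at the equivalent ``Generator`` methods obtained from ``default_rng()``:

import numpy as np

legacy = np.random.standard_normal(5)   # legacy RandomState-backed API

rng = np.random.default_rng()           # recommended for new code
new = rng.standard_normal(5)
print(legacy.shape, new.shape)          # both (5,)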
diff --git a/numpy/random/setup.py b/numpy/random/setup.py
index ca01250f4..1b093d6d3 100644
--- a/numpy/random/setup.py
+++ b/numpy/random/setup.py
@@ -34,6 +34,7 @@ def configuration(parent_package='', top_path=None):
defs.append(('NPY_NO_DEPRECATED_API', 0))
config.add_data_dir('tests')
+ config.add_data_dir('_examples')
EXTRA_LINK_ARGS = []
# Math lib
@@ -47,11 +48,6 @@ def configuration(parent_package='', top_path=None):
elif not is_msvc:
# Some bit generators require c99
EXTRA_COMPILE_ARGS += ['-std=c99']
- INTEL_LIKE = any(arch in platform.machine()
- for arch in ('x86', 'i686', 'i386', 'amd64'))
- if INTEL_LIKE:
- # Assumes GCC or GCC-like compiler
- EXTRA_COMPILE_ARGS += ['-msse2']
# Use legacy integer variable sizes
LEGACY_DEFS = [('NP_RANDOM_LEGACY', '1')]
@@ -82,8 +78,8 @@ def configuration(parent_package='', top_path=None):
libraries=EXTRA_LIBRARIES,
extra_compile_args=EXTRA_COMPILE_ARGS,
extra_link_args=EXTRA_LINK_ARGS,
- depends=['_%s.pyx' % gen, 'bit_generator.pyx',
- 'bit_generator.pxd'],
+ depends=['_%s.pyx' % gen, '_bit_generator.pyx',
+ '_bit_generator.pxd'],
define_macros=_defs,
)
for gen in ['_common', '_bit_generator']:
@@ -97,6 +93,7 @@ def configuration(parent_package='', top_path=None):
depends=['%s.pyx' % gen, '%s.pxd' % gen,],
define_macros=defs,
)
+ config.add_data_files('{0}.pxd'.format(gen))
other_srcs = [
'src/distributions/logfactorial.c',
'src/distributions/distributions.c',
@@ -115,6 +112,7 @@ def configuration(parent_package='', top_path=None):
depends=['%s.pyx' % gen],
define_macros=defs,
)
+ config.add_data_files('_bounded_integers.pxd')
config.add_extension('mtrand',
sources=['mtrand.c',
'src/legacy/legacy-distributions.c',
@@ -127,6 +125,7 @@ def configuration(parent_package='', top_path=None):
depends=['mtrand.pyx'],
define_macros=defs + LEGACY_DEFS,
)
+ config.add_data_files('__init__.pxd')
return config
diff --git a/numpy/random/src/distributions/distributions.c b/numpy/random/src/distributions/distributions.c
index ab8de8bcb..df3323408 100644
--- a/numpy/random/src/distributions/distributions.c
+++ b/numpy/random/src/distributions/distributions.c
@@ -1,4 +1,4 @@
-#include "include/distributions.h"
+#include "numpy/random/distributions.h"
#include "ziggurat_constants.h"
#include "logfactorial.h"
diff --git a/numpy/random/src/distributions/random_hypergeometric.c b/numpy/random/src/distributions/random_hypergeometric.c
index da5ea9c68..0da49bd62 100644
--- a/numpy/random/src/distributions/random_hypergeometric.c
+++ b/numpy/random/src/distributions/random_hypergeometric.c
@@ -1,4 +1,4 @@
-#include "include/distributions.h"
+#include "numpy/random/distributions.h"
#include "logfactorial.h"
#include <stdint.h>
diff --git a/numpy/random/src/distributions/random_mvhg_count.c b/numpy/random/src/distributions/random_mvhg_count.c
index 9c0cc045d..0c46ea417 100644
--- a/numpy/random/src/distributions/random_mvhg_count.c
+++ b/numpy/random/src/distributions/random_mvhg_count.c
@@ -2,7 +2,7 @@
#include <stdlib.h>
#include <stdbool.h>
-#include "include/distributions.h"
+#include "numpy/random/distributions.h"
/*
* random_mvhg_count
diff --git a/numpy/random/src/distributions/random_mvhg_marginals.c b/numpy/random/src/distributions/random_mvhg_marginals.c
index 301a4acad..7e4c24988 100644
--- a/numpy/random/src/distributions/random_mvhg_marginals.c
+++ b/numpy/random/src/distributions/random_mvhg_marginals.c
@@ -3,7 +3,7 @@
#include <stdbool.h>
#include <math.h>
-#include "include/distributions.h"
+#include "numpy/random/distributions.h"
#include "logfactorial.h"
diff --git a/numpy/random/tests/test_direct.py b/numpy/random/tests/test_direct.py
index 34d7bd278..9f77f0ad2 100644
--- a/numpy/random/tests/test_direct.py
+++ b/numpy/random/tests/test_direct.py
@@ -1,5 +1,6 @@
import os
from os.path import join
+import sys
import numpy as np
from numpy.testing import (assert_equal, assert_allclose, assert_array_equal,
@@ -26,6 +27,12 @@ try:
except ImportError:
MISSING_CTYPES = False
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ MISSING_CFFI = True
+
+
pwd = os.path.dirname(os.path.abspath(__file__))
diff --git a/numpy/random/tests/test_extending.py b/numpy/random/tests/test_extending.py
new file mode 100644
index 000000000..807de1a25
--- /dev/null
+++ b/numpy/random/tests/test_extending.py
@@ -0,0 +1,51 @@
+import os, sys
+import pytest
+import warnings
+
+try:
+ import cffi
+except ImportError:
+ cffi = None
+
+if sys.flags.optimize > 1:
+ # no docstrings present to inspect when PYTHONOPTIMIZE/Py_OptimizeFlag > 1
+ # cffi cannot succeed
+ cffi = None
+
+try:
+ with warnings.catch_warnings(record=True) as w:
+ # numba issue gh-4733
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ import numba
+except ImportError:
+ numba = None
+
+try:
+ import cython
+except ImportError:
+ cython = None
+
+@pytest.mark.skipif(cython is None, reason="requires cython")
+def test_cython():
+ curdir = os.getcwd()
+ argv = sys.argv
+ examples = (os.path.dirname(__file__), '..', '_examples')
+ try:
+ os.chdir(os.path.join(*examples))
+ sys.argv = argv[:1] + ['build']
+ with warnings.catch_warnings(record=True) as w:
+ # setuptools issue gh-1885
+ warnings.filterwarnings('always', '', DeprecationWarning)
+ from numpy.random._examples.cython import setup
+ finally:
+ sys.argv = argv
+ os.chdir(curdir)
+
+@pytest.mark.skipif(numba is None or cffi is None,
+ reason="requires numba and cffi")
+def test_numba():
+ from numpy.random._examples.numba import extending
+
+@pytest.mark.skipif(cffi is None, reason="requires cffi")
+def test_cffi():
+ from numpy.random._examples.cffi import extending
diff --git a/numpy/random/tests/test_generator_mt19937.py b/numpy/random/tests/test_generator_mt19937.py
index d4502d276..d835f16bd 100644
--- a/numpy/random/tests/test_generator_mt19937.py
+++ b/numpy/random/tests/test_generator_mt19937.py
@@ -3,6 +3,8 @@ import sys
import pytest
import numpy as np
+from numpy.dual import cholesky, eigh, svd
+from numpy.linalg import LinAlgError
from numpy.testing import (
assert_, assert_raises, assert_equal, assert_allclose,
assert_warns, assert_no_warnings, assert_array_equal,
@@ -492,9 +494,8 @@ class TestIntegers(object):
def test_repeatability_broadcasting(self, endpoint):
for dt in self.itype:
- lbnd = 0 if dt in (np.bool, bool, np.bool_) else np.iinfo(dt).min
- ubnd = 2 if dt in (
- np.bool, bool, np.bool_) else np.iinfo(dt).max + 1
+ lbnd = 0 if dt in (bool, np.bool_) else np.iinfo(dt).min
+ ubnd = 2 if dt in (bool, np.bool_) else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
# view as little endian for hash
@@ -533,8 +534,8 @@ class TestIntegers(object):
assert_raises(ValueError, random.integers, low_a, high_a,
endpoint=endpoint, dtype=dtype)
- low_o = np.array([[low]*10], dtype=np.object)
- high_o = np.array([high] * 10, dtype=np.object)
+ low_o = np.array([[low]*10], dtype=object)
+ high_o = np.array([high] * 10, dtype=object)
assert_raises(ValueError, random.integers, low_o, high,
endpoint=endpoint, dtype=dtype)
assert_raises(ValueError, random.integers, low, high_o,
@@ -576,7 +577,7 @@ class TestIntegers(object):
sample = self.rfunc(lbnd, ubnd, endpoint=endpoint, dtype=dt)
assert_equal(sample.dtype, dt)
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
ubnd = ubnd - 1 if endpoint else ubnd
@@ -1196,12 +1197,13 @@ class TestRandomDist(object):
[5, 5, 3, 1, 2, 4]]])
assert_array_equal(actual, desired)
- def test_multivariate_normal(self):
+ @pytest.mark.parametrize("method", ["svd", "eigh", "cholesky"])
+ def test_multivariate_normal(self, method):
random = Generator(MT19937(self.seed))
mean = (.123456789, 10)
cov = [[1, 0], [0, 1]]
size = (3, 2)
- actual = random.multivariate_normal(mean, cov, size)
+ actual = random.multivariate_normal(mean, cov, size, method=method)
desired = np.array([[[-1.747478062846581, 11.25613495182354 ],
[-0.9967333370066214, 10.342002097029821 ]],
[[ 0.7850019631242964, 11.181113712443013 ],
@@ -1212,15 +1214,24 @@ class TestRandomDist(object):
assert_array_almost_equal(actual, desired, decimal=15)
# Check for default size, was raising deprecation warning
- actual = random.multivariate_normal(mean, cov)
+ actual = random.multivariate_normal(mean, cov, method=method)
desired = np.array([0.233278563284287, 9.424140804347195])
assert_array_almost_equal(actual, desired, decimal=15)
+ # Check that non-symmetric covariance input raises an exception when
+ # check_valid='raise'; the default svd method is used here.
+ mean = [0, 0]
+ cov = [[1, 2], [1, 2]]
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise')
# Check that non positive-semidefinite covariance warns with
# RuntimeWarning
- mean = [0, 0]
cov = [[1, 2], [2, 1]]
assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov)
+ assert_warns(RuntimeWarning, random.multivariate_normal, mean, cov,
+ method='eigh')
+ assert_raises(LinAlgError, random.multivariate_normal, mean, cov,
+ method='cholesky')
# and that it doesn't warn with RuntimeWarning check_valid='ignore'
assert_no_warnings(random.multivariate_normal, mean, cov,
@@ -1229,10 +1240,12 @@ class TestRandomDist(object):
# and that it raises a ValueError when check_valid='raise'
assert_raises(ValueError, random.multivariate_normal, mean, cov,
check_valid='raise')
+ assert_raises(ValueError, random.multivariate_normal, mean, cov,
+ check_valid='raise', method='eigh')
cov = np.array([[1, 0.1], [0.1, 1]], dtype=np.float32)
with suppress_warnings() as sup:
- random.multivariate_normal(mean, cov)
+ random.multivariate_normal(mean, cov, method=method)
w = sup.record(RuntimeWarning)
assert len(w) == 0
@@ -2206,7 +2219,7 @@ class TestSingleEltArrayInput(object):
assert_equal(out.shape, self.tgtShape)
def test_integers(self, endpoint):
- itype = [np.bool, np.int8, np.uint8, np.int16, np.uint16,
+ itype = [np.bool_, np.int8, np.uint8, np.int16, np.uint16,
np.int32, np.uint32, np.int64, np.uint64]
func = random.integers
high = np.array([1])
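
The parametrized test above exercises the new method keyword of Generator.multivariate_normal. A hedged usage sketch, assuming a NumPy build that already includes the keyword; the seed and covariance values are illustrative:

    import numpy as np

    rng = np.random.default_rng(12345)
    mean = [0.0, 0.0]
    cov = [[1.0, 0.3],
           [0.3, 1.0]]  # symmetric positive definite

    # 'svd' (the default) and 'eigh' tolerate positive-semidefinite covariances;
    # 'cholesky' is the fastest but requires a positive-definite matrix.
    samples = rng.multivariate_normal(mean, cov, size=5, method='cholesky')
    print(samples.shape)  # (5, 2)
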
diff --git a/numpy/random/tests/test_random.py b/numpy/random/tests/test_random.py
index 37bd121f3..2e2ecedf8 100644
--- a/numpy/random/tests/test_random.py
+++ b/numpy/random/tests/test_random.py
@@ -269,7 +269,7 @@ class TestRandint(object):
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
diff --git a/numpy/random/tests/test_randomstate.py b/numpy/random/tests/test_randomstate.py
index 5131f1839..c12b685ad 100644
--- a/numpy/random/tests/test_randomstate.py
+++ b/numpy/random/tests/test_randomstate.py
@@ -229,7 +229,7 @@ class TestSetState(object):
new_state = ('Unknown', ) + state[1:]
assert_raises(ValueError, self.random_state.set_state, new_state)
assert_raises(TypeError, self.random_state.set_state,
- np.array(new_state, dtype=np.object))
+ np.array(new_state, dtype=object))
state = self.random_state.get_state(legacy=False)
del state['bit_generator']
assert_raises(ValueError, self.random_state.set_state, state)
@@ -382,7 +382,7 @@ class TestRandint(object):
sample = self.rfunc(lbnd, ubnd, dtype=dt)
assert_equal(sample.dtype, np.dtype(dt))
- for dt in (bool, int, np.long):
+ for dt in (bool, int, np.compat.long):
lbnd = 0 if dt is bool else np.iinfo(dt).min
ubnd = 2 if dt is bool else np.iinfo(dt).max + 1
@@ -455,7 +455,7 @@ class TestRandomDist(object):
random.seed(self.seed)
rs = random.RandomState(self.seed)
actual = rs.tomaxint(size=(3, 2))
- if np.iinfo(np.int).max == 2147483647:
+ if np.iinfo(int).max == 2147483647:
desired = np.array([[1328851649, 731237375],
[1270502067, 320041495],
[1908433478, 499156889]], dtype=np.int64)
diff --git a/numpy/random/tests/test_smoke.py b/numpy/random/tests/test_smoke.py
index 6e641b5f4..58ef6a09a 100644
--- a/numpy/random/tests/test_smoke.py
+++ b/numpy/random/tests/test_smoke.py
@@ -8,7 +8,7 @@ from numpy.testing import assert_equal, assert_, assert_array_equal
from numpy.random import (Generator, MT19937, PCG64, Philox, SFC64)
@pytest.fixture(scope='module',
- params=(np.bool, np.int8, np.int16, np.int32, np.int64,
+ params=(np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64))
def dtype(request):
return request.param
@@ -655,7 +655,7 @@ class RNG(object):
rg.standard_gamma(1.0, out=existing[::3])
def test_integers_broadcast(self, dtype):
- if dtype == np.bool:
+ if dtype == np.bool_:
upper = 2
lower = 0
else:
@@ -672,7 +672,7 @@ class RNG(object):
assert_equal(a, c)
self._reset_state()
d = self.rg.integers(np.array(
- [lower] * 10), np.array([upper], dtype=np.object), size=10,
+ [lower] * 10), np.array([upper], dtype=object), size=10,
dtype=dtype)
assert_equal(a, d)
self._reset_state()
@@ -701,7 +701,7 @@ class RNG(object):
assert out.shape == (1,)
def test_integers_broadcast_errors(self, dtype):
- if dtype == np.bool:
+ if dtype == np.bool_:
upper = 2
lower = 0
else:
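
The replacements above (np.bool -> np.bool_ or bool, np.object -> object, np.int -> int, np.long -> np.compat.long) move the tests off aliases that merely shadow the Python builtins. A small illustrative sketch of the equivalences being relied on; treat it as a sanity check rather than a specification:

    import numpy as np

    assert np.dtype(bool) == np.dtype(np.bool_)        # np.bool was just builtins.bool
    assert np.dtype(object) == np.dtype('O')           # np.object was builtins.object
    assert np.iinfo(int).max == np.iinfo(np.int_).max  # default (platform) integer width
    print(np.compat.long is int)                       # True on Python 3
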
diff --git a/numpy/testing/_private/decorators.py b/numpy/testing/_private/decorators.py
index 24c4e385d..eab40e7c9 100644
--- a/numpy/testing/_private/decorators.py
+++ b/numpy/testing/_private/decorators.py
@@ -15,6 +15,10 @@ function name, setup and teardown functions and so on - see
"""
from __future__ import division, absolute_import, print_function
+import sys
+import os
+import re
+
try:
# Accessing collections abstract classes from collections
# has been deprecated since Python 3.3
diff --git a/numpy/testing/_private/utils.py b/numpy/testing/_private/utils.py
index 8a31fcf15..4642cc0f8 100644
--- a/numpy/testing/_private/utils.py
+++ b/numpy/testing/_private/utils.py
@@ -21,6 +21,7 @@ import pprint
from numpy.core import(
intp, float32, empty, arange, array_repr, ndarray, isnat, array)
+import numpy.__config__
if sys.version_info[0] >= 3:
from io import StringIO
@@ -39,7 +40,7 @@ __all__ = [
'SkipTest', 'KnownFailureException', 'temppath', 'tempdir', 'IS_PYPY',
'HAS_REFCOUNT', 'suppress_warnings', 'assert_array_compare',
'_assert_valid_refcount', '_gen_alignment_data', 'assert_no_gc_cycles',
- 'break_cycles',
+ 'break_cycles', 'HAS_LAPACK64'
]
@@ -53,6 +54,7 @@ verbose = 0
IS_PYPY = platform.python_implementation() == 'PyPy'
HAS_REFCOUNT = getattr(sys, 'getrefcount', None) is not None
+HAS_LAPACK64 = hasattr(numpy.__config__, 'lapack64__opt_info')
def import_nose():
@@ -284,6 +286,10 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
check that all elements of these objects are equal. An exception is raised
at the first conflicting values.
+ When one of `actual` and `desired` is a scalar and the other is array_like,
+ the function checks that each element of the array_like object is equal to
+ the scalar.
+
This function handles NaN comparisons as if NaN was a "normal" number.
That is, no assertion is raised if both objects have NaNs in the same
positions. This is in contrast to the IEEE standard on NaNs, which says
@@ -374,21 +380,6 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
if isscalar(desired) != isscalar(actual):
raise AssertionError(msg)
- # Inf/nan/negative zero handling
- try:
- isdesnan = gisnan(desired)
- isactnan = gisnan(actual)
- if isdesnan and isactnan:
- return # both nan, so equal
-
- # handle signed zero specially for floats
- if desired == 0 and actual == 0:
- if not signbit(desired) == signbit(actual):
- raise AssertionError(msg)
-
- except (TypeError, ValueError, NotImplementedError):
- pass
-
try:
isdesnat = isnat(desired)
isactnat = isnat(actual)
@@ -404,6 +395,33 @@ def assert_equal(actual, desired, err_msg='', verbose=True):
except (TypeError, ValueError, NotImplementedError):
pass
+ # Inf/nan/negative zero handling
+ try:
+ isdesnan = gisnan(desired)
+ isactnan = gisnan(actual)
+ if isdesnan and isactnan:
+ return # both nan, so equal
+
+ # handle signed zero specially for floats
+ array_actual = array(actual)
+ array_desired = array(desired)
+ if (array_actual.dtype.char in 'Mm' or
+ array_desired.dtype.char in 'Mm'):
+ # NumPy 1.18: before this version, gisnan failed for datetime64 and
+ # timedelta64. It now succeeds, but comparing to a scalar with a
+ # different type emits a DeprecationWarning, so skip the next check
+ # for these dtypes.
+ raise NotImplementedError('cannot compare to a scalar '
+ 'with a different type')
+
+ if desired == 0 and actual == 0:
+ if not signbit(desired) == signbit(actual):
+ raise AssertionError(msg)
+
+ except (TypeError, ValueError, NotImplementedError):
+ pass
+
try:
# Explicitly use __eq__ for comparison, gh-2552
if not (desired == actual):
@@ -841,10 +859,11 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
Raises an AssertionError if two array_like objects are not equal.
Given two array_like objects, check that the shape is equal and all
- elements of these objects are equal. An exception is raised at
- shape mismatch or conflicting values. In contrast to the standard usage
- in numpy, NaNs are compared like numbers, no assertion is raised if
- both objects have NaNs in the same positions.
+ elements of these objects are equal (but see the Notes for the special
+ handling of a scalar). An exception is raised at shape mismatch or
+ conflicting values. In contrast to the standard usage in numpy, NaNs
+ are compared like numbers, no assertion is raised if both objects have
+ NaNs in the same positions.
The usual caution for verifying equality with floating point numbers is
advised.
@@ -871,6 +890,12 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
relative and/or absolute precision.
assert_array_almost_equal_nulp, assert_array_max_ulp, assert_equal
+ Notes
+ -----
+ When one of `x` and `y` is a scalar and the other is array_like, the
+ function checks that each element of the array_like object is equal to
+ the scalar.
+
Examples
--------
The first assert does not raise an exception:
@@ -878,7 +903,7 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
>>> np.testing.assert_array_equal([1.0,2.33333,np.nan],
... [np.exp(0),2.33333, np.nan])
- Assert fails with numerical inprecision with floats:
+ An assertion fails because of numerical imprecision with floats:
>>> np.testing.assert_array_equal([1.0,np.pi,np.nan],
... [1, np.sqrt(np.pi)**2, np.nan])
@@ -899,6 +924,12 @@ def assert_array_equal(x, y, err_msg='', verbose=True):
... [1, np.sqrt(np.pi)**2, np.nan],
... rtol=1e-10, atol=0)
+ As mentioned in the Notes section, `assert_array_equal` has special
+ handling for scalars. Here the test checks that each value in `x` is 3:
+
+ >>> x = np.full((2, 5), fill_value=3)
+ >>> np.testing.assert_array_equal(x, 3)
+
"""
__tracebackhide__ = True # Hide traceback for py.test
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
@@ -2351,3 +2382,73 @@ def break_cycles():
gc.collect()
# one more, just to make sure
gc.collect()
+
+
+def requires_memory(free_bytes):
+ """Decorator to skip a test if not enough memory is available"""
+ import pytest
+
+ env_var = 'NPY_AVAILABLE_MEM'
+ env_value = os.environ.get(env_var)
+ if env_value is not None:
+ try:
+ mem_free = _parse_size(env_value)
+ except ValueError as exc:
+ raise ValueError('Invalid environment variable {}: {!s}'.format(
+ env_var, exc))
+
+ msg = ('{0} GB memory required, but environment variable '
+ 'NPY_AVAILABLE_MEM={1} set'.format(
+ free_bytes/1e9, env_value))
+ else:
+ mem_free = _get_mem_available()
+
+ if mem_free is None:
+ msg = ("Could not determine available memory; set NPY_AVAILABLE_MEM "
+ "environment variable (e.g. NPY_AVAILABLE_MEM=16GB) to run "
+ "the test.")
+ mem_free = -1
+ else:
+ msg = '{0} GB memory required, but {1} GB available'.format(
+ free_bytes/1e9, mem_free/1e9)
+
+ return pytest.mark.skipif(mem_free < free_bytes, reason=msg)
+
+
+def _parse_size(size_str):
+ """Convert memory size strings ('12 GB' etc.) to float"""
+ suffixes = {'': 1.0, 'b': 1.0,
+ 'k': 1e3, 'm': 1e6, 'g': 1e9, 't': 1e12,
+ 'kb': 1e3, 'mb': 1e6, 'gb': 1e9, 'tb': 1e12}
+
+ size_re = re.compile(r'^\s*(\d+|\d+\.\d+)\s*({0})\s*$'.format(
+ '|'.join(suffixes.keys())), re.I)
+
+ m = size_re.match(size_str.lower())
+ if not m or m.group(2) not in suffixes:
+ raise ValueError("value {!r} not a valid size".format(size_str))
+ return float(m.group(1)) * suffixes[m.group(2)]
+
+
+def _get_mem_available():
+ """Return available memory in bytes, or None if unknown."""
+ try:
+ import psutil
+ return psutil.virtual_memory().available
+ except (ImportError, AttributeError):
+ pass
+
+ if sys.platform.startswith('linux'):
+ info = {}
+ with open('/proc/meminfo', 'r') as f:
+ for line in f:
+ p = line.split()
+ info[p[0].strip(':').lower()] = float(p[1]) * 1e3
+
+ if 'memavailable' in info:
+ # Linux >= 3.14
+ return info['memavailable']
+ else:
+ return info['memfree'] + info['cached']
+
+ return None
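
requires_memory returns a pytest skipif marker: it honours an explicit NPY_AVAILABLE_MEM setting and otherwise falls back to psutil or /proc/meminfo. A hedged usage sketch, importing the helper from the private module where this diff defines it; the test body and the 8 GB figure are illustrative:

    import numpy as np
    from numpy.testing._private.utils import requires_memory

    @requires_memory(free_bytes=8 * 10**9)  # skip unless roughly 8 GB appear free
    def test_large_allocation():
        # about 8 GB of float64, only attempted when the decorator lets the test run
        a = np.zeros(10**9, dtype=np.float64)
        assert a.nbytes == 8 * 10**9

Running the suite with NPY_AVAILABLE_MEM=16GB (or any size accepted by _parse_size) overrides the automatic detection.
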
diff --git a/numpy/tests/test_public_api.py b/numpy/tests/test_public_api.py
index c71d03432..0484bb8cd 100644
--- a/numpy/tests/test_public_api.py
+++ b/numpy/tests/test_public_api.py
@@ -386,7 +386,7 @@ SKIP_LIST_2 = [
'numpy.matlib.fft',
'numpy.matlib.random',
'numpy.matlib.ctypeslib',
- 'numpy.matlib.ma'
+ 'numpy.matlib.ma',
]