summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--doc/source/reference/random/multithreading.rst65
-rw-r--r--numpy/__init__.py7
-rw-r--r--numpy/core/defchararray.py2
-rw-r--r--numpy/core/src/multiarray/_multiarray_tests.c.src8
-rw-r--r--numpy/core/src/multiarray/arraytypes.c.src6
-rw-r--r--numpy/core/src/multiarray/dragon4.c16
-rw-r--r--numpy/core/src/multiarray/scalartypes.c.src44
-rw-r--r--numpy/distutils/misc_util.py1
-rwxr-xr-xnumpy/f2py/f2py2e.py2
-rwxr-xr-xnumpy/f2py/rules.py12
10 files changed, 78 insertions, 85 deletions
diff --git a/doc/source/reference/random/multithreading.rst b/doc/source/reference/random/multithreading.rst
index 8502429ce..4b221d9aa 100644
--- a/doc/source/reference/random/multithreading.rst
+++ b/doc/source/reference/random/multithreading.rst
@@ -11,32 +11,28 @@ these requirements.
This example makes use of Python 3 :mod:`concurrent.futures` to fill an array
using multiple threads. Threads are long-lived so that repeated calls do not
-require any additional overheads from thread creation. The underlying
-BitGenerator is `PCG64` which is fast, has a long period and supports
-using `PCG64.jumped` to return a new generator while advancing the
-state. The random numbers generated are reproducible in the sense that the same
-seed will produce the same outputs.
+require any additional overheads from thread creation.
+
+The random numbers generated are reproducible in the sense that the same
+seed will produce the same outputs, given that the number of threads does not
+change.
.. code-block:: ipython
- from numpy.random import Generator, PCG64
+ from numpy.random import default_rng, SeedSequence
import multiprocessing
import concurrent.futures
import numpy as np
class MultithreadedRNG:
def __init__(self, n, seed=None, threads=None):
- rg = PCG64(seed)
if threads is None:
threads = multiprocessing.cpu_count()
self.threads = threads
- self._random_generators = [rg]
- last_rg = rg
- for _ in range(0, threads-1):
- new_rg = last_rg.jumped()
- self._random_generators.append(new_rg)
- last_rg = new_rg
+ seq = SeedSequence(seed)
+ self._random_generators = [default_rng(s)
+ for s in seq.spawn(threads)]
self.n = n
self.executor = concurrent.futures.ThreadPoolExecutor(threads)
@@ -61,19 +57,20 @@ seed will produce the same outputs.
self.executor.shutdown(False)
+
The multithreaded random number generator can be used to fill an array.
The ``values`` attribute shows the zero value before the fill and the
random value after.
.. code-block:: ipython
- In [2]: mrng = MultithreadedRNG(10000000, seed=0)
- ...: print(mrng.values[-1])
- 0.0
+ In [2]: mrng = MultithreadedRNG(10000000, seed=12345)
+ ...: print(mrng.values[-1])
+ Out[2]: 0.0
In [3]: mrng.fill()
- ...: print(mrng.values[-1])
- 3.296046120254392
+ ...: print(mrng.values[-1])
+ Out[3]: 2.4545724517479104
The time required to produce using multiple threads can be compared to
the time required to generate using a single thread.
@@ -81,28 +78,38 @@ the time required to generate using a single thread.
.. code-block:: ipython
In [4]: print(mrng.threads)
- ...: %timeit mrng.fill()
+ ...: %timeit mrng.fill()
- 4
- 32.8 ms ± 2.71 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ Out[4]: 4
+ ...: 32.8 ms ± 2.71 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)
The single-threaded call directly uses the BitGenerator.
.. code-block:: ipython
In [5]: values = np.empty(10000000)
- ...: rg = Generator(PCG64())
- ...: %timeit rg.standard_normal(out=values)
+ ...: rg = default_rng()
+ ...: %timeit rg.standard_normal(out=values)
- 99.6 ms ± 222 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ Out[5]: 99.6 ms ± 222 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
-The gains are substantial and the scaling is reasonable even for large that
-are only moderately large. The gains are even larger when compared to a call
+The gains are substantial and the scaling is reasonable even for arrays that
+are only moderately large. The gains are even larger when compared to a call
that does not use an existing array due to array creation overhead.
.. code-block:: ipython
- In [6]: rg = Generator(PCG64())
- ...: %timeit rg.standard_normal(10000000)
+ In [6]: rg = default_rng()
+ ...: %timeit rg.standard_normal(10000000)
+
+ Out[6]: 125 ms ± 309 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+
+Note that if `threads` is not set by the user, it will be determined by
+`multiprocessing.cpu_count()`.
+
+.. code-block:: ipython
- 125 ms ± 309 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
+ In [7]: # simulate the behavior for `threads=None`, if the machine had only one thread
+ ...: mrng = MultithreadedRNG(10000000, seed=12345, threads=1)
+ ...: print(mrng.values[-1])
+ Out[7]: 1.1800150052158556
diff --git a/numpy/__init__.py b/numpy/__init__.py
index 73e979a18..00c4a3d78 100644
--- a/numpy/__init__.py
+++ b/numpy/__init__.py
@@ -157,11 +157,8 @@ else:
# Make these accessible from numpy name-space
# but not imported in from numpy import *
# TODO[gh-6103]: Deprecate these
- if sys.version_info[0] >= 3:
- from builtins import bool, int, float, complex, object, str
- unicode = str
- else:
- from __builtin__ import bool, int, float, complex, object, unicode, str
+ from builtins import bool, int, float, complex, object, str
+ unicode = str
from .core import round, abs, max, min
# now that numpy modules are imported, can initialize limits
diff --git a/numpy/core/defchararray.py b/numpy/core/defchararray.py
index bc101f84b..942a698a9 100644
--- a/numpy/core/defchararray.py
+++ b/numpy/core/defchararray.py
@@ -274,7 +274,7 @@ def str_len(a):
See also
--------
- __builtin__.len
+ builtins.len
"""
return _vec_string(a, integer, '__len__')
diff --git a/numpy/core/src/multiarray/_multiarray_tests.c.src b/numpy/core/src/multiarray/_multiarray_tests.c.src
index 7007dd204..ec2928c8f 100644
--- a/numpy/core/src/multiarray/_multiarray_tests.c.src
+++ b/numpy/core/src/multiarray/_multiarray_tests.c.src
@@ -1877,21 +1877,21 @@ PrintFloat_Printf_g(PyObject *obj, int precision)
char str[1024];
if (PyArray_IsScalar(obj, Half)) {
- npy_half x = ((PyHalfScalarObject *)obj)->obval;
+ npy_half x = PyArrayScalar_VAL(obj, Half);
PyOS_snprintf(str, sizeof(str), "%.*g", precision,
npy_half_to_double(x));
}
else if (PyArray_IsScalar(obj, Float)) {
- npy_float x = ((PyFloatScalarObject *)obj)->obval;
+ npy_float x = PyArrayScalar_VAL(obj, Float);
PyOS_snprintf(str, sizeof(str), "%.*g", precision, x);
}
else if (PyArray_IsScalar(obj, Double)) {
- npy_double x = ((PyDoubleScalarObject *)obj)->obval;
+ npy_double x = PyArrayScalar_VAL(obj, Double);
PyOS_snprintf(str, sizeof(str), "%.*g", precision, x);
/* would be better to use lg, but not available in C90 */
}
else if (PyArray_IsScalar(obj, LongDouble)) {
- npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
+ npy_longdouble x = PyArrayScalar_VAL(obj, LongDouble);
PyOS_snprintf(str, sizeof(str), "%.*Lg", precision, x);
}
else{
diff --git a/numpy/core/src/multiarray/arraytypes.c.src b/numpy/core/src/multiarray/arraytypes.c.src
index 808cfaa14..61270dbef 100644
--- a/numpy/core/src/multiarray/arraytypes.c.src
+++ b/numpy/core/src/multiarray/arraytypes.c.src
@@ -210,7 +210,7 @@ static int
@type@ temp; /* ensures alignment */
if (PyArray_IsScalar(op, @kind@)) {
- temp = ((Py@kind@ScalarObject *)op)->obval;
+ temp = PyArrayScalar_VAL(op, @kind@);
}
else {
temp = (@type@)@func2@(op);
@@ -291,7 +291,7 @@ static int
}
if (PyArray_IsScalar(op, @kind@)){
- temp = ((Py@kind@ScalarObject *)op)->obval;
+ temp = PyArrayScalar_VAL(op, @kind@);
}
else {
if (op == Py_None) {
@@ -406,7 +406,7 @@ LONGDOUBLE_setitem(PyObject *op, void *ov, void *vap)
}
if (PyArray_IsScalar(op, LongDouble)) {
- temp = ((PyLongDoubleScalarObject *)op)->obval;
+ temp = PyArrayScalar_VAL(op, LongDouble);
}
else {
/* In case something funny happened in PyArray_IsScalar */
diff --git a/numpy/core/src/multiarray/dragon4.c b/numpy/core/src/multiarray/dragon4.c
index d14b8e638..282cdad28 100644
--- a/numpy/core/src/multiarray/dragon4.c
+++ b/numpy/core/src/multiarray/dragon4.c
@@ -3183,19 +3183,19 @@ Dragon4_Positional(PyObject *obj, DigitMode digit_mode, CutoffMode cutoff_mode,
opt.exp_digits = -1;
if (PyArray_IsScalar(obj, Half)) {
- npy_half x = ((PyHalfScalarObject *)obj)->obval;
+ npy_half x = PyArrayScalar_VAL(obj, Half);
return Dragon4_Positional_Half_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Float)) {
- npy_float x = ((PyFloatScalarObject *)obj)->obval;
+ npy_float x = PyArrayScalar_VAL(obj, Float);
return Dragon4_Positional_Float_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Double)) {
- npy_double x = ((PyDoubleScalarObject *)obj)->obval;
+ npy_double x = PyArrayScalar_VAL(obj, Double);
return Dragon4_Positional_Double_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, LongDouble)) {
- npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
+ npy_longdouble x = PyArrayScalar_VAL(obj, LongDouble);
return Dragon4_Positional_LongDouble_opt(&x, &opt);
}
@@ -3224,19 +3224,19 @@ Dragon4_Scientific(PyObject *obj, DigitMode digit_mode, int precision,
opt.exp_digits = exp_digits;
if (PyArray_IsScalar(obj, Half)) {
- npy_half x = ((PyHalfScalarObject *)obj)->obval;
+ npy_half x = PyArrayScalar_VAL(obj, Half);
return Dragon4_Scientific_Half_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Float)) {
- npy_float x = ((PyFloatScalarObject *)obj)->obval;
+ npy_float x = PyArrayScalar_VAL(obj, Float);
return Dragon4_Scientific_Float_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, Double)) {
- npy_double x = ((PyDoubleScalarObject *)obj)->obval;
+ npy_double x = PyArrayScalar_VAL(obj, Double);
return Dragon4_Scientific_Double_opt(&x, &opt);
}
else if (PyArray_IsScalar(obj, LongDouble)) {
- npy_longdouble x = ((PyLongDoubleScalarObject *)obj)->obval;
+ npy_longdouble x = PyArrayScalar_VAL(obj, LongDouble);
return Dragon4_Scientific_LongDouble_opt(&x, &opt);
}
diff --git a/numpy/core/src/multiarray/scalartypes.c.src b/numpy/core/src/multiarray/scalartypes.c.src
index eb3b00242..2bd4cd2ee 100644
--- a/numpy/core/src/multiarray/scalartypes.c.src
+++ b/numpy/core/src/multiarray/scalartypes.c.src
@@ -318,7 +318,7 @@ gentype_format(PyObject *self, PyObject *args)
* because it throws away precision.
*/
if (Py_TYPE(self) == &PyBoolArrType_Type) {
- obj = PyBool_FromLong(((PyBoolScalarObject *)self)->obval);
+ obj = PyBool_FromLong(PyArrayScalar_VAL(self, Bool));
}
else if (PyArray_IsScalar(self, Integer)) {
obj = Py_TYPE(self)->tp_as_number->nb_int(self);
@@ -903,7 +903,7 @@ static PyObject *
static PyObject *
@name@type_@kind@(PyObject *self)
{
- return @name@type_@kind@_either(((Py@Name@ScalarObject *)self)->obval,
+ return @name@type_@kind@_either(PyArrayScalar_VAL(self, @Name@),
TrimMode_LeaveOneZero, TrimMode_DptZeros, 0);
}
@@ -911,7 +911,7 @@ static PyObject *
c@name@type_@kind@(PyObject *self)
{
PyObject *rstr, *istr, *ret;
- npy_c@name@ val = ((PyC@Name@ScalarObject *)self)->obval;
+ npy_c@name@ val = PyArrayScalar_VAL(self, C@Name@);
TrimMode trim = TrimMode_DptZeros;
if (npy_legacy_print_mode == 113) {
@@ -975,7 +975,7 @@ c@name@type_@kind@(PyObject *self)
static PyObject *
halftype_@kind@(PyObject *self)
{
- npy_half val = ((PyHalfScalarObject *)self)->obval;
+ npy_half val = PyArrayScalar_VAL(self, Half);
float floatval = npy_half_to_float(val);
float absval;
@@ -1318,7 +1318,7 @@ gentype_real_get(PyObject *self)
return ret;
}
else if (PyArray_IsScalar(self, Object)) {
- PyObject *obj = ((PyObjectScalarObject *)self)->obval;
+ PyObject *obj = PyArrayScalar_VAL(self, Object);
ret = PyObject_GetAttrString(obj, "real");
if (ret != NULL) {
return ret;
@@ -1343,7 +1343,7 @@ gentype_imag_get(PyObject *self)
ret = PyArray_Scalar(ptr + typecode->elsize, typecode, NULL);
}
else if (PyArray_IsScalar(self, Object)) {
- PyObject *obj = ((PyObjectScalarObject *)self)->obval;
+ PyObject *obj = PyArrayScalar_VAL(self, Object);
PyArray_Descr *newtype;
ret = PyObject_GetAttrString(obj, "imag");
if (ret == NULL) {
@@ -1753,7 +1753,7 @@ gentype_reduce(PyObject *self, PyObject *NPY_UNUSED(args))
PyTuple_SET_ITEM(ret, 0, obj);
obj = PyObject_GetAttrString((PyObject *)self, "dtype");
if (PyArray_IsScalar(self, Object)) {
- PyObject *val = ((PyObjectScalarObject *)self)->obval;
+ PyObject *val = PyArrayScalar_VAL(self, Object);
PyObject *tup = Py_BuildValue("NO", obj, val);
if (tup == NULL) {
return NULL;
@@ -2518,7 +2518,7 @@ void_dealloc(PyVoidScalarObject *v)
static void
object_arrtype_dealloc(PyObject *v)
{
- Py_XDECREF(((PyObjectScalarObject *)v)->obval);
+ Py_XDECREF(PyArrayScalar_VAL(v, Object));
Py_TYPE(v)->tp_free(v);
}
@@ -2603,7 +2603,7 @@ static PyObject *
Py_DECREF(typecode);
return NULL;
}
- memset(&((Py@Name@ScalarObject *)robj)->obval, 0, sizeof(npy_@name@));
+ memset(&PyArrayScalar_VAL(robj, @Name@), 0, sizeof(npy_@name@));
#elif @default@ == 1
robj = PyArray_Scalar(NULL, typecode, NULL);
#elif @default@ == 2
@@ -2960,7 +2960,7 @@ void_arrtype_new(PyTypeObject *type, PyObject *args, PyObject *NPY_UNUSED(kwds))
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- return (npy_hash_t)(((Py@name@ScalarObject *)obj)->obval);
+ return (npy_hash_t)(PyArrayScalar_VAL(obj, @name@));
}
/**end repeat**/
@@ -2971,7 +2971,7 @@ static npy_hash_t
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- npy_hash_t x = (npy_hash_t)(((Py@name@ScalarObject *)obj)->obval);
+ npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, @name@));
if (x == -1) {
x = -2;
}
@@ -2982,7 +2982,7 @@ static npy_hash_t
static npy_hash_t
ulong_arrtype_hash(PyObject *obj)
{
- PyObject * l = PyLong_FromUnsignedLong(((PyULongScalarObject*)obj)->obval);
+ PyObject * l = PyLong_FromUnsignedLong(PyArrayScalar_VAL(obj, ULong));
npy_hash_t x = PyObject_Hash(l);
Py_DECREF(l);
return x;
@@ -2991,7 +2991,7 @@ ulong_arrtype_hash(PyObject *obj)
static npy_hash_t
int_arrtype_hash(PyObject *obj)
{
- npy_hash_t x = (npy_hash_t)(((PyIntScalarObject *)obj)->obval);
+ npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, Int));
if (x == -1) {
x = -2;
}
@@ -3001,7 +3001,7 @@ int_arrtype_hash(PyObject *obj)
static npy_hash_t
long_arrtype_hash(PyObject *obj)
{
- PyObject * l = PyLong_FromLong(((PyLongScalarObject*)obj)->obval);
+ PyObject * l = PyLong_FromLong(PyArrayScalar_VAL(obj, Long));
npy_hash_t x = PyObject_Hash(l);
Py_DECREF(l);
return x;
@@ -3016,7 +3016,7 @@ static NPY_INLINE npy_hash_t
@char@longlong_arrtype_hash(PyObject *obj)
{
PyObject * l = PyLong_From@Word@LongLong(
- ((Py@Char@LongLongScalarObject*)obj)->obval);
+ PyArrayScalar_VAL(obj, @Char@LongLong));
npy_hash_t x = PyObject_Hash(l);
Py_DECREF(l);
return x;
@@ -3032,7 +3032,7 @@ static NPY_INLINE npy_hash_t
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- npy_hash_t x = (npy_hash_t)(((Py@name@ScalarObject *)obj)->obval);
+ npy_hash_t x = (npy_hash_t)(PyArrayScalar_VAL(obj, @name@));
if (x == -1) {
x = -2;
}
@@ -3043,7 +3043,7 @@ static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
npy_hash_t y;
- npy_longlong x = (((Py@name@ScalarObject *)obj)->obval);
+ npy_longlong x = (PyArrayScalar_VAL(obj, @name@));
if ((x <= LONG_MAX)) {
y = (npy_hash_t) x;
@@ -3076,7 +3076,7 @@ static npy_hash_t
static npy_hash_t
@lname@_arrtype_hash(PyObject *obj)
{
- return _Py_HashDouble((double) ((Py@name@ScalarObject *)obj)->obval);
+ return _Py_HashDouble((double) PyArrayScalar_VAL(obj, @name@));
}
/* borrowed from complex_hash */
@@ -3085,13 +3085,13 @@ c@lname@_arrtype_hash(PyObject *obj)
{
npy_hash_t hashreal, hashimag, combined;
hashreal = _Py_HashDouble((double)
- (((PyC@name@ScalarObject *)obj)->obval).real);
+ PyArrayScalar_VAL(obj, C@name@).real);
if (hashreal == -1) {
return -1;
}
hashimag = _Py_HashDouble((double)
- (((PyC@name@ScalarObject *)obj)->obval).imag);
+ PyArrayScalar_VAL(obj, C@name@).imag);
if (hashimag == -1) {
return -1;
}
@@ -3106,13 +3106,13 @@ c@lname@_arrtype_hash(PyObject *obj)
static npy_hash_t
half_arrtype_hash(PyObject *obj)
{
- return _Py_HashDouble(npy_half_to_double(((PyHalfScalarObject *)obj)->obval));
+ return _Py_HashDouble(npy_half_to_double(PyArrayScalar_VAL(obj, Half)));
}
static npy_hash_t
object_arrtype_hash(PyObject *obj)
{
- return PyObject_Hash(((PyObjectScalarObject *)obj)->obval);
+ return PyObject_Hash(PyArrayScalar_VAL(obj, Object));
}
/* we used to just hash the pointer */
diff --git a/numpy/distutils/misc_util.py b/numpy/distutils/misc_util.py
index b7fd74c47..4be1df94e 100644
--- a/numpy/distutils/misc_util.py
+++ b/numpy/distutils/misc_util.py
@@ -164,7 +164,6 @@ def get_path_from_frame(frame, parent_path=None):
# we're probably running setup.py as execfile("setup.py")
# (likely we're building an egg)
d = os.path.abspath('.')
- # hmm, should we use sys.argv[0] like in __builtin__ case?
if parent_path is not None:
d = rel_path(d, parent_path)
diff --git a/numpy/f2py/f2py2e.py b/numpy/f2py/f2py2e.py
index a6751154b..9707d6f7d 100755
--- a/numpy/f2py/f2py2e.py
+++ b/numpy/f2py/f2py2e.py
@@ -169,7 +169,7 @@ Extra options (only effective with -c):
Version: %s
numpy Version: %s
-Requires: Python 2.3 or higher.
+Requires: Python 3.5 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2011 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version)
diff --git a/numpy/f2py/rules.py b/numpy/f2py/rules.py
index 459c87aa3..14d6dd8f2 100755
--- a/numpy/f2py/rules.py
+++ b/numpy/f2py/rules.py
@@ -276,7 +276,7 @@ static PyObject *#apiname#(const PyObject *capi_self,
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
-\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
+\t\t\"#argformat#|#keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
@@ -1445,16 +1445,6 @@ def buildapi(rout):
['\\begin{description}'] + rd[k][1:] +\
['\\end{description}']
- # Workaround for Python 2.6, 2.6.1 bug: https://bugs.python.org/issue4720
- if rd['keyformat'] or rd['xaformat']:
- argformat = rd['argformat']
- if isinstance(argformat, list):
- argformat.append('|')
- else:
- assert isinstance(argformat, str), repr(
- (argformat, type(argformat)))
- rd['argformat'] += '|'
-
ar = applyrules(routine_rules, rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n' % (ar['docshort']))