author     Matti Picus <matti.picus@gmail.com>   2021-12-22 14:31:56 +0200
committer  GitHub <noreply@github.com>           2021-12-22 14:31:56 +0200
commit     6d5bc233442c034d54e0c0b15fdd7ed27e36fcac (patch)
tree       4d22c2129c0edf2e615e2afe3378761cb3a6cd87 /doc/source
parent     7acb0fd4123673dc38aa5634b47f93770e61cfab (diff)
parent     7bc1d5100fc67abdcaf5400223386ea613d7a872 (diff)
download   numpy-6d5bc233442c034d54e0c0b15fdd7ed27e36fcac.tar.gz
Merge pull request #20288 from pdebuyl/doctest_for_pytest
DOC: make some doctests in user,reference pass pytest
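
The patch only touches documentation examples. As a rough, illustrative sketch (not the exact invocation NumPy's CI uses), one of the edited files could be checked locally with Python's doctest machinery, with the option flags the examples below rely on::

    import doctest
    import numpy as np

    # Illustrative only: the file path, option flags, and injected globals are
    # assumptions for a local spot check, not part of this commit.
    results = doctest.testfile(
        "doc/source/user/basics.creation.rst",
        module_relative=False,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
        globs={"np": np},
    )
    print(results)   # TestResults(failed=..., attempted=...)
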
Diffstat (limited to 'doc/source')

 doc/source/reference/arrays.classes.rst                |  1
 doc/source/reference/arrays.ndarray.rst                |  6
 doc/source/reference/arrays.nditer.cython.rst          |  8
 doc/source/reference/arrays.nditer.rst                 |  8
 doc/source/reference/distutils.rst                     |  2
 doc/source/reference/maskedarray.baseclass.rst         |  1
 doc/source/reference/maskedarray.generic.rst           |  2
 doc/source/reference/random/generator.rst              | 10
 doc/source/reference/routines.polynomials.classes.rst  | 20
 doc/source/user/absolute_beginners.rst                 | 29
 doc/source/user/basics.broadcasting.rst                | 30
 doc/source/user/basics.byteswapping.rst                |  2
 doc/source/user/basics.copies.rst                      |  2
 doc/source/user/basics.creation.rst                    | 18
 doc/source/user/basics.dispatch.rst                    | 27
 doc/source/user/basics.indexing.rst                    |  8
 doc/source/user/basics.io.genfromtxt.rst               | 46
 doc/source/user/basics.rec.rst                         |  8
 doc/source/user/basics.subclassing.rst                 | 25
 doc/source/user/basics.types.rst                       |  8
 doc/source/user/basics.ufuncs.rst                      |  2
 doc/source/user/misc.rst                               | 14
 doc/source/user/quickstart.rst                         | 10

 23 files changed, 146 insertions, 141 deletions
diff --git a/doc/source/reference/arrays.classes.rst b/doc/source/reference/arrays.classes.rst
index 92c271f6b..4e908678d 100644
--- a/doc/source/reference/arrays.classes.rst
+++ b/doc/source/reference/arrays.classes.rst
@@ -7,7 +7,6 @@ Standard array subclasses
.. currentmodule:: numpy
.. for doctests
- >>> import numpy as np
>>> np.random.seed(1)
.. note::
diff --git a/doc/source/reference/arrays.ndarray.rst b/doc/source/reference/arrays.ndarray.rst
index 0f703b475..66ebb66fb 100644
--- a/doc/source/reference/arrays.ndarray.rst
+++ b/doc/source/reference/arrays.ndarray.rst
@@ -54,13 +54,13 @@ objects implementing the :class:`buffer` or :ref:`array
>>> y = x[:,1]
>>> y
- array([2, 5])
+ array([2, 5], dtype=int32)
>>> y[0] = 9 # this also changes the corresponding element in x
>>> y
- array([9, 5])
+ array([9, 5], dtype=int32)
>>> x
array([[1, 9, 3],
- [4, 5, 6]])
+ [4, 5, 6]], dtype=int32)
Constructing arrays
diff --git a/doc/source/reference/arrays.nditer.cython.rst b/doc/source/reference/arrays.nditer.cython.rst
index 43aad9927..66485fc8a 100644
--- a/doc/source/reference/arrays.nditer.cython.rst
+++ b/doc/source/reference/arrays.nditer.cython.rst
@@ -49,7 +49,7 @@ Here's how this looks.
...
>>> a = np.arange(6).reshape(2,3)
>>> sum_squares_py(a)
- array(55.0)
+ array(55.)
>>> sum_squares_py(a, axis=-1)
array([ 5., 50.])
@@ -117,11 +117,11 @@ as our native Python/NumPy code did.
.. admonition:: Example
- >>> from sum_squares import sum_squares_cy
+ >>> from sum_squares import sum_squares_cy #doctest: +SKIP
>>> a = np.arange(6).reshape(2,3)
- >>> sum_squares_cy(a)
+ >>> sum_squares_cy(a) #doctest: +SKIP
array(55.0)
- >>> sum_squares_cy(a, axis=-1)
+ >>> sum_squares_cy(a, axis=-1) #doctest: +SKIP
array([ 5., 50.])
Doing a little timing in IPython shows that the reduced overhead and
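
The hunk above leans on the standard doctest ``SKIP`` directive: the example is still rendered in the built docs, but never executed, so the compiled ``sum_squares`` module does not have to exist at test time. A minimal self-contained sketch of that behaviour::

    import doctest

    def sample():
        """
        >>> from sum_squares import sum_squares_cy   #doctest: +SKIP
        >>> sum_squares_cy([1.0, 2.0, 3.0])          #doctest: +SKIP
        array(14.0)
        """

    # Both examples are parsed but skipped, so the missing extension module
    # cannot cause a failure; skipped examples are not counted as attempted.
    doctest.run_docstring_examples(sample, {})
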
diff --git a/doc/source/reference/arrays.nditer.rst b/doc/source/reference/arrays.nditer.rst
index 72a04f73e..8cabc1a06 100644
--- a/doc/source/reference/arrays.nditer.rst
+++ b/doc/source/reference/arrays.nditer.rst
@@ -1,9 +1,5 @@
.. currentmodule:: numpy
-.. for doctests
- The last section on Cython is 'included' at the end of this file. The tests
- for that section are disabled.
-
.. _arrays.nditer:
*********************
@@ -489,9 +485,9 @@ reasons.
>>> b = np.zeros((3,))
>>> square([1,2,3], out=b)
- array([ 1., 4., 9.])
+ array([1., 4., 9.])
>>> b
- array([ 1., 4., 9.])
+ array([1., 4., 9.])
>>> square(np.arange(6).reshape(2,3), out=b)
Traceback (most recent call last):
diff --git a/doc/source/reference/distutils.rst b/doc/source/reference/distutils.rst
index f201ba668..9db757c89 100644
--- a/doc/source/reference/distutils.rst
+++ b/doc/source/reference/distutils.rst
@@ -188,6 +188,8 @@ Info are easily retrieved from the `get_info` function in
>>> info = np.distutils.misc_util.get_info('npymath')
>>> config.add_extension('foo', sources=['foo.c'], extra_info=info)
+ <numpy.distutils.extension.Extension('foo') at 0x...>
+
An additional list of paths to look for .ini files can be given to `get_info`.
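
The expected output added here ends in ``0x...``, which only matches when the doctest ``ELLIPSIS`` option is active. A small standalone sketch of the same matching rule, using a toy class rather than ``numpy.distutils``::

    import doctest

    def sample():
        """
        >>> class Extension:
        ...     def __repr__(self):
        ...         return "<Extension('foo') at 0x%x>" % id(self)
        >>> Extension()
        <Extension('foo') at 0x...>
        """

    # With ELLIPSIS enabled, "0x..." matches whatever address id() produces.
    doctest.run_docstring_examples(sample, {}, optionflags=doctest.ELLIPSIS)
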
diff --git a/doc/source/reference/maskedarray.baseclass.rst b/doc/source/reference/maskedarray.baseclass.rst
index 5a0f99651..44792a0d6 100644
--- a/doc/source/reference/maskedarray.baseclass.rst
+++ b/doc/source/reference/maskedarray.baseclass.rst
@@ -1,7 +1,6 @@
.. currentmodule:: numpy.ma
.. for doctests
- >>> import numpy as np
>>> from numpy import ma
.. _numpy.ma.constants:
diff --git a/doc/source/reference/maskedarray.generic.rst b/doc/source/reference/maskedarray.generic.rst
index d3849c50d..29fc2fe07 100644
--- a/doc/source/reference/maskedarray.generic.rst
+++ b/doc/source/reference/maskedarray.generic.rst
@@ -467,7 +467,7 @@ Suppose now that we wish to print that same data, but with the missing values
replaced by the average value.
>>> print(mx.filled(mx.mean()))
- [ 0. 1. 2. 3. 4.]
+ [0. 1. 2. 3. 4.]
Numerical operations
diff --git a/doc/source/reference/random/generator.rst b/doc/source/reference/random/generator.rst
index 7934be98a..a0ef01dcb 100644
--- a/doc/source/reference/random/generator.rst
+++ b/doc/source/reference/random/generator.rst
@@ -73,12 +73,12 @@ the value of the ``out`` parameter. For example,
>>> rng = np.random.default_rng()
>>> x = np.arange(0, 15).reshape(3, 5)
- >>> x
+ >>> x #doctest: +SKIP
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]])
>>> y = rng.permuted(x, axis=1, out=x)
- >>> x
+ >>> x #doctest: +SKIP
array([[ 1, 0, 2, 4, 3], # random
[ 6, 7, 8, 9, 5],
[10, 14, 11, 13, 12]])
@@ -103,7 +103,7 @@ array, and ``axis=1`` will rearrange the columns. For example
array([[ 0, 1, 2, 3, 4],
[ 5, 6, 7, 8, 9],
[10, 11, 12, 13, 14]])
- >>> rng.permutation(x, axis=1)
+ >>> rng.permutation(x, axis=1) #doctest: +SKIP
array([[ 1, 3, 2, 0, 4], # random
[ 6, 8, 7, 5, 9],
[11, 13, 12, 10, 14]])
@@ -116,7 +116,7 @@ how `numpy.sort` treats it. Each slice along the given axis is shuffled
independently of the others. Compare the following example of the use of
`Generator.permuted` to the above example of `Generator.permutation`:
- >>> rng.permuted(x, axis=1)
+ >>> rng.permuted(x, axis=1) #doctest: +SKIP
array([[ 1, 0, 2, 4, 3], # random
[ 5, 7, 6, 9, 8],
[10, 14, 12, 13, 11]])
@@ -134,7 +134,7 @@ For example,
>>> rng = np.random.default_rng()
>>> a = ['A', 'B', 'C', 'D', 'E']
>>> rng.shuffle(a) # shuffle the list in-place
- >>> a
+ >>> a #doctest: +SKIP
['B', 'D', 'A', 'E', 'C'] # random
Distributions
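
An alternative to skipping random output, as done with the ``np.random.seed(1)`` setup near the top of this diff, is to seed the generator so the printed values are reproducible. A sketch (the seed value is arbitrary)::

    import numpy as np

    # Deterministic variant of the permuted/permutation examples above: with a
    # fixed seed the output is stable for a given NumPy version, so neither the
    # "# random" annotation nor the SKIP directive would be needed.
    rng = np.random.default_rng(12345)
    x = np.arange(15).reshape(3, 5)
    print(rng.permuted(x, axis=1))
    print(rng.permutation(x, axis=1))
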
diff --git a/doc/source/reference/routines.polynomials.classes.rst b/doc/source/reference/routines.polynomials.classes.rst
index 5f575bed1..2ce29d9d0 100644
--- a/doc/source/reference/routines.polynomials.classes.rst
+++ b/doc/source/reference/routines.polynomials.classes.rst
@@ -59,11 +59,11 @@ first is the coefficients, the second is the domain, and the third is the
window::
>>> p.coef
- array([ 1., 2., 3.])
+ array([1., 2., 3.])
>>> p.domain
- array([-1., 1.])
+ array([-1, 1])
>>> p.window
- array([-1., 1.])
+ array([-1, 1])
Printing a polynomial yields the polynomial expression in a more familiar
format::
@@ -77,7 +77,7 @@ representation is also available (default on Windows). The polynomial string
format can be toggled at the package-level with the
`~numpy.polynomial.set_default_printstyle` function::
- >>> numpy.polynomial.set_default_printstyle('ascii')
+ >>> np.polynomial.set_default_printstyle('ascii')
>>> print(p)
1.0 + 2.0 x**1 + 3.0 x**2
@@ -137,9 +137,9 @@ Evaluation::
array([ 1., 6., 17., 34., 57.])
>>> x = np.arange(6).reshape(3,2)
>>> p(x)
- array([[ 1., 6.],
- [ 17., 34.],
- [ 57., 86.]])
+ array([[ 1., 6.],
+ [17., 34.],
+ [57., 86.]])
Substitution:
@@ -294,7 +294,6 @@ polynomials up to degree 5 are plotted below.
... ax = plt.plot(x, T.basis(i)(x), lw=2, label=f"$T_{i}$")
...
>>> plt.legend(loc="upper left")
- <matplotlib.legend.Legend object at 0x3b3ee10>
>>> plt.show()
In the range -1 <= `x` <= 1 they are nice, equiripple functions lying between +/- 1.
@@ -309,7 +308,6 @@ The same plots over the range -2 <= `x` <= 2 look very different:
... ax = plt.plot(x, T.basis(i)(x), lw=2, label=f"$T_{i}$")
...
>>> plt.legend(loc="lower right")
- <matplotlib.legend.Legend object at 0x3b3ee10>
>>> plt.show()
As can be seen, the "good" parts have shrunk to insignificance. In using
@@ -335,12 +333,10 @@ illustrated below for a fit to a noisy sine curve.
>>> y = np.sin(x) + np.random.normal(scale=.1, size=x.shape)
>>> p = T.fit(x, y, 5)
>>> plt.plot(x, y, 'o')
- [<matplotlib.lines.Line2D object at 0x2136c10>]
>>> xx, yy = p.linspace()
>>> plt.plot(xx, yy, lw=2)
- [<matplotlib.lines.Line2D object at 0x1cf2890>]
>>> p.domain
- array([ 0. , 6.28318531])
+ array([0. , 6.28318531])
>>> p.window
array([-1., 1.])
>>> plt.show()
diff --git a/doc/source/user/absolute_beginners.rst b/doc/source/user/absolute_beginners.rst
index 27e9e1f63..2c6882905 100644
--- a/doc/source/user/absolute_beginners.rst
+++ b/doc/source/user/absolute_beginners.rst
@@ -229,8 +229,8 @@ content is random and depends on the state of the memory. The reason to use
fill every element afterwards! ::
>>> # Create an empty array with 2 elements
- >>> np.empty(2)
- array([ 3.14, 42. ]) # may vary
+ >>> np.empty(2) #doctest: +SKIP
+ array([3.14, 42. ]) # may vary
You can create an array with a range of elements::
@@ -669,18 +669,18 @@ If you wanted to split this array into three equally shaped arrays, you would
run::
>>> np.hsplit(x, 3)
- [array([[1, 2, 3, 4],
- [13, 14, 15, 16]]), array([[ 5, 6, 7, 8],
- [17, 18, 19, 20]]), array([[ 9, 10, 11, 12],
- [21, 22, 23, 24]])]
+ [array([[ 1, 2, 3, 4],
+ [13, 14, 15, 16]]), array([[ 5, 6, 7, 8],
+ [17, 18, 19, 20]]), array([[ 9, 10, 11, 12],
+ [21, 22, 23, 24]])]
If you wanted to split your array after the third and fourth column, you'd run::
>>> np.hsplit(x, (3, 4))
- [array([[1, 2, 3],
- [13, 14, 15]]), array([[ 4],
- [16]]), array([[ 5, 6, 7, 8, 9, 10, 11, 12],
- [17, 18, 19, 20, 21, 22, 23, 24]])]
+ [array([[ 1, 2, 3],
+ [13, 14, 15]]), array([[ 4],
+ [16]]), array([[ 5, 6, 7, 8, 9, 10, 11, 12],
+ [17, 18, 19, 20, 21, 22, 23, 24]])]
:ref:`Learn more about stacking and splitting arrays here <quickstart.stacking-arrays>`.
@@ -967,9 +967,8 @@ All you need to do is pass in the number of elements you want it to generate::
array([1., 1., 1.])
>>> np.zeros(3)
array([0., 0., 0.])
- # the simplest way to generate random numbers
- >>> rng = np.random.default_rng(0)
- >>> rng.random(3)
+ >>> rng = np.random.default_rng() # the simplest way to generate random numbers
+ >>> rng.random(3) #doctest: +SKIP
array([0.63696169, 0.26978671, 0.04097352])
.. image:: images/np_ones_zeros_random.png
@@ -985,7 +984,7 @@ a 2D array if you give them a tuple describing the dimensions of the matrix::
array([[0., 0.],
[0., 0.],
[0., 0.]])
- >>> rng.random((3, 2))
+ >>> rng.random((3, 2)) #doctest: +SKIP
array([[0.01652764, 0.81327024],
[0.91275558, 0.60663578],
[0.72949656, 0.54362499]]) # may vary
@@ -1011,7 +1010,7 @@ that this is inclusive with NumPy) to high (exclusive). You can set
You can generate a 2 x 4 array of random integers between 0 and 4 with::
- >>> rng.integers(5, size=(2, 4))
+ >>> rng.integers(5, size=(2, 4)) #doctest: +SKIP
array([[2, 1, 1, 0],
[0, 0, 0, 4]]) # may vary
diff --git a/doc/source/user/basics.broadcasting.rst b/doc/source/user/basics.broadcasting.rst
index ca299085a..7d4c185b6 100644
--- a/doc/source/user/basics.broadcasting.rst
+++ b/doc/source/user/basics.broadcasting.rst
@@ -26,7 +26,7 @@ have exactly the same shape, as in the following example:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = np.array([2.0, 2.0, 2.0])
>>> a * b
- array([ 2., 4., 6.])
+ array([2., 4., 6.])
NumPy's broadcasting rule relaxes this constraint when the arrays'
shapes meet certain constraints. The simplest broadcasting example occurs
@@ -35,7 +35,7 @@ when an array and a scalar value are combined in an operation:
>>> a = np.array([1.0, 2.0, 3.0])
>>> b = 2.0
>>> a * b
-array([ 2., 4., 6.])
+array([2., 4., 6.])
The result is equivalent to the previous example where ``b`` was an array.
We can think of the scalar ``b`` being *stretched* during the arithmetic
@@ -158,17 +158,17 @@ Here are examples of shapes that do not broadcast::
An example of broadcasting when a 1-d array is added to a 2-d array::
- >>> a = array([[ 0.0, 0.0, 0.0],
- ... [10.0, 10.0, 10.0],
- ... [20.0, 20.0, 20.0],
- ... [30.0, 30.0, 30.0]])
- >>> b = array([1.0, 2.0, 3.0])
+ >>> a = np.array([[ 0.0, 0.0, 0.0],
+ ... [10.0, 10.0, 10.0],
+ ... [20.0, 20.0, 20.0],
+ ... [30.0, 30.0, 30.0]])
+ >>> b = np.array([1.0, 2.0, 3.0])
>>> a + b
array([[ 1., 2., 3.],
- [ 11., 12., 13.],
- [ 21., 22., 23.],
- [ 31., 32., 33.]])
- >>> b = array([1.0, 2.0, 3.0, 4.0])
+ [11., 12., 13.],
+ [21., 22., 23.],
+ [31., 32., 33.]])
+ >>> b = np.array([1.0, 2.0, 3.0, 4.0])
>>> a + b
Traceback (most recent call last):
ValueError: operands could not be broadcast together with shapes (4,3) (4,)
@@ -208,10 +208,10 @@ outer addition operation of two 1-d arrays::
>>> a = np.array([0.0, 10.0, 20.0, 30.0])
>>> b = np.array([1.0, 2.0, 3.0])
>>> a[:, np.newaxis] + b
- array([[ 1., 2., 3.],
- [ 11., 12., 13.],
- [ 21., 22., 23.],
- [ 31., 32., 33.]])
+ array([[ 1., 2., 3.],
+ [11., 12., 13.],
+ [21., 22., 23.],
+ [31., 32., 33.]])
.. figure:: broadcasting_4.svg
:alt: A 2-d array of shape (4, 1) and a 1-d array of shape (3) are
diff --git a/doc/source/user/basics.byteswapping.rst b/doc/source/user/basics.byteswapping.rst
index fecdb9ee8..d0a662390 100644
--- a/doc/source/user/basics.byteswapping.rst
+++ b/doc/source/user/basics.byteswapping.rst
@@ -31,7 +31,7 @@ The bytes I have loaded from the file would have these contents:
>>> big_end_buffer = bytearray([0,1,3,2])
>>> big_end_buffer
-bytearray(b'\\x00\\x01\\x03\\x02')
+bytearray(b'\x00\x01\x03\x02')
We might want to use an ``ndarray`` to access these integers. In that
case, we can create an array around this memory, and tell numpy that
diff --git a/doc/source/user/basics.copies.rst b/doc/source/user/basics.copies.rst
index e8ba68bc0..482cbc189 100644
--- a/doc/source/user/basics.copies.rst
+++ b/doc/source/user/basics.copies.rst
@@ -151,4 +151,4 @@ the original array while it returns ``None`` for a copy.
Note that the ``base`` attribute should not be used to determine
if an ndarray object is *new*; only if it is a view or a copy
-of another ndarray.
\ No newline at end of file
+of another ndarray.
diff --git a/doc/source/user/basics.creation.rst b/doc/source/user/basics.creation.rst
index 523a05379..c0a4fd7cf 100644
--- a/doc/source/user/basics.creation.rst
+++ b/doc/source/user/basics.creation.rst
@@ -109,9 +109,9 @@ examples are shown::
>>> np.arange(10)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> np.arange(2, 10, dtype=float)
- array([ 2., 3., 4., 5., 6., 7., 8., 9.])
+ array([2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.arange(2, 3, 0.1)
- array([ 2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
+ array([2. , 2.1, 2.2, 2.3, 2.4, 2.5, 2.6, 2.7, 2.8, 2.9])
Note: best practice for :func:`numpy.arange` is to use integer start, end, and
step values. There are some subtleties regarding ``dtype``. In the second
@@ -124,7 +124,7 @@ spaced equally between the specified beginning and end values. For
example: ::
>>> np.linspace(1., 4., 6)
- array([ 1. , 1.6, 2.2, 2.8, 3.4, 4. ])
+ array([1. , 1.6, 2.2, 2.8, 3.4, 4. ])
The advantage of this creation function is that you guarantee the
number of elements and the starting and end point. The previous
@@ -217,8 +217,8 @@ specified shape. The default dtype is ``float64``::
``zeros`` in all other respects as such::
>>> np.ones((2, 3))
- array([[ 1., 1., 1.],
- [ 1., 1., 1.]])
+ array([[1., 1., 1.],
+ [1., 1., 1.]])
>>> np.ones((2, 3, 2))
array([[[1., 1.],
[1., 1.],
@@ -300,10 +300,10 @@ arrays into a 4-by-4 array using ``block``::
>>> C = np.zeros((2, 2))
>>> D = np.diag((-3, -4))
>>> np.block([[A, B], [C, D]])
- array([[ 1., 1., 1., 0. ],
- [ 1., 1., 0., 1. ],
- [ 0., 0., -3., 0. ],
- [ 0., 0., 0., -4. ]])
+ array([[ 1., 1., 1., 0.],
+ [ 1., 1., 0., 1.],
+ [ 0., 0., -3., 0.],
+ [ 0., 0., 0., -4.]])
Other routines use similar syntax to join ndarrays. Check the
routine's documentation for further examples and syntax.
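
Most of the expected-output churn in this patch is whitespace only: NumPy 1.14 dropped the extra sign-padding space from array reprs, and the doctests are being brought in line with the current default. A sketch of the difference using the legacy print option::

    import numpy as np

    print(repr(np.arange(2, 10, dtype=float)))
    # array([2., 3., 4., 5., 6., 7., 8., 9.])

    np.set_printoptions(legacy='1.13')        # reproduce the old padded style
    print(repr(np.arange(2, 10, dtype=float)))
    # array([ 2.,  3.,  4.,  5.,  6.,  7.,  8.,  9.])

    np.set_printoptions(legacy=False)         # back to the current default
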
diff --git a/doc/source/user/basics.dispatch.rst b/doc/source/user/basics.dispatch.rst
index 089a7df17..35c73dde4 100644
--- a/doc/source/user/basics.dispatch.rst
+++ b/doc/source/user/basics.dispatch.rst
@@ -57,7 +57,7 @@ array([[2., 0., 0., 0., 0.],
Notice that the return type is a standard ``numpy.ndarray``.
>>> type(np.multiply(arr, 2))
-numpy.ndarray
+<class 'numpy.ndarray'>
How can we pass our custom array type through this function? Numpy allows a
class to indicate that it would like to handle computations in a custom-defined
@@ -119,7 +119,9 @@ DiagonalArray(N=5, value=0.8414709848078965)
At this point ``arr + 3`` does not work.
>>> arr + 3
-TypeError: unsupported operand type(s) for *: 'DiagonalArray' and 'int'
+Traceback (most recent call last):
+...
+TypeError: unsupported operand type(s) for +: 'DiagonalArray' and 'int'
To support it, we need to define the Python interfaces ``__add__``, ``__lt__``,
and so on to dispatch to the corresponding ufunc. We can achieve this
@@ -193,14 +195,14 @@ functions to our custom variants.
... return self.__class__(N, ufunc(*scalars, **kwargs))
... else:
... return NotImplemented
-... def __array_function__(self, func, types, args, kwargs):
-... if func not in HANDLED_FUNCTIONS:
-... return NotImplemented
-... # Note: this allows subclasses that don't override
-... # __array_function__ to handle DiagonalArray objects.
-... if not all(issubclass(t, self.__class__) for t in types):
-... return NotImplemented
-... return HANDLED_FUNCTIONS[func](*args, **kwargs)
+... def __array_function__(self, func, types, args, kwargs):
+... if func not in HANDLED_FUNCTIONS:
+... return NotImplemented
+... # Note: this allows subclasses that don't override
+... # __array_function__ to handle DiagonalArray objects.
+... if not all(issubclass(t, self.__class__) for t in types):
+... return NotImplemented
+... return HANDLED_FUNCTIONS[func](*args, **kwargs)
...
A convenient pattern is to define a decorator ``implements`` that can be used
@@ -241,14 +243,19 @@ this operation is not supported. For example, concatenating two
supported.
>>> np.concatenate([arr, arr])
+Traceback (most recent call last):
+...
TypeError: no implementation found for 'numpy.concatenate' on types that implement __array_function__: [<class '__main__.DiagonalArray'>]
Additionally, our implementations of ``sum`` and ``mean`` do not accept the
optional arguments that numpy's implementation does.
>>> np.sum(arr, axis=0)
+Traceback (most recent call last):
+...
TypeError: sum() got an unexpected keyword argument 'axis'
+
The user always has the option of converting to a normal ``numpy.ndarray`` with
:func:`numpy.asarray` and using standard numpy from there.
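
The rewritten error examples above work because doctest only recognizes an expected exception when the output starts with a ``Traceback (most recent call last):`` header; the stack itself can be replaced by ``...``. A minimal standalone sketch::

    import doctest

    def sample():
        """
        >>> [1, 2] + 3
        Traceback (most recent call last):
        ...
        TypeError: can only concatenate list (not "int") to list
        """

    # The header marks this as an expected exception; the "..." stands in for
    # the ignored traceback body, and the final line must match the message.
    doctest.run_docstring_examples(sample, {})
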
diff --git a/doc/source/user/basics.indexing.rst b/doc/source/user/basics.indexing.rst
index e99682f02..d435a13e3 100644
--- a/doc/source/user/basics.indexing.rst
+++ b/doc/source/user/basics.indexing.rst
@@ -328,6 +328,8 @@ If the index values are out of bounds then an ``IndexError`` is thrown::
array([[3, 4],
[5, 6]])
>>> x[np.array([3, 4])]
+ Traceback (most recent call last):
+ ...
IndexError: index 3 is out of bounds for axis 0 with size 3
When the index consists of as many integer arrays as dimensions of the array
@@ -371,6 +373,8 @@ broadcast them to the same shape. If they cannot be broadcast to the same
shape, an exception is raised::
>>> y[np.array([0, 2, 4]), np.array([0, 1])]
+ Traceback (most recent call last):
+ ...
IndexError: shape mismatch: indexing arrays could not be broadcast
together with shapes (3,) (2,)
@@ -506,7 +510,7 @@ Or wish to add a constant to all negative elements::
>>> x = np.array([1., -1., -2., 3])
>>> x[x < 0] += 20
>>> x
- array([1., 19., 18., 3.])
+ array([ 1., 19., 18., 3.])
In general if an index includes a Boolean array, the result will be
identical to inserting ``obj.nonzero()`` into the same position
@@ -790,6 +794,8 @@ exceptions (assigning complex to floats or ints): ::
>>> x[1]
1
>>> x[1] = 1.2j
+ Traceback (most recent call last):
+ ...
TypeError: can't convert complex to int
diff --git a/doc/source/user/basics.io.genfromtxt.rst b/doc/source/user/basics.io.genfromtxt.rst
index 6a1ba75dd..a9c521fa3 100644
--- a/doc/source/user/basics.io.genfromtxt.rst
+++ b/doc/source/user/basics.io.genfromtxt.rst
@@ -60,8 +60,8 @@ example, comma-separated files (CSV) use a comma (``,``) or a semicolon
>>> data = u"1, 2, 3\n4, 5, 6"
>>> np.genfromtxt(StringIO(data), delimiter=",")
- array([[ 1., 2., 3.],
- [ 4., 5., 6.]])
+ array([[1., 2., 3.],
+ [4., 5., 6.]])
Another common separator is ``"\t"``, the tabulation character. However,
we are not limited to a single character, any string will do. By default,
@@ -76,14 +76,14 @@ size) or to a sequence of integers (if columns can have different sizes)::
>>> data = u" 1 2 3\n 4 5 67\n890123 4"
>>> np.genfromtxt(StringIO(data), delimiter=3)
- array([[ 1., 2., 3.],
- [ 4., 5., 67.],
- [ 890., 123., 4.]])
+ array([[ 1., 2., 3.],
+ [ 4., 5., 67.],
+ [890., 123., 4.]])
>>> data = u"123456789\n 4 7 9\n 4567 9"
>>> np.genfromtxt(StringIO(data), delimiter=(4, 3, 2))
- array([[ 1234., 567., 89.],
- [ 4., 7., 9.],
- [ 4., 567., 9.]])
+ array([[1234., 567., 89.],
+ [ 4., 7., 9.],
+ [ 4., 567., 9.]])
The ``autostrip`` argument
@@ -156,10 +156,10 @@ using the ``skip_footer`` attribute and giving it a value of ``n``::
>>> data = u"\n".join(str(i) for i in range(10))
>>> np.genfromtxt(StringIO(data),)
- array([ 0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
+ array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])
>>> np.genfromtxt(StringIO(data),
... skip_header=3, skip_footer=5)
- array([ 3., 4.])
+ array([3., 4.])
By default, ``skip_header=0`` and ``skip_footer=0``, meaning that no lines
are skipped.
@@ -180,8 +180,8 @@ can use ``usecols=(0, -1)``::
>>> data = u"1 2 3\n4 5 6"
>>> np.genfromtxt(StringIO(data), usecols=(0, -1))
- array([[ 1., 3.],
- [ 4., 6.]])
+ array([[1., 3.],
+ [4., 6.]])
If the columns have names, we can also select which columns to import by
giving their name to the ``usecols`` argument, either as a sequence
@@ -190,12 +190,10 @@ of strings or a comma-separated string::
>>> data = u"1 2 3\n4 5 6"
>>> np.genfromtxt(StringIO(data),
... names="a, b, c", usecols=("a", "c"))
- array([(1.0, 3.0), (4.0, 6.0)],
- dtype=[('a', '<f8'), ('c', '<f8')])
+ array([(1., 3.), (4., 6.)], dtype=[('a', '<f8'), ('c', '<f8')])
>>> np.genfromtxt(StringIO(data),
... names="a, b, c", usecols=("a, c"))
- array([(1.0, 3.0), (4.0, 6.0)],
- dtype=[('a', '<f8'), ('c', '<f8')])
+ array([(1., 3.), (4., 6.)], dtype=[('a', '<f8'), ('c', '<f8')])
@@ -258,7 +256,7 @@ sequence of strings or a comma-separated string::
>>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, names="A, B, C")
- array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
+ array([(1., 2., 3.), (4., 5., 6.)],
dtype=[('A', '<f8'), ('B', '<f8'), ('C', '<f8')])
In the example above, we used the fact that by default, ``dtype=float``.
@@ -272,7 +270,7 @@ that case, we must use the ``names`` keyword with a value of
>>> data = StringIO("So it goes\n#a b c\n1 2 3\n 4 5 6")
>>> np.genfromtxt(data, skip_header=1, names=True)
- array([(1.0, 2.0, 3.0), (4.0, 5.0, 6.0)],
+ array([(1., 2., 3.), (4., 5., 6.)],
dtype=[('a', '<f8'), ('b', '<f8'), ('c', '<f8')])
The default value of ``names`` is ``None``. If we give any other
@@ -283,7 +281,7 @@ have defined with the dtype::
>>> ndtype=[('a',int), ('b', float), ('c', int)]
>>> names = ["A", "B", "C"]
>>> np.genfromtxt(data, names=names, dtype=ndtype)
- array([(1, 2.0, 3), (4, 5.0, 6)],
+ array([(1, 2., 3), (4, 5., 6)],
dtype=[('A', '<i8'), ('B', '<f8'), ('C', '<i8')])
@@ -296,7 +294,7 @@ with the standard NumPy default of ``"f%i"``, yielding names like ``f0``,
>>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int))
- array([(1, 2.0, 3), (4, 5.0, 6)],
+ array([(1, 2., 3), (4, 5., 6)],
dtype=[('f0', '<i8'), ('f1', '<f8'), ('f2', '<i8')])
In the same way, if we don't give enough names to match the length of the
@@ -304,7 +302,7 @@ dtype, the missing names will be defined with this default template::
>>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int), names="a")
- array([(1, 2.0, 3), (4, 5.0, 6)],
+ array([(1, 2., 3), (4, 5., 6)],
dtype=[('a', '<i8'), ('f0', '<f8'), ('f1', '<i8')])
We can overwrite this default with the ``defaultfmt`` argument, that
@@ -312,7 +310,7 @@ takes any format string::
>>> data = StringIO("1 2 3\n 4 5 6")
>>> np.genfromtxt(data, dtype=(int, float, int), defaultfmt="var_%02i")
- array([(1, 2.0, 3), (4, 5.0, 6)],
+ array([(1, 2., 3), (4, 5., 6)],
dtype=[('var_00', '<i8'), ('var_01', '<f8'), ('var_02', '<i8')])
.. note::
@@ -388,7 +386,7 @@ and ``' 78.9%'`` cannot be converted to float and we end up having
>>> # Converted case ...
>>> np.genfromtxt(StringIO(data), delimiter=",", names=names,
... converters={1: convertfunc})
- array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)],
+ array([(1., 0.023, 45.), (6., 0.789, 0.)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
The same results can be obtained by using the name of the second column
@@ -397,7 +395,7 @@ The same results can be obtained by using the name of the second column
>>> # Using a name for the converter ...
>>> np.genfromtxt(StringIO(data), delimiter=",", names=names,
... converters={"p": convertfunc})
- array([(1.0, 0.023, 45.0), (6.0, 0.78900000000000003, 0.0)],
+ array([(1., 0.023, 45.), (6., 0.789, 0.)],
dtype=[('i', '<f8'), ('p', '<f8'), ('n', '<f8')])
diff --git a/doc/source/user/basics.rec.rst b/doc/source/user/basics.rec.rst
index 7f487f39b..4b4b8815f 100644
--- a/doc/source/user/basics.rec.rst
+++ b/doc/source/user/basics.rec.rst
@@ -15,7 +15,7 @@ datatypes organized as a sequence of named :term:`fields <field>`. For example,
... dtype=[('name', 'U10'), ('age', 'i4'), ('weight', 'f4')])
>>> x
array([('Rex', 9, 81.), ('Fido', 3, 27.)],
- dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
+ dtype=[('name', '<U10'), ('age', '<i4'), ('weight', '<f4')])
Here ``x`` is a one-dimensional array of length two whose datatype is a
structure with three fields: 1. A string of length 10 or less named 'name', 2.
@@ -24,7 +24,7 @@ a 32-bit integer named 'age', and 3. a 32-bit float named 'weight'.
If you index ``x`` at position 1 you get a structure::
>>> x[1]
- ('Fido', 3, 27.0)
+ ('Fido', 3, 27.)
You can access and modify individual fields of a structured array by indexing
with the field name::
@@ -34,7 +34,7 @@ with the field name::
>>> x['age'] = 5
>>> x
array([('Rex', 5, 81.), ('Fido', 5, 27.)],
- dtype=[('name', 'U10'), ('age', '<i4'), ('weight', '<f4')])
+ dtype=[('name', '<U10'), ('age', '<i4'), ('weight', '<f4')])
Structured datatypes are designed to be able to mimic 'structs' in the C
language, and share a similar memory layout. They are meant for interfacing with
@@ -425,7 +425,7 @@ array, as follows::
>>> a = np.zeros(3, dtype=[('a', 'i4'), ('b', 'i4'), ('c', 'f4')])
>>> a[['a', 'c']]
array([(0, 0.), (0, 0.), (0, 0.)],
- dtype={'names':['a','c'], 'formats':['<i4','<f4'], 'offsets':[0,8], 'itemsize':12})
+ dtype={'names': ['a', 'c'], 'formats': ['<i4', '<f4'], 'offsets': [0, 8], 'itemsize': 12})
Assignment to the view modifies the original array. The view's fields will be
in the order they were indexed. Note that unlike for single-field indexing, the
diff --git a/doc/source/user/basics.subclassing.rst b/doc/source/user/basics.subclassing.rst
index 1b7880986..55b23bb78 100644
--- a/doc/source/user/basics.subclassing.rst
+++ b/doc/source/user/basics.subclassing.rst
@@ -48,7 +48,7 @@ ndarray of any subclass, and return a view of the array as another
>>> # take a view of it, as our useless subclass
>>> c_arr = arr.view(C)
>>> type(c_arr)
-<class 'C'>
+<class '__main__.C'>
.. _new-from-template:
@@ -63,7 +63,7 @@ For example:
>>> v = c_arr[1:]
>>> type(v) # the view is of type 'C'
-<class 'C'>
+<class '__main__.C'>
>>> v is c_arr # but it's a new instance
False
@@ -114,18 +114,15 @@ __new__ documentation
For example, consider the following Python code:
-.. testcode::
-
- class C:
- def __new__(cls, *args):
- print('Cls in __new__:', cls)
- print('Args in __new__:', args)
- # The `object` type __new__ method takes a single argument.
- return object.__new__(cls)
-
- def __init__(self, *args):
- print('type(self) in __init__:', type(self))
- print('Args in __init__:', args)
+>>> class C:
+...     def __new__(cls, *args):
+...         print('Cls in __new__:', cls)
+...         print('Args in __new__:', args)
+...         # The `object` type __new__ method takes a single argument.
+...         return object.__new__(cls)
+...     def __init__(self, *args):
+...         print('type(self) in __init__:', type(self))
+...         print('Args in __init__:', args)
meaning that we get:
diff --git a/doc/source/user/basics.types.rst b/doc/source/user/basics.types.rst
index fb36af46c..3a49d213b 100644
--- a/doc/source/user/basics.types.rst
+++ b/doc/source/user/basics.types.rst
@@ -143,15 +143,15 @@ backward compatibility with older packages such as Numeric. Some
documentation may still refer to these, for example::
>>> np.array([1, 2, 3], dtype='f')
- array([ 1., 2., 3.], dtype=float32)
+ array([1., 2., 3.], dtype=float32)
We recommend using dtype objects instead.
To convert the type of an array, use the .astype() method (preferred) or
the type itself as a function. For example: ::
- >>> z.astype(float) #doctest: +NORMALIZE_WHITESPACE
- array([ 0., 1., 2.])
+ >>> z.astype(float)
+ array([0., 1., 2.])
>>> np.int8(z)
array([0, 1, 2], dtype=int8)
@@ -170,7 +170,7 @@ and its byte-order. The data type can also be used indirectly to query
properties of the type, such as whether it is an integer::
>>> d = np.dtype(int)
- >>> d
+ >>> d #doctest: +SKIP
dtype('int32')
>>> np.issubdtype(d, np.integer)
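
The ``dtype('int32')`` line is skipped above because ``np.dtype(int)`` follows the platform's C ``long``, so the literal repr differs between Windows and most 64-bit Unix builds. A platform-independent check can be phrased as a boolean instead, as sketched here::

    import numpy as np

    d = np.dtype(int)
    print(d.itemsize * 8)                 # 32 on Windows, 64 on most Unix builds
    print(np.issubdtype(d, np.integer))   # True everywhere, safe to doctest
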
diff --git a/doc/source/user/basics.ufuncs.rst b/doc/source/user/basics.ufuncs.rst
index 083e31f70..5e83621aa 100644
--- a/doc/source/user/basics.ufuncs.rst
+++ b/doc/source/user/basics.ufuncs.rst
@@ -89,7 +89,7 @@ Considering ``x`` from the previous example::
>>> y
array([0, 0, 0])
>>> np.multiply.reduce(x, dtype=float, out=y)
- array([ 0, 28, 80]) # dtype argument is ignored
+ array([ 0, 28, 80])
Ufuncs also have a fifth method, :func:`numpy.ufunc.at`, that allows in place
operations to be performed using advanced indexing. No
diff --git a/doc/source/user/misc.rst b/doc/source/user/misc.rst
index 316473151..8d5ca86f1 100644
--- a/doc/source/user/misc.rst
+++ b/doc/source/user/misc.rst
@@ -19,10 +19,10 @@ Note: cannot use equality to test NaNs. E.g.: ::
False
>>> myarr[myarr == np.nan] = 0. # doesn't work
>>> myarr
- array([ 1., 0., NaN, 3.])
+ array([ 1., 0., nan, 3.])
>>> myarr[np.isnan(myarr)] = 0. # use this instead find
>>> myarr
- array([ 1., 0., 0., 3.])
+ array([1., 0., 0., 3.])
Other related special value functions: ::
@@ -79,20 +79,24 @@ Examples
>>> oldsettings = np.seterr(all='warn')
>>> np.zeros(5,dtype=np.float32)/0.
- invalid value encountered in divide
+ Traceback (most recent call last):
+ ...
+ RuntimeWarning: invalid value encountered in true_divide
>>> j = np.seterr(under='ignore')
>>> np.array([1.e-100])**10
+ array([0.])
>>> j = np.seterr(invalid='raise')
>>> np.sqrt(np.array([-1.]))
+ Traceback (most recent call last):
+ ...
FloatingPointError: invalid value encountered in sqrt
>>> def errorhandler(errstr, errflag):
... print("saw stupid error!")
>>> np.seterrcall(errorhandler)
- <function err_handler at 0x...>
>>> j = np.seterr(all='call')
>>> np.zeros(5, dtype=np.int32)/0
- FloatingPointError: invalid value encountered in divide
saw stupid error!
+ array([nan, nan, nan, nan, nan])
>>> j = np.seterr(**oldsettings) # restore previous
... # error-handling settings
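
The floating-point error-handling examples above change global state via ``np.seterr``; a self-contained variant of the same idea using the ``errstate`` context manager, so the settings are restored automatically, is sketched below::

    import numpy as np

    # Raise on invalid operations only inside the block; the previous settings
    # are restored on exit, so no state leaks into later examples.
    with np.errstate(invalid='raise'):
        try:
            np.sqrt(np.array([-1.0]))
        except FloatingPointError as exc:
            print("caught:", exc)
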
diff --git a/doc/source/user/quickstart.rst b/doc/source/user/quickstart.rst
index a9cfeca31..8e0e3b6ba 100644
--- a/doc/source/user/quickstart.rst
+++ b/doc/source/user/quickstart.rst
@@ -193,7 +193,7 @@ state of the memory. By default, the dtype of the created array is
[[1, 1, 1, 1],
[1, 1, 1, 1],
[1, 1, 1, 1]]], dtype=int16)
- >>> np.empty((2, 3))
+ >>> np.empty((2, 3)) #doctest: +SKIP
array([[3.73603959e-262, 6.02658058e-154, 6.55490914e-260], # may vary
[5.30498948e-313, 3.14673309e-307, 1.00000000e+000]])
@@ -868,9 +868,9 @@ copy.
>>> def f(x):
... print(id(x))
...
- >>> id(a) # id is a unique identifier of an object
+ >>> id(a) # id is a unique identifier of an object #doctest: +SKIP
148293216 # may vary
- >>> f(a)
+ >>> f(a) #doctest: +SKIP
148293216 # may vary
View or Shallow Copy
@@ -1272,6 +1272,7 @@ set <https://en.wikipedia.org/wiki/Mandelbrot_set>`__:
... z[diverge] = r # avoid diverging too much
...
... return divtime
+ >>> plt.clf()
>>> plt.imshow(mandelbrot(400, 400))
The second way of indexing with booleans is more similar to integer
@@ -1468,9 +1469,10 @@ that ``pylab.hist`` plots the histogram automatically, while
>>> v = rg.normal(mu, sigma, 10000)
>>> # Plot a normalized histogram with 50 bins
>>> plt.hist(v, bins=50, density=True) # matplotlib version (plot)
+ (array...)
>>> # Compute the histogram with numpy and then plot it
>>> (n, bins) = np.histogram(v, bins=50, density=True) # NumPy version (no plot)
- >>> plt.plot(.5 * (bins[1:] + bins[:-1]), n)
+ >>> plt.plot(.5 * (bins[1:] + bins[:-1]), n) #doctest: +SKIP
With Matplotlib >=3.4 you can also use ``plt.stairs(n, bins)``.
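
A different way to quiet the plotting calls in this last hunk, not what the patch does, is to bind the return values so nothing is echoed. A sketch under the assumption that Matplotlib is available with a non-interactive backend::

    import matplotlib
    matplotlib.use("Agg")            # headless backend, assumed for the sketch
    import matplotlib.pyplot as plt
    import numpy as np

    rng = np.random.default_rng(0)
    v = rng.normal(0.0, 1.0, 10000)
    _ = plt.hist(v, bins=50, density=True)          # tuple return value discarded
    n, bins = np.histogram(v, bins=50, density=True)
    _ = plt.plot(0.5 * (bins[1:] + bins[:-1]), n)   # list of Line2D discarded
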