author     Michael Seifert <michaelseifert04@yahoo.de>  2017-02-26 16:53:20 +0100
committer  Michael Seifert <michaelseifert04@yahoo.de>  2017-02-26 16:53:20 +0100
commit     4f8e1a36d099c8ed901645daca3e99976835cd95 (patch)
tree       15736cafc0fca36b2a13d3c1557f5acd3a22dd6e /numpy/lib/function_base.py
parent     2a284725b692288c8c5e6554bf5b0e1bdb498421 (diff)
DOC: Fixed small mistakes in numpy.copy documentation.
Added missing backticks around a link, added a missing colon in an
example, and removed excessive indentation before the "doctest skip directive".
[skip ci]
Diffstat (limited to 'numpy/lib/function_base.py')
-rw-r--r--  numpy/lib/function_base.py  136
1 file changed, 68 insertions, 68 deletions
diff --git a/numpy/lib/function_base.py b/numpy/lib/function_base.py
index d01a4a6cd..c54512c21 100644
--- a/numpy/lib/function_base.py
+++ b/numpy/lib/function_base.py
@@ -1462,7 +1462,7 @@ def copy(a, order='K'):
         Controls the memory layout of the copy. 'C' means C-order,
         'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous,
         'C' otherwise. 'K' means match the layout of `a` as closely
-        as possible. (Note that this function and :meth:ndarray.copy are very
+        as possible. (Note that this function and :meth:`ndarray.copy` are very
         similar, but have different default values for their order=
         arguments.)
 
@@ -1473,9 +1473,9 @@ def copy(a, order='K'):
 
     Notes
     -----
-    This is equivalent to
+    This is equivalent to:
 
-    >>> np.array(a, copy=True) #doctest: +SKIP
+    >>> np.array(a, copy=True) #doctest: +SKIP
 
     Examples
     --------
@@ -1515,18 +1515,18 @@ def gradient(f, *varargs, **kwargs):
     varargs : list of scalar or array, optional
         Spacing between f values. Default unitary spacing for all dimensions.
         Spacing can be specified using:
-
+
         1. single scalar to specify a sample distance for all dimensions.
         2. N scalars to specify a constant sample distance for each dimension.
            i.e. `dx`, `dy`, `dz`, ...
-        3. N arrays to specify the coordinates of the values along each
-           dimension of F. The length of the array must match the size of
+        3. N arrays to specify the coordinates of the values along each
+           dimension of F. The length of the array must match the size of
           the corresponding dimension
         4. Any combination of N scalars/arrays with the meaning of 2. and 3.
-
+
         If `axis` is given, the number of varargs must equal the number of axes.
         Default: 1.
-
+
     edge_order : {1, 2}, optional
         Gradient is calculated using N-th order accurate differences
         at the boundaries. Default: 1.
@@ -1535,8 +1535,8 @@ def gradient(f, *varargs, **kwargs):
     axis : None or int or tuple of ints, optional
         Gradient is calculated only along the given axis or axes
-        The default (axis = None) is to calculate the gradient for all the axes
-        of the input array. axis may be negative, in which case it counts from
+        The default (axis = None) is to calculate the gradient for all the axes
+        of the input array. axis may be negative, in which case it counts from
         the last to the first axis.
 
         .. versionadded:: 1.11.0
 
@@ -1547,7 +1547,7 @@ def gradient(f, *varargs, **kwargs):
         A set of ndarrays (or a single ndarray if there is only one dimension)
         corresponding to the derivatives of f with respect to each dimension.
         Each derivative has the same shape as f.
-
+
     Examples
     --------
     >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=np.float)
@@ -1555,7 +1555,7 @@ def gradient(f, *varargs, **kwargs):
     array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
     >>> np.gradient(f, 2)
     array([ 0.5 ,  0.75,  1.25,  1.75,  2.25,  2.5 ])
-
+
     Spacing can be also specified with an array that represents the coordinates
     of the values F along the dimensions.
     For instance a uniform spacing:
@@ -1564,12 +1564,12 @@ def gradient(f, *varargs, **kwargs):
     >>> x = np.arange(f.size)
     >>> np.gradient(f, x)
     array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])
-    Or a non uniform one:
-
+    Or a non uniform one:
+
     >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=np.float)
     >>> np.gradient(f, x)
     array([ 1. ,  3. ,  3.5,  6.7,  6.9,  2.5])
-
+
     For two dimensional arrays, the return will be two arrays ordered by
     axis. In this example the first array stands for the gradient in
     rows and the second one in columns direction:
@@ -1579,7 +1579,7 @@ def gradient(f, *varargs, **kwargs):
            [ 2.,  2., -1.]]), array([[ 1. ,  2.5,  4. ],
            [ 1. ,  1. ,  1. ]])]
 
-    In this example the spacing is also specified:
+    In this example the spacing is also specified:
     uniform for axis=0 and non uniform for axis=1
 
     >>> dx = 2.
@@ -1590,7 +1590,7 @@ def gradient(f, *varargs, **kwargs):
            [ 2. ,  1.7,  0.5]])]
 
     It is possible to specify how boundaries are treated using `edge_order`
-
+
     >>> x = np.array([0, 1, 2, 3, 4])
     >>> f = x**2
     >>> np.gradient(f, edge_order=1)
@@ -1598,35 +1598,35 @@ def gradient(f, *varargs, **kwargs):
     >>> np.gradient(f, edge_order=2)
     array([-0.,  2.,  4.,  6.,  8.])
 
-    The `axis` keyword can be used to specify a subset of axes of which the
+    The `axis` keyword can be used to specify a subset of axes of which the
     gradient is calculated
-
+
     >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=np.float), axis=0)
     array([[ 2.,  2., -1.],
            [ 2.,  2., -1.]])
 
     Notes
     -----
-    Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous
-    derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the
-    spacing the finite difference coefficients are computed by minimising
+    Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continous
+    derivatives) and let be :math:`h_{*}` a non homogeneous stepsize, the
+    spacing the finite difference coefficients are computed by minimising
     the consistency error :math:`\\eta_{i}`:
-
-    .. math::
-
-        \\eta_{i} = f_{i}^{\\left(1\\right)} -
-                    \\left[ \\alpha f\\left(x_{i}\\right) +
+
+    .. math::
+
+        \\eta_{i} = f_{i}^{\\left(1\\right)} -
+                    \\left[ \\alpha f\\left(x_{i}\\right) +
                             \\beta f\\left(x_{i} + h_{d}\\right) +
                             \\gamma f\\left(x_{i}-h_{s}\\right)
                     \\right]
-
-    By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
-    with their Taylor series expansion, this translates into solving
+
+    By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})`
+    with their Taylor series expansion, this translates into solving
     the following the linear system:
 
-    .. math::
+    .. math::
 
-        \\left\\{
+        \\left\\{
             \\begin{array}{r}
                 \\alpha+\\beta+\\gamma=0 \\\\
                 -\\beta h_{d}+\\gamma h_{s}=1 \\\\
@@ -1636,40 +1636,40 @@ def gradient(f, *varargs, **kwargs):
 
     The resulting approximation of :math:`f_{i}^{(1)}` is the following:
 
-    .. math::
-
-        \\hat f_{i}^{(1)} =
-            \\frac{
-                h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
-                + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
+    .. math::
+
+        \\hat f_{i}^{(1)} =
+            \\frac{
+                h_{s}^{2}f\\left(x_{i} + h_{d}\\right)
+                + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right)
                 - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)}
                 { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)}
-            + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
-                                + h_{s}h_{d}^{2}}{h_{d}
+            + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2}
+                                + h_{s}h_{d}^{2}}{h_{d}
                                 + h_{s}}\\right)
 
-    It is worth noting that if :math:`h_{s}=h_{d}`
-    (i.e., data are evenly spaced)
+    It is worth noting that if :math:`h_{s}=h_{d}`
+    (i.e., data are evenly spaced)
     we find the standard second order approximation:
-
-    .. math::
-
+
+    .. math::
+
         \\hat f_{i}^{(1)}=
-            \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
+            \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h}
            + \\mathcal{O}\\left(h^{2}\\right)
 
-    With a similar procedure the forward/backward approximations used for
+    With a similar procedure the forward/backward approximations used for
     boundaries can be derived.
-
+
     References
     ----------
-    .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
+    .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics
           (Texts in Applied Mathematics). New York: Springer.
-    .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
+    .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations
           in Geophysical Fluid Dynamics. New York: Springer.
-    .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
-          Arbitrarily Spaced Grids,
-          Mathematics of Computation 51, no. 184 : 699-706.
+    .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on
+          Arbitrarily Spaced Grids,
+          Mathematics of Computation 51, no. 184 : 699-706.
          `PDF <http://www.ams.org/journals/mcom/1988-51-184/
          S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_.
     """
@@ -1707,15 +1707,15 @@ def gradient(f, *varargs, **kwargs):
                                  "the length of the corresponding dimension")
             diffx = np.diff(dx[i])
             # if distances are constant reduce to the scalar case
-            # since it brings a consistent speedup
-            if (diffx == diffx[0]).all():
+            # since it brings a consistent speedup
+            if (diffx == diffx[0]).all():
                 diffx = diffx[0]
             dx[i] = diffx
         if len(dx) == 1:
             dx *= len_axes
     else:
         raise TypeError("invalid number of arguments")
-
+
     edge_order = kwargs.pop('edge_order', 1)
     if kwargs:
         raise TypeError('"{}" are not valid keyword arguments.'.format(
@@ -1762,14 +1762,14 @@ def gradient(f, *varargs, **kwargs):
         # result allocation
         out = np.empty_like(y, dtype=otype)
 
-        uniform_spacing = np.isscalar(dx[i])
+        uniform_spacing = np.isscalar(dx[i])
 
         # Numerical differentiation: 2nd order interior
         slice1[axis] = slice(1, -1)
         slice2[axis] = slice(None, -2)
         slice3[axis] = slice(1, -1)
         slice4[axis] = slice(2, None)
-
+
         if uniform_spacing:
             out[slice1] = (f[slice4] - f[slice2]) / (2. * dx[i])
         else:
@@ -1784,7 +1784,7 @@ def gradient(f, *varargs, **kwargs):
             a.shape = b.shape = c.shape = shape
             # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
             out[slice1] = a * f[slice2] + b * f[slice3] + c * f[slice4]
-
+
         # Numerical differentiation: 1st order edges
         if edge_order == 1:
             slice1[axis] = 0
@@ -1793,14 +1793,14 @@ def gradient(f, *varargs, **kwargs):
             dx_0 = dx[i] if uniform_spacing else dx[i][0]
             # 1D equivalent -- out[0] = (y[1] - y[0]) / (x[1] - x[0])
             out[slice1] = (y[slice2] - y[slice3]) / dx_0
-
+
             slice1[axis] = -1
             slice2[axis] = -1
             slice3[axis] = -2
             dx_n = dx[i] if uniform_spacing else dx[i][-1]
             # 1D equivalent -- out[-1] = (y[-1] - y[-2]) / (x[-1] - x[-2])
             out[slice1] = (y[slice2] - y[slice3]) / dx_n
-
+
         # Numerical differentiation: 2nd order edges
         else:
             slice1[axis] = 0
@@ -1819,7 +1819,7 @@ def gradient(f, *varargs, **kwargs):
             c = - dx1 / (dx2 * (dx1 + dx2))
             # 1D equivalent -- out[0] = a * y[0] + b * y[1] + c * y[2]
             out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
-
+
             slice1[axis] = -1
             slice2[axis] = -3
             slice3[axis] = -2
@@ -1835,8 +1835,8 @@ def gradient(f, *varargs, **kwargs):
             b = - (dx2 + dx1) / (dx1 * dx2)
             c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
             # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
-            out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
-
+            out[slice1] = a * y[slice2] + b * y[slice3] + c * y[slice4]
+
         outvals.append(out)
 
         # reset the slice object in this dimension to ":"
@@ -2903,8 +2903,8 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
         contain observations.
     bias : bool, optional
         Default normalization (False) is by ``(N - 1)``, where ``N`` is the
-        number of observations given (unbiased estimate). If `bias` is True,
-        then normalization is by ``N``. These values can be overridden by using
+        number of observations given (unbiased estimate). If `bias` is True,
+        then normalization is by ``N``. These values can be overridden by using
         the keyword ``ddof`` in numpy versions >= 1.5.
     ddof : int, optional
         If not ``None`` the default value implied by `bias` is overridden.
@@ -3069,7 +3069,7 @@ def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
             fact = w_sum - ddof*sum(w*aweights)/w_sum
 
     if fact <= 0:
-        warnings.warn("Degrees of freedom <= 0 for slice",
+        warnings.warn("Degrees of freedom <= 0 for slice",
                       RuntimeWarning, stacklevel=2)
         fact = 0.0
@@ -4823,7 +4823,7 @@ def delete(arr, obj, axis=None):
         # After removing the special handling of booleans and out of
         # bounds values, the conversion to the array can be removed.
         if obj.dtype == bool:
-            warnings.warn("in the future insert will treat boolean arrays and "
+            warnings.warn("in the future insert will treat boolean arrays and "
                           "array-likes as boolean index instead of casting it "
                           "to integer", FutureWarning, stacklevel=2)
             obj = obj.astype(intp)
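
For context on the `ndarray.copy` cross-reference fixed above: `numpy.copy` defaults to order='K' while `ndarray.copy` defaults to order='C', and the docstring states that `np.copy(a)` is equivalent to `np.array(a, copy=True)`. A minimal sketch (not part of the patch) that checks both statements:

    import numpy as np

    a = np.arange(6).reshape(2, 3).T          # transposed view -> Fortran-contiguous
    assert a.flags['F_CONTIGUOUS'] and not a.flags['C_CONTIGUOUS']

    # np.copy defaults to order='K' and keeps the Fortran layout ...
    assert np.copy(a).flags['F_CONTIGUOUS']
    # ... while ndarray.copy defaults to order='C' and returns a C-ordered copy.
    assert a.copy().flags['C_CONTIGUOUS']

    # The documented equivalence: np.copy(a) behaves like np.array(a, copy=True).
    b = np.array(a, copy=True)
    assert b is not a and (b == a).all() and b.flags['F_CONTIGUOUS']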
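
The `gradient` Notes reproduced above derive the non-uniform interior formula; it can be sanity-checked against the docstring's own non-uniform example (f = [1, 2, 4, 7, 11, 16], x = [0., 1., 1.5, 3.5, 4., 6.], gradient 3.5 at index 2). A small sketch, not part of the patch; the np.gradient call itself assumes a NumPy version that already accepts coordinate arrays as spacing (the development version this patch targets):

    import numpy as np

    f = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    x = np.array([0., 1., 1.5, 3.5, 4., 6.])

    i = 2                        # interior point x[2] = 1.5
    hs = x[i] - x[i - 1]         # spacing to the left  (0.5)
    hd = x[i + 1] - x[i]         # spacing to the right (2.0)

    # hat f_i^(1) = (hs^2 f(x_i+hd) + (hd^2 - hs^2) f(x_i) - hd^2 f(x_i-hs))
    #               / (hs * hd * (hd + hs))
    approx = (hs**2 * f[i + 1] + (hd**2 - hs**2) * f[i]
              - hd**2 * f[i - 1]) / (hs * hd * (hd + hs))
    print(approx)                # 3.5, matching the docstring output

    print(np.gradient(f, x)[i])  # 3.5 as well, via coordinate-array spacing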